/linux-6.3-rc2/drivers/infiniband/sw/rxe/
  rxe_mr.c
    265  mr_page_size(mr) - page_offset);   in rxe_mr_copy_xarray()
    273  page_offset = 0;   in rxe_mr_copy_xarray()
    293  PAGE_SIZE - page_offset);   in rxe_mr_copy_dma()
    302  page_offset = 0;   in rxe_mr_copy_dma()
    431  unsigned int page_offset;   in rxe_flush_pmem_iova()  (local)
    467  page_offset = 0;   in rxe_flush_pmem_iova()
    479  unsigned int page_offset;   in rxe_mr_do_atomic_op()  (local)
    508  if (unlikely(page_offset & 0x7)) {   in rxe_mr_do_atomic_op()
    520  va[page_offset >> 3] = swap_add;   in rxe_mr_do_atomic_op()
    523  va[page_offset >> 3] = value;   in rxe_mr_do_atomic_op()
    [all …]

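The rxe hits at 508, 520 and 523 show the usual pattern for an in-page atomic: the byte offset must be 8-byte aligned, and the aligned offset is then turned into a u64 index with a right shift by 3. A minimal stand-alone sketch of that check and indexing; the helper name and error convention are illustrative, not the rxe_mr_do_atomic_op() code itself:

    #include <stdint.h>
    #include <stddef.h>

    /* Store a 64-bit value at a byte offset inside a mapped page, mirroring
     * the "page_offset & 0x7" alignment check and the "page_offset >> 3"
     * qword indexing seen in rxe_mr.c. */
    static int qword_store_at_offset(uint64_t *page_va, size_t page_offset,
                                     uint64_t value)
    {
            if (page_offset & 0x7)                /* not naturally aligned for a u64 */
                    return -1;

            page_va[page_offset >> 3] = value;    /* byte offset -> u64 index */
            return 0;
    }
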
/linux-6.3-rc2/drivers/net/ethernet/cavium/liquidio/
  octeon_network.h
    298  pg_info->page_offset = 0;   in recv_buffer_alloc()
    300  skb_pg_info->page_offset = 0;   in recv_buffer_alloc()
    324  skb_pg_info->page_offset = 0;   in recv_buffer_fast_alloc()
    348  pg_info->page_offset = 0;   in recv_buffer_recycle()
    353  if (pg_info->page_offset == 0)   in recv_buffer_recycle()
    354  pg_info->page_offset = LIO_RXBUFFER_SZ;   in recv_buffer_recycle()
    356  pg_info->page_offset = 0;   in recv_buffer_recycle()
    384  skb_pg_info->page_offset = pg_info->page_offset;   in recv_buffer_reuse()
    398  pg_info->page_offset = 0;   in recv_buffer_destroy()
    415  pg_info->page_offset = 0;   in recv_buffer_free()
    [all …]

/linux-6.3-rc2/drivers/scsi/fnic/
  fnic_trace.c
    110  fnic_trace_entries.page_offset[rd_idx];   in fnic_get_trace_data()
    152  fnic_trace_entries.page_offset[rd_idx];   in fnic_get_trace_data()
    476  fnic_trace_entries.page_offset =   in fnic_trace_buf_init()
    479  if (!fnic_trace_entries.page_offset) {   in fnic_trace_buf_init()
    518  if (fnic_trace_entries.page_offset) {   in fnic_trace_free()
    520  fnic_trace_entries.page_offset = NULL;   in fnic_trace_free()
    564  fc_trace_entries.page_offset =   in fnic_fc_trace_init()
    567  if (!fc_trace_entries.page_offset) {   in fnic_fc_trace_init()
    606  if (fc_trace_entries.page_offset) {   in fnic_fc_trace_free()
    608  fc_trace_entries.page_offset = NULL;   in fnic_fc_trace_free()
    [all …]

/linux-6.3-rc2/drivers/gpu/drm/vmwgfx/
  vmwgfx_page_dirty.c
    381  unsigned long page_offset;   in vmw_bo_vm_mkwrite()  (local)
    398  if (unlikely(page_offset >= PFN_UP(bo->resource->size))) {   in vmw_bo_vm_mkwrite()
    404  !test_bit(page_offset, &vbo->dirty->bitmap[0])) {   in vmw_bo_vm_mkwrite()
    407  __set_bit(page_offset, &dirty->bitmap[0]);   in vmw_bo_vm_mkwrite()
    408  dirty->start = min(dirty->start, page_offset);   in vmw_bo_vm_mkwrite()
    409  dirty->end = max(dirty->end, page_offset + 1);   in vmw_bo_vm_mkwrite()
    437  unsigned long page_offset;   in vmw_bo_vm_fault()  (local)
    439  page_offset = vmf->pgoff -   in vmw_bo_vm_fault()
    441  if (page_offset >= PFN_UP(bo->resource->size) ||   in vmw_bo_vm_fault()
    442  vmw_resources_clean(vbo, page_offset,   in vmw_bo_vm_fault()
    [all …]

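Lines 407-409 are the dirty-tracking bookkeeping in the vmwgfx mkwrite handler: mark the faulting page in a per-buffer bitmap and widen a [start, end) page range around it. A self-contained sketch of the same bookkeeping, with made-up sizes and field names rather than the vmwgfx structures:

    #include <stddef.h>
    #include <string.h>

    #define BO_PAGES       256                          /* illustrative object size, in pages */
    #define BITS_PER_LONG  (sizeof(unsigned long) * 8)

    struct dirty_tracker {
            unsigned long bitmap[BO_PAGES / BITS_PER_LONG];
            size_t start;   /* first dirty page (inclusive) */
            size_t end;     /* last dirty page + 1 (exclusive) */
    };

    static void dirty_tracker_init(struct dirty_tracker *d)
    {
            memset(d, 0, sizeof(*d));
            d->start = BO_PAGES;    /* empty range: start > end */
    }

    static void mark_page_dirty(struct dirty_tracker *d, size_t page_offset)
    {
            d->bitmap[page_offset / BITS_PER_LONG] |= 1UL << (page_offset % BITS_PER_LONG);
            if (page_offset < d->start)
                    d->start = page_offset;
            if (page_offset + 1 > d->end)
                    d->end = page_offset + 1;
    }

A later flush pass then only has to walk the bitmap between start and end instead of scanning the whole object.
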
/linux-6.3-rc2/drivers/gpu/drm/ttm/
  ttm_bo_vm.c
     85  unsigned long page_offset)   in ttm_bo_io_mem_pfn()  (argument)
     90  return bdev->funcs->io_mem_pfn(bo, page_offset);   in ttm_bo_io_mem_pfn()
     92  return (bo->resource->bus.offset >> PAGE_SHIFT) + page_offset;   in ttm_bo_io_mem_pfn()
    188  unsigned long page_offset;   in ttm_bo_vm_fault_reserved()  (local)
    210  page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +   in ttm_bo_vm_fault_reserved()
    215  if (unlikely(page_offset >= PFN_UP(bo->base.size)))   in ttm_bo_vm_fault_reserved()
    240  pfn = ttm_bo_io_mem_pfn(bo, page_offset);   in ttm_bo_vm_fault_reserved()
    242  page = ttm->pages[page_offset];   in ttm_bo_vm_fault_reserved()
    270  if (unlikely(++page_offset >= page_last))   in ttm_bo_vm_fault_reserved()

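Line 210 is the standard fault-handler arithmetic (the vmwgfx and panfrost entries in this listing do the equivalent): translate the faulting virtual address into a page index relative to the start of the mapped object, then bounds-check it against the object size as on line 215. A condensed sketch of that computation, with placeholder parameter names standing in for the vma and buffer-object fields:

    #include <stdint.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    /* Pages needed to cover `size` bytes (what the kernel's PFN_UP() does). */
    #define PFN_UP(size) (((size) + PAGE_SIZE - 1) >> PAGE_SHIFT)

    /* Map a faulting address to a page index inside the object.
     * Returns -1 when the fault lies beyond the object (SIGBUS in the kernel). */
    static long fault_page_index(unsigned long address, unsigned long vm_start,
                                 unsigned long node_start_pgoff,
                                 unsigned long vm_pgoff, uint64_t obj_size)
    {
            unsigned long page_offset = ((address - vm_start) >> PAGE_SHIFT) +
                                        node_start_pgoff - vm_pgoff;

            if (page_offset >= PFN_UP(obj_size))
                    return -1;
            return (long)page_offset;
    }
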
/linux-6.3-rc2/scripts/
  leaking_addresses.pl
    304  state $page_offset = get_page_offset();
    310  if (hex($match) < $page_offset) {
    320  my $page_offset;
    328  $page_offset = get_kernel_config_option('CONFIG_PAGE_OFFSET');
    329  if (!$page_offset) {
    332  return $page_offset;

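In this script page_offset is the kernel's CONFIG_PAGE_OFFSET, the virtual address at which kernel memory begins, and any scanned value below it is filtered out as not being a leaked kernel pointer (line 310). The same test expressed in C, as a sketch only; the 0xc0000000 default is merely a value commonly seen on 32-bit x86, not something the script hard-codes:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdlib.h>

    /* Would come from the target kernel's CONFIG_PAGE_OFFSET in practice. */
    static const uint64_t page_offset = 0xc0000000ULL;

    /* Mirror of the script's "hex($match) < $page_offset" filter. */
    static bool may_be_kernel_address(const char *hex_str)
    {
            return strtoull(hex_str, NULL, 16) >= page_offset;
    }
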
/linux-6.3-rc2/fs/hfs/
  bnode.c
     25  off += node->page_offset;   in hfs_bnode_read()
     77  off += node->page_offset;   in hfs_bnode_write()
    101  off += node->page_offset;   in hfs_bnode_clear()
    116  src += src_node->page_offset;   in hfs_bnode_copy()
    117  dst += dst_node->page_offset;   in hfs_bnode_copy()
    133  src += node->page_offset;   in hfs_bnode_move()
    134  dst += node->page_offset;   in hfs_bnode_move()
    288  node->page_offset = off & ~PAGE_MASK;   in __hfs_bnode_create()
    344  node->page_offset);   in hfs_bnode_find()
    434  memzero_page(*pagep, node->page_offset,   in hfs_bnode_create()

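Line 288 (and the identical line 438 in the hfsplus copy of bnode.c further down) is the classic decomposition of a byte offset into a page index plus an in-page remainder: PAGE_MASK is ~(PAGE_SIZE - 1), so off & ~PAGE_MASK keeps only the low bits while off >> PAGE_SHIFT selects the page. A minimal sketch of that split; the struct and helper names are made up:

    #include <stddef.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))

    struct page_pos {
            size_t page_index;    /* which page the byte lives in */
            size_t page_offset;   /* offset of the byte inside that page */
    };

    static struct page_pos split_offset(size_t off)
    {
            struct page_pos pos = {
                    .page_index  = off >> PAGE_SHIFT,
                    .page_offset = off & ~PAGE_MASK,   /* i.e. off & (PAGE_SIZE - 1) */
            };
            return pos;
    }

The bnode code stores only the page_offset part and adds it back to every read/write offset, which is why almost all of the hits above are "off += node->page_offset".
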
/linux-6.3-rc2/net/ceph/
  messenger.c
    739  size_t *page_offset,   in ceph_msg_data_bio_next()  (argument)
    745  *page_offset = bv.bv_offset;   in ceph_msg_data_bio_next()
    794  size_t *page_offset,   in ceph_msg_data_bvecs_next()  (argument)
    800  *page_offset = bv.bv_offset;   in ceph_msg_data_bvecs_next()
    853  size_t *page_offset, size_t *length)   in ceph_msg_data_pages_next()  (argument)
    860  BUG_ON(cursor->page_offset >= PAGE_SIZE);   in ceph_msg_data_pages_next()
    862  *page_offset = cursor->page_offset;   in ceph_msg_data_pages_next()
    877  cursor->page_offset = (cursor->page_offset + bytes) & ~PAGE_MASK;   in ceph_msg_data_pages_advance()
    878  if (!bytes || cursor->page_offset)   in ceph_msg_data_pages_advance()
    921  size_t *page_offset, size_t *length)   in ceph_msg_data_pagelist_next()  (argument)
    [all …]

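Lines 877-878 show how the page-array cursor advances: the new in-page offset is taken modulo PAGE_SIZE (again via & ~PAGE_MASK), and only when it wraps to zero does the cursor move on to the next page. A simplified sketch of such a cursor, assuming each advance stays within the remainder of the current page (which the pages_next() hit suggests by handing out offsets below PAGE_SIZE); this is not the ceph_msg_data_cursor itself:

    #include <stddef.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))

    struct page_cursor {
            size_t page_index;    /* current page in the array */
            size_t page_offset;   /* offset within that page, always < PAGE_SIZE */
    };

    /* Advance the cursor by `bytes`, where `bytes` never exceeds the rest of
     * the current page. */
    static void cursor_advance(struct page_cursor *cur, size_t bytes)
    {
            cur->page_offset = (cur->page_offset + bytes) & ~PAGE_MASK;
            if (bytes && cur->page_offset == 0)
                    cur->page_index++;      /* consumed the rest of the page */
    }
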
/linux-6.3-rc2/drivers/infiniband/hw/mlx5/
  mem.c
     65  u64 page_offset;   in __mlx5_umem_find_best_quantized_pgoff()  (local)
     78  page_offset = ib_umem_dma_offset(umem, page_size);   in __mlx5_umem_find_best_quantized_pgoff()
     79  while (page_offset & ~(u64)(page_offset_mask * (page_size / scale))) {   in __mlx5_umem_find_best_quantized_pgoff()
     81  page_offset = ib_umem_dma_offset(umem, page_size);   in __mlx5_umem_find_best_quantized_pgoff()
     92  (unsigned long)page_offset / (page_size / scale);   in __mlx5_umem_find_best_quantized_pgoff()
  srq_cmd.c
     17  u32 page_offset = in->page_offset;   in get_pas_size()  (local)
     21  u32 rq_sz_po = rq_sz + (page_offset * po_quanta);   in get_pas_size()
     34  MLX5_SET(wq, wq, page_offset, in->page_offset);   in set_wq()
     47  MLX5_SET(srqc, srqc, page_offset, in->page_offset);   in set_srqc()
     62  in->page_offset = MLX5_GET(wq, wq, page_offset);   in get_wq()
     75  in->page_offset = MLX5_GET(srqc, srqc, page_offset);   in get_srqc()
    111  MLX5_ADAPTER_PAGE_SHIFT, page_offset, \
    112  64, &(in)->page_offset))

/linux-6.3-rc2/drivers/mtd/tests/
  nandbiterrs.c
     45  static unsigned page_offset;   (variable)
     46  module_param(page_offset, uint, S_IRUGO);
     47  MODULE_PARM_DESC(page_offset, "Page number relative to dev start");
    358  offset = (loff_t)page_offset * mtd->writesize;   in mtd_nandbiterrs_init()
    362  page_offset, offset, eraseblock);   in mtd_nandbiterrs_init()

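Here page_offset is a module parameter naming a NAND page number, and line 358 converts it to a byte offset. The cast to loff_t comes before the multiplication so the product is computed in 64 bits; without it the 32-bit arithmetic could wrap on large devices. A tiny stand-alone illustration of the difference, where the page number and page size are arbitrary example values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t page_offset = 300000;   /* page number, e.g. the module param */
            uint32_t writesize   = 16384;    /* NAND page size in bytes */

            uint32_t wrapped = page_offset * writesize;           /* 32-bit product wraps */
            int64_t  offset  = (int64_t)page_offset * writesize;  /* widen first, as the test does */

            printf("32-bit product: %u\n", wrapped);
            printf("64-bit offset : %lld\n", (long long)offset);
            return 0;
    }
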
/linux-6.3-rc2/drivers/nvmem/
  rave-sp-eeprom.c
    161  const unsigned int page_offset = offset % RAVE_SP_EEPROM_PAGE_SIZE;   in rave_sp_eeprom_page_access()  (local)
    172  if (WARN_ON(data_len > sizeof(page.data) - page_offset))   in rave_sp_eeprom_page_access()
    187  memcpy(&page.data[page_offset], data, data_len);   in rave_sp_eeprom_page_access()
    199  memcpy(data, &page.data[page_offset], data_len);   in rave_sp_eeprom_page_access()

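This EEPROM is accessed one hardware page at a time: the in-page offset is offset % page_size, and a single access may touch at most page_size - page_offset bytes before the next page begins. A generic chunking loop built on that rule; the callback type, helper name and the 32-byte page size are assumptions for the sketch, not the rave-sp transport:

    #include <stddef.h>

    #define EEPROM_PAGE_SIZE 32u   /* illustrative page size */

    /* Hypothetical per-page accessor: touches [page_offset, page_offset + len)
     * inside page number `page` and must never cross a page boundary. */
    typedef int (*page_access_fn)(unsigned int page, unsigned int page_offset,
                                  unsigned char *data, size_t len);

    static int eeprom_access(page_access_fn op, unsigned int offset,
                             unsigned char *data, size_t len)
    {
            while (len) {
                    unsigned int page        = offset / EEPROM_PAGE_SIZE;
                    unsigned int page_offset = offset % EEPROM_PAGE_SIZE;
                    size_t chunk = EEPROM_PAGE_SIZE - page_offset;
                    int ret;

                    if (chunk > len)
                            chunk = len;

                    ret = op(page, page_offset, data, chunk);
                    if (ret)
                            return ret;

                    offset += chunk;
                    data   += chunk;
                    len    -= chunk;
            }
            return 0;
    }

The first chunk is the only one that can start mid-page; every later iteration begins at page_offset 0, which is the same shape as the iommufd batch walk in the next entry.
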
/linux-6.3-rc2/drivers/iommu/iommufd/
  pages.c
    380  unsigned int page_offset = 0;   in batch_from_domain()  (local)
    386  page_offset = area->page_offset;   in batch_from_domain()
    396  iova += PAGE_SIZE - page_offset;   in batch_from_domain()
    397  page_offset = 0;   in batch_from_domain()
    408  unsigned int page_offset = 0;   in raw_pages_from_domain()  (local)
    414  page_offset = area->page_offset;   in raw_pages_from_domain()
    419  page_offset = 0;   in raw_pages_from_domain()
    480  unsigned int page_offset = 0;   in batch_to_domain()  (local)
    489  page_offset = area->page_offset;   in batch_to_domain()
    495  page_offset);   in batch_to_domain()
    [all …]

/linux-6.3-rc2/drivers/gpu/drm/qxl/
  qxl_image.c
    166  unsigned int page_base, page_offset, out_offset;   in qxl_image_init_helper()  (local)
    175  page_offset = offset_in_page(out_offset);   in qxl_image_init_helper()
    176  size = min((int)(PAGE_SIZE - page_offset), remain);   in qxl_image_init_helper()
    179  k_data = ptr + page_offset;   in qxl_image_init_helper()

/linux-6.3-rc2/fs/hfsplus/
  bnode.c
     27  off += node->page_offset;   in hfs_bnode_read()
     78  off += node->page_offset;   in hfs_bnode_write()
    106  off += node->page_offset;   in hfs_bnode_clear()
    130  src += src_node->page_offset;   in hfs_bnode_copy()
    131  dst += dst_node->page_offset;   in hfs_bnode_copy()
    184  src += node->page_offset;   in hfs_bnode_move()
    185  dst += node->page_offset;   in hfs_bnode_move()
    438  node->page_offset = off & ~PAGE_MASK;   in __hfs_bnode_create()
    495  node->page_offset);   in hfs_bnode_find()
    585  memzero_page(*pagep, node->page_offset,   in hfs_bnode_create()
  wrapper.c
     74  unsigned int page_offset = offset_in_page(buf);   in hfsplus_submit_bio()  (local)
     75  unsigned int len = min_t(unsigned int, PAGE_SIZE - page_offset,   in hfsplus_submit_bio()
     78  ret = bio_add_page(bio, virt_to_page(buf), len, page_offset);   in hfsplus_submit_bio()

/linux-6.3-rc2/drivers/net/ethernet/intel/iavf/
  iavf_txrx.c
     716  rx_bi->page_offset,   in iavf_clean_rx_ring()
     729  rx_bi->page_offset = 0;   in iavf_clean_rx_ring()
     936  bi->page_offset,   in iavf_alloc_rx_buffers()
    1178  new_buff->page_offset = old_buff->page_offset;   in iavf_reuse_rx_page()
    1272  rx_buffer->page_offset ^= truesize;   in iavf_add_rx_frag()
    1274  rx_buffer->page_offset += truesize;   in iavf_add_rx_frag()
    1299  rx_buffer->page_offset,   in iavf_get_rx_buffer()
    1357  rx_buffer->page_offset + headlen,   in iavf_construct_skb()
    1362  rx_buffer->page_offset ^= truesize;   in iavf_construct_skb()
    1413  rx_buffer->page_offset ^= truesize;   in iavf_build_skb()
    [all …]

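The repeated page_offset ^= truesize hits (1272, 1362, 1413) are the page-splitting recycle trick that several drivers in this listing share; mlx4's en_rx.c and gve's gve_rx.c flip by PAGE_SIZE / 2 in the same way. One half of the page is handed to the network stack while the XOR points the descriptor at the other half for the next packet. A minimal model of that flip, using an illustrative struct rather than the iavf ring code:

    #include <stddef.h>

    #define PAGE_SIZE 4096u
    #define HALF_PAGE (PAGE_SIZE / 2)

    struct rx_buffer {
            unsigned char *page;          /* mapping of the backing page */
            unsigned int   page_offset;   /* 0 or HALF_PAGE */
    };

    /* Return a pointer to the half currently owned by the driver and flip
     * page_offset so the next packet lands in the other half while the
     * network stack still holds this one. */
    static unsigned char *rx_buffer_take_half(struct rx_buffer *buf)
    {
            unsigned char *data = buf->page + buf->page_offset;

            buf->page_offset ^= HALF_PAGE;
            return data;
    }

The real drivers pair the flip with a page reference count so a half is only rewritten once the stack has released it.
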
/linux-6.3-rc2/drivers/net/ethernet/sfc/falcon/
  rx.c
     59  return page_address(buf->page) + buf->page_offset;   in ef4_rx_buf_va()
    157  unsigned int page_offset;   in ef4_init_rx_buffers()  (local)
    188  page_offset = sizeof(struct ef4_rx_page_state);   in ef4_init_rx_buffers()
    195  rx_buf->page_offset = page_offset + efx->rx_ip_align;   in ef4_init_rx_buffers()
    201  page_offset += efx->rx_page_buf_step;   in ef4_init_rx_buffers()
    202  } while (page_offset + efx->rx_page_buf_step <= PAGE_SIZE);   in ef4_init_rx_buffers()
    454  rx_buf->page, rx_buf->page_offset,   in ef4_rx_packet_gro()
    499  rx_buf->page_offset += hdr_len;   in ef4_rx_mk_skb()
    504  rx_buf->page, rx_buf->page_offset,   in ef4_rx_mk_skb()
    586  rx_buf->page_offset += efx->rx_prefix_size;   in ef4_rx_packet()

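Lines 188-202 carve one page into several receive buffers: a small page-state struct sits at the front, each buffer occupies rx_page_buf_step bytes, and the do/while keeps stepping page_offset while another full buffer still fits below PAGE_SIZE. A sketch of the same packing loop; the state struct, stride and alignment values are placeholders, not the sfc ones:

    #include <stddef.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096u

    struct rx_page_state {              /* bookkeeping kept at the start of the page */
            unsigned int refcount;
    };

    int main(void)
    {
            unsigned int buf_step    = 1920;  /* per-buffer stride, incl. padding */
            unsigned int ip_align    = 2;     /* keeps the IP header word-aligned */
            unsigned int page_offset = sizeof(struct rx_page_state);
            unsigned int count       = 0;

            do {
                    printf("buffer %u starts at page_offset %u\n",
                           count++, page_offset + ip_align);
                    page_offset += buf_step;
            } while (page_offset + buf_step <= PAGE_SIZE);

            return 0;
    }
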
/linux-6.3-rc2/drivers/net/ethernet/google/gve/
  gve_rx_dqo.c
    164  buf_state->page_info.page_offset = 0;   in gve_alloc_page_dqo()
    366  buf_state->page_info.page_offset);   in gve_rx_post_buffers_dqo()
    398  buf_state->page_info.page_offset;   in gve_try_recycle_buf()
    402  buf_state->page_info.page_offset += data_buffer_size;   in gve_try_recycle_buf()
    403  buf_state->page_info.page_offset &= (PAGE_SIZE - 1);   in gve_try_recycle_buf()
    408  if (buf_state->page_info.page_offset ==   in gve_try_recycle_buf()
    507  buf_state->page_info.page_offset,   in gve_rx_append_frags()
    555  buf_state->page_info.page_offset,   in gve_rx_dqo()
    592  buf_state->page_info.page_offset, buf_len,   in gve_rx_dqo()
  gve_rx.c
     83  page_info->page_offset = 0;   in gve_setup_rx_buffer()
    159  rx->qpl_copy_pool[j].page_offset = 0;   in gve_prefill_rx_pages()
    351  u32 offset = page_info->page_offset + page_info->pad;   in gve_rx_add_frags()
    394  page_info->page_offset ^= PAGE_SIZE / 2;   in gve_rx_flip_buff()
    439  void *src = page_info->page_address + page_info->page_offset;   in gve_rx_copy_to_pool()
    472  alloc_page_info.page_offset = 0;   in gve_rx_copy_to_pool()
    489  dst = copy_page_info->page_address + copy_page_info->page_offset;   in gve_rx_copy_to_pool()
    499  copy_page_info->page_offset += rx->packet_buffer_size;   in gve_rx_copy_to_pool()
    500  copy_page_info->page_offset &= (PAGE_SIZE - 1);   in gve_rx_copy_to_pool()
    635  va = page_info->page_address + page_info->page_offset;   in gve_rx()
    [all …]

/linux-6.3-rc2/drivers/net/ethernet/mellanox/mlx4/
  en_rx.c
     70  frag->page_offset = priv->rx_headroom;   in mlx4_alloc_page()
     89  frags->page_offset);   in mlx4_en_alloc_frags()
    148  frags->page_offset = XDP_PACKET_HEADROOM;   in mlx4_en_prepare_rx_desc()
    491  __skb_fill_page_desc(skb, nr, page, frags->page_offset,   in mlx4_en_complete_rx_desc()
    496  frags->page_offset ^= PAGE_SIZE / 2;   in mlx4_en_complete_rx_desc()
    507  frags->page_offset += sz_align;   in mlx4_en_complete_rx_desc()
    737  va = page_address(frags[0].page) + frags[0].page_offset;   in mlx4_en_process_rx_cq()
    766  dma = frags[0].dma + frags[0].page_offset;   in mlx4_en_process_rx_cq()
    805  dma = frags[0].dma + frags[0].page_offset;   in mlx4_en_process_rx_cq()
    811  frags[0].page_offset, length, true);   in mlx4_en_process_rx_cq()
    [all …]

/linux-6.3-rc2/tools/testing/selftests/powerpc/primitives/
  load_unaligned_zeropad.c
    102  static int do_one_test(char *p, int page_offset)   in do_one_test()  (argument)
    114  printf("offset %u load_unaligned_zeropad returned 0x%lx, should be 0x%lx\n", page_offset, got, sho…   in do_one_test()

/linux-6.3-rc2/drivers/gpu/drm/panfrost/
  panfrost_mmu.c
    446  pgoff_t page_offset;   in panfrost_mmu_map_fault_addr()  (local)
    465  page_offset = addr >> PAGE_SHIFT;   in panfrost_mmu_map_fault_addr()
    466  page_offset -= bomapping->mmnode.start;   in panfrost_mmu_map_fault_addr()
    492  if (pages[page_offset]) {   in panfrost_mmu_map_fault_addr()
    502  for (i = page_offset; i < page_offset + NUM_FAULT_PAGES; i++) {   in panfrost_mmu_map_fault_addr()
    513  sgt = &bo->sgts[page_offset / (SZ_2M / PAGE_SIZE)];   in panfrost_mmu_map_fault_addr()
    514  ret = sg_alloc_table_from_pages(sgt, pages + page_offset,   in panfrost_mmu_map_fault_addr()

/linux-6.3-rc2/drivers/net/ethernet/pensando/ionic/
  ionic_txrx.c
    127  buf_info->page_offset = 0;   in ionic_rx_page_alloc()
    166  buf_info->page_offset += size;   in ionic_rx_buf_recycle()
    167  if (buf_info->page_offset >= IONIC_PAGE_SIZE)   in ionic_rx_buf_recycle()
    210  frag_len = min_t(u16, len, IONIC_PAGE_SIZE - buf_info->page_offset);   in ionic_rx_frags()
    214  buf_info->dma_addr + buf_info->page_offset,   in ionic_rx_frags()
    218  buf_info->page, buf_info->page_offset, frag_len,   in ionic_rx_frags()
    264  dma_sync_single_for_cpu(dev, buf_info->dma_addr + buf_info->page_offset,   in ionic_rx_copybreak()
    267  dma_sync_single_for_device(dev, buf_info->dma_addr + buf_info->page_offset,   in ionic_rx_copybreak()
    454  desc->addr = cpu_to_le64(buf_info->dma_addr + buf_info->page_offset);   in ionic_rx_fill()
    455  frag_len = min_t(u16, len, IONIC_PAGE_SIZE - buf_info->page_offset);   in ionic_rx_fill()
    [all …]

/linux-6.3-rc2/drivers/net/ethernet/sfc/
  rx_common.h
     29  return page_address(buf->page) + buf->page_offset;   in efx_rx_buf_va()
     63  unsigned int page_offset,
