/linux-6.3-rc2/arch/mips/ar7/
  prom.c
    149  struct psp_env_chunk *chunks = (struct psp_env_chunk *)psp_env_data;  in parse_psp_env() local
    151  memcpy_fromio(chunks, psp_env_base, PSP_ENV_SIZE);  in parse_psp_env()
    156  if ((chunks[i].num == 0xff) || ((i + chunks[i].len) > n))  in parse_psp_env()
    158  value = chunks[i].data;  in parse_psp_env()
    159  if (chunks[i].num) {  in parse_psp_env()
    160  name = lookup_psp_var_map(chunks[i].num);  in parse_psp_env()
    167  i += chunks[i].len;  in parse_psp_env()
/linux-6.3-rc2/drivers/gpu/drm/radeon/
  radeon_cs.c
    303  if (p->chunks == NULL) {  in radeon_cs_parser_init()
    318  p->chunk_relocs = &p->chunks[i];  in radeon_cs_parser_init()
    321  p->chunk_ib = &p->chunks[i];  in radeon_cs_parser_init()
    333  p->chunk_flags = &p->chunks[i];  in radeon_cs_parser_init()
    339  size = p->chunks[i].length_dw;  in radeon_cs_parser_init()
    341  p->chunks[i].user_ptr = cdata;  in radeon_cs_parser_init()
    360  if (p->chunks[i].length_dw > 1)  in radeon_cs_parser_init()
    361  ring = p->chunks[i].kdata[1];  in radeon_cs_parser_init()
    362  if (p->chunks[i].length_dw > 2)  in radeon_cs_parser_init()
    457  kvfree(parser->chunks[i].kdata);  in radeon_cs_parser_fini()
    [all …]
/linux-6.3-rc2/drivers/comedi/drivers/ni_routing/tools/
  convert_csv_to_c.py
    228  chunks = [ self.output_file_top,
    244  chunks.append('\t&{},'.format(dev_table_name))
    273  chunks.append('\tNULL,') # terminate list
    274  chunks.append('};')
    275  return '\n'.join(chunks)
    416  chunks = [ self.output_file_top,
    432  chunks.append('\t&{},'.format(fam_table_name))
    462  chunks.append('\tNULL,') # terminate list
    463  chunks.append('};')
    464  return '\n'.join(chunks)
/linux-6.3-rc2/drivers/net/ethernet/netronome/nfp/nfpcore/
  nfp_nsp.c
    505  } *chunks;  in nfp_nsp_command_buf_dma_sg() local
    517  chunks = kcalloc(nseg, sizeof(*chunks), GFP_KERNEL);  in nfp_nsp_command_buf_dma_sg()
    518  if (!chunks)  in nfp_nsp_command_buf_dma_sg()
    526  chunks[i].chunk = kmalloc(chunk_size,  in nfp_nsp_command_buf_dma_sg()
    528  if (!chunks[i].chunk)  in nfp_nsp_command_buf_dma_sg()
    540  off += chunks[i].len;  in nfp_nsp_command_buf_dma_sg()
    548  addr = dma_map_single(dev, chunks[i].chunk, chunks[i].len,  in nfp_nsp_command_buf_dma_sg()
    550  chunks[i].dma_addr = addr;  in nfp_nsp_command_buf_dma_sg()
    609  dma_unmap_single(dev, chunks[i].dma_addr, chunks[i].len,  in nfp_nsp_command_buf_dma_sg()
    614  kfree(chunks[i].chunk);  in nfp_nsp_command_buf_dma_sg()
    [all …]
/linux-6.3-rc2/scripts/gdb/linux/
  timerlist.py
    162  chunks = []
    168  chunks.append(buf[start:end])
    170  chunks.append(',')
    174  chunks[0] = chunks[0][0] # Cut off the first 0
    176  return "".join(chunks)
/linux-6.3-rc2/drivers/infiniband/hw/usnic/
  usnic_vnic.c
    44   struct usnic_vnic_res_chunk chunks[USNIC_VNIC_RES_TYPE_MAX];  member
    117  for (i = 0; i < ARRAY_SIZE(vnic->chunks); i++) {  in usnic_vnic_dump()
    118  chunk = &vnic->chunks[i];  in usnic_vnic_dump()
    222  return vnic->chunks[type].cnt;  in usnic_vnic_res_cnt()
    228  return vnic->chunks[type].free_cnt;  in usnic_vnic_res_free_cnt()
    254  src = &vnic->chunks[type];  in usnic_vnic_get_resources()
    286  vnic->chunks[res->type].free_cnt++;  in usnic_vnic_put_resources()
    382  &vnic->chunks[res_type]);  in usnic_vnic_discover_resources()
    391  usnic_vnic_free_res_chunk(&vnic->chunks[res_type]);  in usnic_vnic_discover_resources()
    427  usnic_vnic_free_res_chunk(&vnic->chunks[res_type]);  in usnic_vnic_release_resources()
/linux-6.3-rc2/net/xdp/
  xdp_umem.c
    156  unsigned int chunks, chunks_rem;  in xdp_umem_reg() local
    191  chunks = (unsigned int)div_u64_rem(size, chunk_size, &chunks_rem);  in xdp_umem_reg()
    192  if (chunks == 0)  in xdp_umem_reg()
    204  umem->chunks = chunks;  in xdp_umem_reg()
/linux-6.3-rc2/lib/
  genalloc.c
    158  INIT_LIST_HEAD(&pool->chunks);  in gen_pool_create()
    201  list_add_rcu(&chunk->next_chunk, &pool->chunks);  in gen_pool_add_owner()
    221  list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {  in gen_pool_virt_to_phys()
    247  list_for_each_safe(_chunk, _next_chunk, &pool->chunks) {  in gen_pool_destroy()
    295  list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {  in gen_pool_alloc_algo_owner()
    501  list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {  in gen_pool_free_owner()
    536  list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk)  in gen_pool_for_each_chunk()
    559  list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk) {  in gen_pool_has_addr()
    584  list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)  in gen_pool_avail()
    603  list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)  in gen_pool_size()
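The genalloc hits above all walk the pool's chunk list that gen_pool_add_owner() builds. For context, a minimal consumer-side sketch of this allocator, assuming a hypothetical module and an illustrative 64 KiB backing buffer (not taken from any in-tree user):

#include <linux/genalloc.h>
#include <linux/module.h>
#include <linux/sizes.h>
#include <linux/slab.h>

static struct gen_pool *pool;
static void *backing;                            /* illustrative backing storage */

static int __init genpool_demo_init(void)
{
	unsigned long addr;

	backing = kzalloc(SZ_64K, GFP_KERNEL);
	if (!backing)
		return -ENOMEM;

	/* 2^5 = 32-byte minimum allocation granularity, no NUMA affinity */
	pool = gen_pool_create(5, -1);
	if (!pool) {
		kfree(backing);
		return -ENOMEM;
	}

	/* Each gen_pool_add() call appends one chunk to pool->chunks */
	if (gen_pool_add(pool, (unsigned long)backing, SZ_64K, -1)) {
		gen_pool_destroy(pool);
		kfree(backing);
		return -ENOMEM;
	}

	addr = gen_pool_alloc(pool, 256);        /* carved out of one of the chunks */
	if (addr)
		gen_pool_free(pool, addr, 256);
	return 0;
}

static void __exit genpool_demo_exit(void)
{
	gen_pool_destroy(pool);                  /* all allocations must be freed first */
	kfree(backing);
}

module_init(genpool_demo_init);
module_exit(genpool_demo_exit);
MODULE_LICENSE("GPL");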
/linux-6.3-rc2/tools/testing/selftests/drivers/net/mlxsw/spectrum/
  devlink_lib_spectrum.sh
    90   devlink_resource_size_set 32000 kvd linear chunks
    99   devlink_resource_size_set 32000 kvd linear chunks
    108  devlink_resource_size_set 49152 kvd linear chunks
/linux-6.3-rc2/arch/x86/kernel/cpu/resctrl/
  monitor.c
    226  u64 shift = 64 - width, chunks;  in mbm_overflow_count() local
    228  chunks = (cur_msr << shift) - (prev_msr << shift);  in mbm_overflow_count()
    229  return chunks >> shift;  in mbm_overflow_count()
    238  u64 msr_val, chunks;  in resctrl_arch_rmid_read() local
    250  am->chunks += mbm_overflow_count(am->prev_msr, msr_val,  in resctrl_arch_rmid_read()
    252  chunks = get_corrected_mbm_count(rmid, am->chunks);  in resctrl_arch_rmid_read()
    255  chunks = msr_val;  in resctrl_arch_rmid_read()
    258  *val = chunks * hw_res->mon_scale;  in resctrl_arch_rmid_read()
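The mbm_overflow_count() lines quoted above compute a wrap-safe delta for a hardware counter that is only `width` bits wide: shifting both samples up by 64 - width discards the unimplemented high bits, so the subtraction comes out right even when the raw MSR value has rolled over. A standalone sketch of the same arithmetic (the 24-bit width and the sample values are made up for illustration):

#include <stdint.h>
#include <stdio.h>

/* Wrap-safe delta of a counter that is only `width` bits wide (width < 64). */
static uint64_t overflow_count(uint64_t prev, uint64_t cur, unsigned int width)
{
	uint64_t shift = 64 - width;

	/* Shift both samples so the counter's top bit becomes bit 63, subtract,
	 * then shift back: the result is (cur - prev) mod 2^width. */
	return ((cur << shift) - (prev << shift)) >> shift;
}

int main(void)
{
	unsigned int width = 24;          /* hypothetical counter width */
	uint64_t prev = 0xfffff0;         /* sample taken just before the wrap */
	uint64_t cur  = 0x000010;         /* sample taken just after the wrap */

	/* Prints 32: the counter advanced by 0x20 across the wrap. */
	printf("%llu\n", (unsigned long long)overflow_count(prev, cur, width));
	return 0;
}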
/linux-6.3-rc2/net/sctp/
  chunk.c
    43   INIT_LIST_HEAD(&msg->chunks);  in sctp_datamsg_init()
    65   list_for_each_entry(chunk, &msg->chunks, frag_list)  in sctp_datamsg_free()
    81   list_for_each_safe(pos, temp, &msg->chunks) {  in sctp_datamsg_destroy()
    280  list_add_tail(&chunk->frag_list, &msg->chunks);  in sctp_datamsg_from_user()
    289  list_for_each_safe(pos, temp, &msg->chunks) {  in sctp_datamsg_from_user()
  auth.c
    186  struct sctp_chunks_param *chunks,  in sctp_auth_make_key_vector() argument
    197  if (chunks)  in sctp_auth_make_key_vector()
    198  chunks_len = ntohs(chunks->param_hdr.length);  in sctp_auth_make_key_vector()
    209  if (chunks) {  in sctp_auth_make_key_vector()
    210  memcpy(new->data + offset, chunks, chunks_len);  in sctp_auth_make_key_vector()
    656  switch (param->chunks[i]) {  in __sctp_auth_cid()
    664  if (param->chunks[i] == chunk)  in __sctp_auth_cid()
    772  p->chunks[nchunks] = chunk_id;  in sctp_auth_ep_add_chunkid()
/linux-6.3-rc2/kernel/
  audit_tree.c
    17   struct list_head chunks;  member
    101  INIT_LIST_HEAD(&tree->chunks);  in alloc_tree()
    435  list_add(&chunk->owners[0].list, &tree->chunks);  in create_chunk()
    507  list_add(&p->list, &tree->chunks);  in tag_chunk()
    572  while (!list_empty(&victim->chunks)) {  in prune_tree_chunks()
    577  p = list_first_entry(&victim->chunks, struct audit_node, list);  in prune_tree_chunks()
    618  for (p = tree->chunks.next; p != &tree->chunks; p = q) {  in trim_marked()
    623  list_add(p, &tree->chunks);  in trim_marked()
    705  list_for_each_entry(node, &tree->chunks, list) {  in audit_trim_trees()
    845  list_for_each_entry(node, &tree->chunks, list)  in audit_add_tree_rule()
    [all …]
/linux-6.3-rc2/mm/
  zbud.c
    262  int chunks, i, freechunks;  in zbud_alloc() local
    271  chunks = size_to_chunks(size);  in zbud_alloc()
    275  for_each_unbuddied_list(i, chunks) {  in zbud_alloc()
    300  zhdr->first_chunks = chunks;  in zbud_alloc()
    302  zhdr->last_chunks = chunks;  in zbud_alloc()
  z3fold.c
    629   short chunks = size_to_chunks(sz);  in compact_single_buddy() local
    643   new_zhdr->first_chunks = chunks;  in compact_single_buddy()
    647   new_zhdr->middle_chunks = chunks;  in compact_single_buddy()
    653   new_zhdr->last_chunks = chunks;  in compact_single_buddy()
    791   int chunks = size_to_chunks(size), i;  in __z3fold_alloc() local
    797   for_each_unbuddied_list(i, chunks) {  in __z3fold_alloc()
    853   l = &unbuddied[chunks];  in __z3fold_alloc()
    1004  int chunks = size_to_chunks(size);  in z3fold_alloc() local
    1065  zhdr->first_chunks = chunks;  in z3fold_alloc()
    1067  zhdr->last_chunks = chunks;  in z3fold_alloc()
    [all …]
/linux-6.3-rc2/drivers/gpu/drm/amd/amdgpu/
  amdgpu_cs.c
    215   if (!p->chunks) {  in amdgpu_cs_pass1()
    232   p->chunks[i].chunk_id = user_chunk.chunk_id;  in amdgpu_cs_pass1()
    235   size = p->chunks[i].length_dw;  in amdgpu_cs_pass1()
    240   if (p->chunks[i].kdata == NULL) {  in amdgpu_cs_pass1()
    253   switch (p->chunks[i].chunk_id) {  in amdgpu_cs_pass1()
    326   kvfree(p->chunks[i].kdata);  in amdgpu_cs_pass1()
    327   kvfree(p->chunks);  in amdgpu_cs_pass1()
    328   p->chunks = NULL;  in amdgpu_cs_pass1()
    586   chunk = &p->chunks[i];  in amdgpu_cs_pass2()
    1383  kvfree(parser->chunks[i].kdata);  in amdgpu_cs_parser_fini()
    [all …]
/linux-6.3-rc2/drivers/infiniband/hw/efa/
  efa_verbs.c
    106   struct pbl_chunk *chunks;  member
    1276  sizeof(*chunk_list->chunks),  in pbl_chunk_list_create()
    1278  if (!chunk_list->chunks)  in pbl_chunk_list_create()
    1288  if (!chunk_list->chunks[i].buf)  in pbl_chunk_list_create()
    1300  cur_chunk_buf = chunk_list->chunks[0].buf;  in pbl_chunk_list_create()
    1316  chunk_list->chunks[i].buf,  in pbl_chunk_list_create()
    1317  chunk_list->chunks[i].length,  in pbl_chunk_list_create()
    1352  kfree(chunk_list->chunks[i].buf);  in pbl_chunk_list_create()
    1354  kfree(chunk_list->chunks);  in pbl_chunk_list_create()
    1366  kfree(chunk_list->chunks[i].buf);  in pbl_chunk_list_destroy()
    [all …]
/linux-6.3-rc2/drivers/infiniband/ulp/rtrs/
  README
    28   session. A session is associated with a set of memory chunks reserved on the
    36   chunks reserved for him on the server side. Their number, size and addresses
    45   which of the memory chunks has been accessed and at which offset the message
    80   the server (number of memory chunks which are going to be allocated for that
    122  1. When processing a write request client selects one of the memory chunks
    139  1. When processing a write request client selects one of the memory chunks
    144  using the IMM field, Server invalidate rkey associated to the memory chunks
    162  1. When processing a read request client selects one of the memory chunks
    181  1. When processing a read request client selects one of the memory chunks
    186  Server invalidate rkey associated to the memory chunks first, when it finishes,
  rtrs-srv.c
    595   int nr, nr_sgt, chunks;  in map_cont_bufs() local
    601   srv->queue_depth - chunks);  in map_cont_bufs()
    608   sg_set_page(s, srv->chunks[chunks + i],  in map_cont_bufs()
    1017  data = page_address(srv->chunks[buf_id]);  in process_read()
    1070  data = page_address(srv->chunks[buf_id]);  in process_write()
    1348  kfree(srv->chunks);  in free_srv()
    1394  srv->chunks = kcalloc(srv->queue_depth, sizeof(*srv->chunks),  in get_or_create_srv()
    1396  if (!srv->chunks)  in get_or_create_srv()
    1400  srv->chunks[i] = alloc_pages(GFP_KERNEL,  in get_or_create_srv()
    1402  if (!srv->chunks[i])  in get_or_create_srv()
    [all …]
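The README lines above describe the chunk-addressing idea: the client knows the set of server-side memory chunks reserved for its session, picks one per I/O, and tells the server which chunk was used and at which offset the request message sits via the 32-bit RDMA immediate. The field split below is purely illustrative, not the encoding RTRS actually uses, but it shows the kind of packing such a protocol needs:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical split of a 32-bit RDMA immediate into chunk id and offset;
 * RTRS defines its own layout, this is only for illustration. */
#define DEMO_OFFSET_BITS 19                       /* enough for a 512 KiB chunk */
#define DEMO_OFFSET_MASK ((1u << DEMO_OFFSET_BITS) - 1)

static uint32_t demo_imm_pack(uint32_t chunk_id, uint32_t offset)
{
	return (chunk_id << DEMO_OFFSET_BITS) | (offset & DEMO_OFFSET_MASK);
}

static void demo_imm_unpack(uint32_t imm, uint32_t *chunk_id, uint32_t *offset)
{
	*chunk_id = imm >> DEMO_OFFSET_BITS;
	*offset = imm & DEMO_OFFSET_MASK;
}

int main(void)
{
	uint32_t imm = demo_imm_pack(42, 0x1f000);
	uint32_t chunk_id, offset;

	demo_imm_unpack(imm, &chunk_id, &offset);
	/* A server would use chunk_id to find the page backing the request
	 * (cf. page_address(srv->chunks[buf_id]) in rtrs-srv.c above) and
	 * offset to locate the message inside that chunk. */
	printf("chunk %u, offset 0x%x\n", chunk_id, offset);
	return 0;
}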
/linux-6.3-rc2/drivers/md/
  md-bitmap.c
    784   bytes = DIV_ROUND_UP(chunks, 8);  in md_bitmap_storage_alloc()
    1067  chunks = bitmap->counts.chunks;  in md_bitmap_init_from_disk()
    1074  for (i = 0; i < chunks ; i++) {  in md_bitmap_init_from_disk()
    1105  for (i = 0; i < chunks; i++) {  in md_bitmap_init_from_disk()
    1169  bit_cnt, chunks);  in md_bitmap_init_from_disk()
    1289  for (j = 0; j < counts->chunks; j++) {  in md_bitmap_daemon_work()
    2069  unsigned long chunks;  in md_bitmap_resize() local
    2105  bytes = DIV_ROUND_UP(chunks, 8);  in md_bitmap_resize()
    2153  bitmap->counts.chunks = chunks;  in md_bitmap_resize()
    2158  chunks << chunkshift);  in md_bitmap_resize()
    [all …]
/linux-6.3-rc2/drivers/net/wireless/ti/wlcore/
  boot.c
    240  u32 chunks, addr, len;  in wlcore_boot_upload_firmware() local
    245  chunks = be32_to_cpup((__be32 *) fw);  in wlcore_boot_upload_firmware()
    248  wl1271_debug(DEBUG_BOOT, "firmware chunks to be uploaded: %u", chunks);  in wlcore_boot_upload_firmware()
    250  while (chunks--) {  in wlcore_boot_upload_firmware()
    261  chunks, addr, len);  in wlcore_boot_upload_firmware()
/linux-6.3-rc2/drivers/gpu/drm/sprd/
  sprd_dsi.c
    462  u32 chunks = 0;  in sprd_dsi_dpi_video() local
    541  chunks = vm->hactive / video_size;  in sprd_dsi_dpi_video()
    544  if (total_bytes >= (bytes_per_chunk * chunks)) {  in sprd_dsi_dpi_video()
    546  bytes_per_chunk * chunks;  in sprd_dsi_dpi_video()
    552  if (bytes_left > (pkt_header * chunks)) {  in sprd_dsi_dpi_video()
    554  pkt_header * chunks) / chunks;  in sprd_dsi_dpi_video()
    562  chunks = 1;  in sprd_dsi_dpi_video()
    573  dsi_reg_wr(ctx, VIDEO_PKT_CONFIG, VIDEO_LINE_CHUNK_NUM, 16, chunks);  in sprd_dsi_dpi_video()
/linux-6.3-rc2/drivers/virt/vboxguest/
  vboxguest_core.c
    356   u32 i, chunks;  in vbg_balloon_work() local
    384   chunks = req->balloon_chunks;  in vbg_balloon_work()
    385   if (chunks > gdev->mem_balloon.max_chunks) {  in vbg_balloon_work()
    387   __func__, chunks, gdev->mem_balloon.max_chunks);  in vbg_balloon_work()
    391   if (chunks > gdev->mem_balloon.chunks) {  in vbg_balloon_work()
    393   for (i = gdev->mem_balloon.chunks; i < chunks; i++) {  in vbg_balloon_work()
    398   gdev->mem_balloon.chunks++;  in vbg_balloon_work()
    402   for (i = gdev->mem_balloon.chunks; i-- > chunks;) {  in vbg_balloon_work()
    407   gdev->mem_balloon.chunks--;  in vbg_balloon_work()
    1641  balloon_info->u.out.balloon_chunks = gdev->mem_balloon.chunks;  in vbg_ioctl_check_balloon()
/linux-6.3-rc2/Documentation/admin-guide/device-mapper/
  striped.rst
    6  device across one or more underlying devices. Data is written in "chunks",
    7  with consecutive chunks rotating among the underlying devices. This can
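The two striped.rst lines above describe plain round-robin striping: chunk k of the logical device lands on underlying device k mod N, at chunk index k / N within that device. A small sketch of that mapping (the chunk size and device count are arbitrary examples, not dm defaults):

#include <stdint.h>
#include <stdio.h>

/* Map a logical sector to (stripe device, sector within that device) under
 * round-robin striping; chunk size is given in 512-byte sectors. */
static void stripe_map(uint64_t sector, uint64_t chunk_sectors, unsigned int ndevs,
		       unsigned int *dev, uint64_t *dev_sector)
{
	uint64_t chunk = sector / chunk_sectors;     /* which chunk of the logical device */
	uint64_t offset = sector % chunk_sectors;    /* position inside that chunk */

	*dev = chunk % ndevs;                        /* consecutive chunks rotate over devices */
	*dev_sector = (chunk / ndevs) * chunk_sectors + offset;
}

int main(void)
{
	unsigned int dev;
	uint64_t dev_sector;

	/* Example: 128-sector (64 KiB) chunks over 3 devices. Sector 300 sits in
	 * chunk 2, the third chunk, so it lands on device 2 at sector 44. */
	stripe_map(300, 128, 3, &dev, &dev_sector);
	printf("device %u, sector %llu\n", dev, (unsigned long long)dev_sector);
	return 0;
}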
/linux-6.3-rc2/drivers/dma/sh/
  shdma-base.c
    97   if (chunk->chunks == 1) {  in shdma_tx_submit()
    356  if (desc->mark == DESC_COMPLETED && desc->chunks == 1) {  in __ld_cleanup()
    372  BUG_ON(desc->chunks != 1);  in __ld_cleanup()
    567  int chunks = 0;  in shdma_prep_sg() local
    572  chunks += DIV_ROUND_UP(sg_dma_len(sg), schan->max_xfer_len);  in shdma_prep_sg()
    612  new->chunks = 1;  in shdma_prep_sg()
    614  new->chunks = chunks--;  in shdma_prep_sg()