/linux/drivers/gpu/drm/radeon/radeon_cs.c
    304  if (p->chunks == NULL) {  in radeon_cs_parser_init()
    319  p->chunk_relocs = &p->chunks[i];  in radeon_cs_parser_init()
    322  p->chunk_ib = &p->chunks[i];  in radeon_cs_parser_init()
    334  p->chunk_flags = &p->chunks[i];  in radeon_cs_parser_init()
    340  size = p->chunks[i].length_dw;  in radeon_cs_parser_init()
    342  p->chunks[i].user_ptr = cdata;  in radeon_cs_parser_init()
    361  if (p->chunks[i].length_dw > 1)  in radeon_cs_parser_init()
    362  ring = p->chunks[i].kdata[1];  in radeon_cs_parser_init()
    363  if (p->chunks[i].length_dw > 2)  in radeon_cs_parser_init()
    458  kvfree(parser->chunks[i].kdata);  in radeon_cs_parser_fini()
    [all …]
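The pattern visible here: the command-submission parser copies an ioctl-supplied array of chunk descriptors into kernel memory and releases it with kvfree() on teardown. A minimal sketch of that shape, with hypothetical demo_* names and a simplified descriptor layout (not the driver's actual structures):

```c
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

struct demo_chunk {
	u32 chunk_id;
	u32 length_dw;
	u64 user_ptr;
};

/* Copy `count` descriptors from user space; the caller kvfree()s the result. */
static struct demo_chunk *demo_copy_chunks(const void __user *uptr,
					   unsigned int count)
{
	struct demo_chunk *chunks;

	chunks = kvmalloc_array(count, sizeof(*chunks), GFP_KERNEL);
	if (!chunks)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(chunks, uptr, count * sizeof(*chunks))) {
		kvfree(chunks);
		return ERR_PTR(-EFAULT);
	}
	return chunks;
}
```

kvmalloc_array() is used because the chunk count is user-controlled and may be large enough that a vmalloc fallback is preferable to a contiguous kmalloc.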
|
/linux/drivers/comedi/drivers/ni_routing/tools/convert_csv_to_c.py
    228  chunks = [ self.output_file_top,
    244  chunks.append('\t&{},'.format(dev_table_name))
    273  chunks.append('\tNULL,') # terminate list
    274  chunks.append('};')
    275  return '\n'.join(chunks)
    416  chunks = [ self.output_file_top,
    432  chunks.append('\t&{},'.format(fam_table_name))
    462  chunks.append('\tNULL,') # terminate list
    463  chunks.append('};')
    464  return '\n'.join(chunks)
|
/linux/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
    505  } *chunks;  in nfp_nsp_command_buf_dma_sg() local
    517  chunks = kcalloc(nseg, sizeof(*chunks), GFP_KERNEL);  in nfp_nsp_command_buf_dma_sg()
    518  if (!chunks)  in nfp_nsp_command_buf_dma_sg()
    526  chunks[i].chunk = kmalloc(chunk_size,  in nfp_nsp_command_buf_dma_sg()
    528  if (!chunks[i].chunk)  in nfp_nsp_command_buf_dma_sg()
    540  off += chunks[i].len;  in nfp_nsp_command_buf_dma_sg()
    548  addr = dma_map_single(dev, chunks[i].chunk, chunks[i].len,  in nfp_nsp_command_buf_dma_sg()
    550  chunks[i].dma_addr = addr;  in nfp_nsp_command_buf_dma_sg()
    609  dma_unmap_single(dev, chunks[i].dma_addr, chunks[i].len,  in nfp_nsp_command_buf_dma_sg()
    614  kfree(chunks[i].chunk);  in nfp_nsp_command_buf_dma_sg()
    [all …]
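This function splits a command buffer into per-chunk allocations, DMA-maps each one, and unwinds both the mappings and the allocations on failure. A self-contained sketch of that map-with-unwind pattern, under assumed demo_* names:

```c
#include <linux/dma-mapping.h>
#include <linux/slab.h>

struct demo_sg_chunk {
	void *chunk;
	dma_addr_t dma_addr;
	size_t len;
};

/* Map every chunk for DMA; on error, unmap everything mapped so far. */
static int demo_map_chunks(struct device *dev, struct demo_sg_chunk *chunks,
			   unsigned int nseg)
{
	unsigned int i;

	for (i = 0; i < nseg; i++) {
		chunks[i].dma_addr = dma_map_single(dev, chunks[i].chunk,
						    chunks[i].len,
						    DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, chunks[i].dma_addr))
			goto unwind;
	}
	return 0;

unwind:
	while (i--)
		dma_unmap_single(dev, chunks[i].dma_addr, chunks[i].len,
				 DMA_BIDIRECTIONAL);
	return -ENOMEM;
}
```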
|
/linux/drivers/net/mctp/mctp-serial.c
    533  u8 chunks[MAX_CHUNKS];  member
    566  .chunks = { 3, 1, 1, 0},
    571  .chunks = { 3, 1, 1, 0},
    576  .chunks = { 1, 2, 0},
    581  .chunks = { 1, 1, 1, 0},
    586  .chunks = { 1, 1, 1, 1, 0},
    596  .chunks = { 1, 0 },
    601  .chunks = { 1, 0 },
    606  .chunks = { 3, 0 },
    611  .chunks = { 7, 0 },
    [all …]
|
/linux/drivers/gpu/drm/panthor/panthor_heap.c
     54  struct list_head chunks;  member
    163  if (initial_chunk && !list_empty(&heap->chunks)) {  in panthor_alloc_heap_chunk()
    167  prev_chunk = list_first_entry(&heap->chunks,  in panthor_alloc_heap_chunk()
    179  list_add(&chunk->node, &heap->chunks);  in panthor_alloc_heap_chunk()
    199  list_for_each_entry_safe(chunk, tmp, &heap->chunks, node)  in panthor_free_heap_chunks()
    306  INIT_LIST_HEAD(&heap->chunks);  in panthor_heap_create()
    316  first_chunk = list_first_entry(&heap->chunks,  in panthor_heap_create()
    387  list_for_each_entry_safe(chunk, tmp, &heap->chunks, node) {  in panthor_heap_return_chunk()
    473  chunk = list_first_entry(&heap->chunks,  in panthor_heap_grow()
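The list discipline here is LIFO: chunks are pushed with list_add(), so list_first_entry() always yields the most recently allocated chunk. A sketch of that idiom with hypothetical demo_* types:

```c
#include <linux/list.h>
#include <linux/stddef.h>

struct demo_chunk {
	struct list_head node;
};

struct demo_heap {
	struct list_head chunks;	/* head; newest chunk is first */
};

/* Return the most recently added chunk, or NULL if the heap is empty. */
static struct demo_chunk *demo_latest_chunk(struct demo_heap *heap)
{
	if (list_empty(&heap->chunks))
		return NULL;
	return list_first_entry(&heap->chunks, struct demo_chunk, node);
}
```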
|
/linux/scripts/gdb/linux/timerlist.py
    164  chunks = []
    170  chunks.append(buf[start:end])
    172  chunks.append(',')
    176  chunks[0] = chunks[0][0]  # Cut off the first 0
    178  return "".join(str(chunks))
|
/linux/net/xdp/xdp_umem.c
    163  u64 chunks, npgs;  in xdp_umem_reg() local
    198  chunks = div_u64_rem(size, chunk_size, &chunks_rem);  in xdp_umem_reg()
    199  if (!chunks || chunks > U32_MAX)  in xdp_umem_reg()
    217  umem->chunks = chunks;  in xdp_umem_reg()
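UMEM registration derives the chunk count from the region size and validates it. A sketch of that arithmetic under the assumption that the size must split into a whole, nonzero number of chunks that fits in 32 bits (the real code relaxes the remainder check for unaligned-chunk mode):

```c
#include <linux/limits.h>
#include <linux/math64.h>

static int demo_chunk_count(u64 size, u32 chunk_size, u64 *out)
{
	u32 rem;
	u64 chunks = div_u64_rem(size, chunk_size, &rem);

	/* reject zero chunks, a partial trailing chunk, or a count > U32_MAX */
	if (!chunks || rem || chunks > U32_MAX)
		return -EINVAL;

	*out = chunks;
	return 0;
}
```

div_u64_rem() is used rather than the `/` and `%` operators because 64-bit division is not available natively on all 32-bit targets.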
|
/linux/drivers/infiniband/hw/usnic/usnic_vnic.c
     44  struct usnic_vnic_res_chunk chunks[USNIC_VNIC_RES_TYPE_MAX];  member
    117  for (i = 0; i < ARRAY_SIZE(vnic->chunks); i++) {  in usnic_vnic_dump()
    118  chunk = &vnic->chunks[i];  in usnic_vnic_dump()
    222  return vnic->chunks[type].cnt;  in usnic_vnic_res_cnt()
    228  return vnic->chunks[type].free_cnt;  in usnic_vnic_res_free_cnt()
    254  src = &vnic->chunks[type];  in usnic_vnic_get_resources()
    286  vnic->chunks[res->type].free_cnt++;  in usnic_vnic_put_resources()
    382  &vnic->chunks[res_type]);  in usnic_vnic_discover_resources()
    391  usnic_vnic_free_res_chunk(&vnic->chunks[res_type]);  in usnic_vnic_discover_resources()
    427  usnic_vnic_free_res_chunk(&vnic->chunks[res_type]);  in usnic_vnic_release_resources()
|
/linux/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
    1069  chunk = &chunks->chunks[num_chunks];  in idpf_vport_get_q_reg()
    1168  chunks = &vc_aq->chunks;  in idpf_queue_reg_init()
    1171  chunks = &vport_params->chunks;  in idpf_queue_reg_init()
    1808  buf_sz = struct_size(eq, chunks.chunks, num_chunks);  in idpf_send_ena_dis_queues_msg()
    2054  chunks = &vport_config->req_qs_chunks->chunks;  in idpf_send_delete_queues_msg()
    2057  chunks = &vport_params->chunks;  in idpf_send_delete_queues_msg()
    2070  idpf_convert_reg_to_queue_chunks(eq->chunks.chunks, chunks->chunks,  in idpf_send_delete_queues_msg()
    2153  size = struct_size(vc_msg, chunks.chunks,  in idpf_send_add_queues_msg()
    3249  chunk = &chunks->chunks[num_chunks];  in idpf_vport_get_queue_ids()
    3365  chunks = &vc_aq->chunks;  in idpf_vport_queue_ids_init()
    [all …]
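The struct_size() calls here size a virtchnl message whose trailing member is a flexible array of queue chunks, in one overflow-checked expression. A sketch with hypothetical demo_* types (the real idpf structures carry more fields):

```c
#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

struct demo_chunk_desc {
	__le32 start_queue_id;
	__le32 num_queues;
};

struct demo_chunks {
	__le16 num_chunks;
	struct demo_chunk_desc chunks[];	/* flexible array member */
};

static struct demo_chunks *demo_alloc_chunks(u16 num_chunks)
{
	struct demo_chunks *c;

	/* header + num_chunks trailing elements, with overflow checking */
	c = kzalloc(struct_size(c, chunks, num_chunks), GFP_KERNEL);
	if (c)
		c->num_chunks = cpu_to_le16(num_chunks);
	return c;
}
```

struct_size() saturates on overflow instead of wrapping, so a hostile or buggy count cannot produce an undersized allocation.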
|
/linux/mm/zbud.c
    249  int chunks, i, freechunks;  in zbud_alloc() local
    258  chunks = size_to_chunks(size);  in zbud_alloc()
    262  for_each_unbuddied_list(i, chunks) {  in zbud_alloc()
    287  zhdr->first_chunks = chunks;  in zbud_alloc()
    289  zhdr->last_chunks = chunks;  in zbud_alloc()
|
/linux/mm/z3fold.c
     630  short chunks = size_to_chunks(sz);  in compact_single_buddy() local
     644  new_zhdr->first_chunks = chunks;  in compact_single_buddy()
     648  new_zhdr->middle_chunks = chunks;  in compact_single_buddy()
     654  new_zhdr->last_chunks = chunks;  in compact_single_buddy()
     792  int chunks = size_to_chunks(size), i;  in __z3fold_alloc() local
     798  for_each_unbuddied_list(i, chunks) {  in __z3fold_alloc()
     854  l = &unbuddied[chunks];  in __z3fold_alloc()
    1004  int chunks = size_to_chunks(size);  in z3fold_alloc() local
    1064  zhdr->first_chunks = chunks;  in z3fold_alloc()
    1066  zhdr->last_chunks = chunks;  in z3fold_alloc()
    [all …]
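Both compressed-page allocators round a request up to a whole number of fixed-size chunks and index their "unbuddied" free lists by that chunk count. A sketch of the conversion, assuming 64-byte chunks for illustration (the real chunk size is derived from PAGE_SIZE and the allocator's chunk order):

```c
#include <stddef.h>

#define DEMO_CHUNK_SHIFT 6			/* assumed: 64-byte chunks */
#define DEMO_CHUNK_SIZE  (1 << DEMO_CHUNK_SHIFT)

/* Ceiling division: how many whole chunks does `size` bytes occupy? */
static int demo_size_to_chunks(size_t size)
{
	return (size + DEMO_CHUNK_SIZE - 1) >> DEMO_CHUNK_SHIFT;
}
```

Keeping one free list per chunk count makes the allocation path a direct lookup: a request for N chunks scans only lists whose free space is at least N.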
|
/linux/lib/genalloc.c
    160  INIT_LIST_HEAD(&pool->chunks);  in gen_pool_create()
    203  list_add_rcu(&chunk->next_chunk, &pool->chunks);  in gen_pool_add_owner()
    223  list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {  in gen_pool_virt_to_phys()
    249  list_for_each_safe(_chunk, _next_chunk, &pool->chunks) {  in gen_pool_destroy()
    297  list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {  in gen_pool_alloc_algo_owner()
    503  list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {  in gen_pool_free_owner()
    538  list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk)  in gen_pool_for_each_chunk()
    561  list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk) {  in gen_pool_has_addr()
    586  list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)  in gen_pool_avail()
    605  list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)  in gen_pool_size()
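genalloc keeps every memory region added to a pool as a chunk on the RCU-protected pool->chunks list, which all the lookup and accounting paths above walk. A sketch of typical pool usage from a caller's point of view (demo_* name and the specific sizes are illustrative):

```c
#include <linux/genalloc.h>

static int demo_pool_smoke_test(void *virt, phys_addr_t phys, size_t size)
{
	struct gen_pool *pool;
	unsigned long va;

	/* min_alloc_order = 5: allocations are 32-byte granular; nid = -1. */
	pool = gen_pool_create(5, -1);
	if (!pool)
		return -ENOMEM;

	/* Adds one chunk covering [virt, virt + size) to pool->chunks. */
	if (gen_pool_add_virt(pool, (unsigned long)virt, phys, size, -1)) {
		gen_pool_destroy(pool);
		return -ENOMEM;
	}

	va = gen_pool_alloc(pool, 128);		/* 0 means allocation failed */
	if (va)
		gen_pool_free(pool, va, 128);

	gen_pool_destroy(pool);
	return 0;
}
```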
|
/linux/tools/testing/selftests/drivers/net/mlxsw/spectrum/devlink_lib_spectrum.sh
     90  devlink_resource_size_set 32000 kvd linear chunks
     99  devlink_resource_size_set 32000 kvd linear chunks
    108  devlink_resource_size_set 49152 kvd linear chunks
|
/linux/Documentation/networking/oa-tc6-framework.rst
     49  each chunk. Ethernet frames are transferred over one or more data chunks.
     59  receive (RX) chunks. Chunks in both transmit and receive directions may
     69  In parallel, receive data chunks are received on MISO. Each receive data
    160  the MAC-PHY will be converted into multiple transmit data chunks. Each
    177  transaction. For TX data chunks, this bit shall be ’1’.
    254  host will be sent as multiple receive data chunks. Each receive data
    286  data chunks of frame data that are available for
    350  transmit data chunks (empty chunks) or data valid transmit data chunks in
    356  once the receive data chunks become available again for reading, the
    359  first data chunk will have the receive chunks available information.
    [all …]
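The framework text describes Ethernet frames being carried over one or more fixed-size SPI data chunks, each with its own header. A sketch of that sizing arithmetic, assuming the common 64-byte chunk payload and a 4-byte data header (the spec also allows smaller payload sizes, so treat these constants as assumptions):

```c
#include <linux/kernel.h>

/* How many data chunks does a frame of `frame_len` bytes need? */
static unsigned int demo_tc6_chunk_count(unsigned int frame_len,
					 unsigned int payload_sz)
{
	return DIV_ROUND_UP(frame_len, payload_sz);
}

/* Total bytes on the SPI wire: each chunk carries a 4-byte header. */
static unsigned int demo_tc6_spi_bytes(unsigned int frame_len,
				       unsigned int payload_sz)
{
	return demo_tc6_chunk_count(frame_len, payload_sz) * (4 + payload_sz);
}
```

For example, a 1514-byte frame with 64-byte payloads needs 24 chunks and 1632 bytes on the wire.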
|
/linux/net/sctp/chunk.c
     43  INIT_LIST_HEAD(&msg->chunks);  in sctp_datamsg_init()
     65  list_for_each_entry(chunk, &msg->chunks, frag_list)  in sctp_datamsg_free()
     81  list_for_each_safe(pos, temp, &msg->chunks) {  in sctp_datamsg_destroy()
    280  list_add_tail(&chunk->frag_list, &msg->chunks);  in sctp_datamsg_from_user()
    289  list_for_each_safe(pos, temp, &msg->chunks) {  in sctp_datamsg_from_user()
|
/linux/net/sctp/auth.c
    186  struct sctp_chunks_param *chunks,  in sctp_auth_make_key_vector() argument
    197  if (chunks)  in sctp_auth_make_key_vector()
    198  chunks_len = ntohs(chunks->param_hdr.length);  in sctp_auth_make_key_vector()
    209  if (chunks) {  in sctp_auth_make_key_vector()
    210  memcpy(new->data + offset, chunks, chunks_len);  in sctp_auth_make_key_vector()
    656  switch (param->chunks[i]) {  in __sctp_auth_cid()
    664  if (param->chunks[i] == chunk)  in __sctp_auth_cid()
    772  p->chunks[nchunks] = chunk_id;  in sctp_auth_ep_add_chunkid()
|
/linux/kernel/audit_tree.c
     17  struct list_head chunks;  member
    101  INIT_LIST_HEAD(&tree->chunks);  in alloc_tree()
    435  list_add(&chunk->owners[0].list, &tree->chunks);  in create_chunk()
    507  list_add(&p->list, &tree->chunks);  in tag_chunk()
    572  while (!list_empty(&victim->chunks)) {  in prune_tree_chunks()
    577  p = list_first_entry(&victim->chunks, struct audit_node, list);  in prune_tree_chunks()
    618  for (p = tree->chunks.next; p != &tree->chunks; p = q) {  in trim_marked()
    623  list_add(p, &tree->chunks);  in trim_marked()
    705  list_for_each_entry(node, &tree->chunks, list) {  in audit_trim_trees()
    845  list_for_each_entry(node, &tree->chunks, list)  in audit_add_tree_rule()
    [all …]
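prune_tree_chunks() uses the drain idiom: keep detaching the first entry until the list is empty, which stays safe even when the release step can drop and retake locks. A sketch with hypothetical demo_* types:

```c
#include <linux/list.h>

struct demo_node {
	struct list_head list;
};

/* Detach and release every node on the list, one at a time. */
static void demo_prune_all(struct list_head *chunks)
{
	struct demo_node *p;

	while (!list_empty(chunks)) {
		p = list_first_entry(chunks, struct demo_node, list);
		list_del_init(&p->list);
		/* release p's resources here */
	}
}
```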
|
/linux/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
     204  if (!p->chunks) {  in amdgpu_cs_pass1()
     221  p->chunks[i].chunk_id = user_chunk.chunk_id;  in amdgpu_cs_pass1()
     224  size = p->chunks[i].length_dw;  in amdgpu_cs_pass1()
     229  if (p->chunks[i].kdata == NULL) {  in amdgpu_cs_pass1()
     242  switch (p->chunks[i].chunk_id) {  in amdgpu_cs_pass1()
     321  kvfree(p->chunks[i].kdata);  in amdgpu_cs_pass1()
     322  kvfree(p->chunks);  in amdgpu_cs_pass1()
     323  p->chunks = NULL;  in amdgpu_cs_pass1()
     601  chunk = &p->chunks[i];  in amdgpu_cs_pass2()
    1401  kvfree(parser->chunks[i].kdata);  in amdgpu_cs_parser_fini()
    [all …]
|
/linux/drivers/infiniband/ulp/rtrs/README
     28  session. A session is associated with a set of memory chunks reserved on the
     36  chunks reserved for him on the server side. Their number, size and addresses
     45  which of the memory chunks has been accessed and at which offset the message
     80  the server (number of memory chunks which are going to be allocated for that
    122  1. When processing a write request client selects one of the memory chunks
    139  1. When processing a write request client selects one of the memory chunks
    144  using the IMM field, Server invalidate rkey associated to the memory chunks
    162  1. When processing a read request client selects one of the memory chunks
    181  1. When processing a read request client selects one of the memory chunks
    186  Server invalidate rkey associated to the memory chunks first, when it finishes,
|
/linux/drivers/infiniband/hw/efa/efa_verbs.c
     109  struct pbl_chunk *chunks;  member
    1303  sizeof(*chunk_list->chunks),  in pbl_chunk_list_create()
    1305  if (!chunk_list->chunks)  in pbl_chunk_list_create()
    1315  if (!chunk_list->chunks[i].buf)  in pbl_chunk_list_create()
    1327  cur_chunk_buf = chunk_list->chunks[0].buf;  in pbl_chunk_list_create()
    1343  chunk_list->chunks[i].buf,  in pbl_chunk_list_create()
    1344  chunk_list->chunks[i].length,  in pbl_chunk_list_create()
    1379  kfree(chunk_list->chunks[i].buf);  in pbl_chunk_list_create()
    1381  kfree(chunk_list->chunks);  in pbl_chunk_list_create()
    1393  kfree(chunk_list->chunks[i].buf);  in pbl_chunk_list_destroy()
    [all …]
|
/linux/arch/x86/kernel/cpu/resctrl/monitor.c
    309  u64 shift = 64 - width, chunks;  in mbm_overflow_count() local
    311  chunks = (cur_msr << shift) - (prev_msr << shift);  in mbm_overflow_count()
    312  return chunks >> shift;  in mbm_overflow_count()
    323  u64 msr_val, chunks;  in resctrl_arch_rmid_read() local
    336  am->chunks += mbm_overflow_count(am->prev_msr, msr_val,  in resctrl_arch_rmid_read()
    338  chunks = get_corrected_mbm_count(rmid, am->chunks);  in resctrl_arch_rmid_read()
    341  chunks = msr_val;  in resctrl_arch_rmid_read()
    344  *val = chunks * hw_res->mon_scale;  in resctrl_arch_rmid_read()
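mbm_overflow_count() computes the delta of a hardware counter that is only `width` bits wide. Shifting both samples up by (64 - width) bits makes the unsigned 64-bit subtraction wrap at the counter's own width, so the result is correct even when the current reading has wrapped past the previous one. A standalone sketch of the same trick:

```c
#include <linux/types.h>

/* Delta between two samples of a `width`-bit counter (width <= 64). */
static u64 demo_overflow_count(u64 prev_msr, u64 cur_msr, unsigned int width)
{
	u64 shift = 64 - width;
	u64 chunks = (cur_msr << shift) - (prev_msr << shift);

	return chunks >> shift;
}
```

For a 24-bit counter, demo_overflow_count(0xFFFFFE, 0x000003, 24) returns 5, the true number of chunks counted across the wraparound.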
|
/linux/tools/testing/selftests/bpf/generate_udp_fragments.py
    46  chunks = [frag[i : i + 10] for i in range(0, len(frag), 10)]
    47  chunks_fmted = [", ".join([str(hex(b)) for b in chunk]) for chunk in chunks]
|
/linux/drivers/net/wireless/ti/wlcore/boot.c
    237  u32 chunks, addr, len;  in wlcore_boot_upload_firmware() local
    242  chunks = be32_to_cpup((__be32 *) fw);  in wlcore_boot_upload_firmware()
    245  wl1271_debug(DEBUG_BOOT, "firmware chunks to be uploaded: %u", chunks);  in wlcore_boot_upload_firmware()
    247  while (chunks--) {  in wlcore_boot_upload_firmware()
    258  chunks, addr, len);  in wlcore_boot_upload_firmware()
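The loader reads a big-endian chunk count from the head of the firmware image, then walks the chunks one by one. A sketch of walking such an image, under the assumed framing of a per-chunk (address, length) header followed by payload bytes; the demo_* name and exact layout are illustrative, not the driver's definition:

```c
#include <asm/byteorder.h>
#include <linux/types.h>

static void demo_walk_fw(const u8 *fw)
{
	u32 chunks = be32_to_cpup((const __be32 *)fw);

	fw += sizeof(__be32);			/* skip the chunk count */
	while (chunks--) {
		u32 addr = be32_to_cpup((const __be32 *)fw);
		u32 len  = be32_to_cpup((const __be32 *)(fw + 4));

		fw += 8;			/* skip the chunk header */
		/* upload `len` bytes of payload to device address `addr` */
		fw += len;			/* advance to the next chunk */
	}
}
```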
|
/linux/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
    326  len0 = pnvm_data->chunks[0].len;  in iwl_pcie_load_payloads_continuously()
    327  len1 = pnvm_data->chunks[1].len;  in iwl_pcie_load_payloads_continuously()
    342  memcpy(dram->block, pnvm_data->chunks[0].data, len0);  in iwl_pcie_load_payloads_continuously()
    343  memcpy((u8 *)dram->block + len0, pnvm_data->chunks[1].data, len1);  in iwl_pcie_load_payloads_continuously()
    376  len = pnvm_data->chunks[i].len;  in iwl_pcie_load_payloads_segments()
    377  data = pnvm_data->chunks[i].data;  in iwl_pcie_load_payloads_segments()
|
/linux/drivers/md/md-bitmap.c
     161  unsigned long chunks;  member
    1016  bytes = DIV_ROUND_UP(chunks, 8);  in md_bitmap_storage_alloc()
    1320  unsigned long chunks = bitmap->counts.chunks;  in md_bitmap_init_from_disk() local
    1332  for (i = 0; i < chunks ; i++) {  in md_bitmap_init_from_disk()
    1402  for (i = 0; i < chunks; i++) {  in md_bitmap_init_from_disk()
    1428  bit_cnt, chunks);  in md_bitmap_init_from_disk()
    2378  unsigned long chunks;  in __bitmap_resize() local
    2414  bytes = DIV_ROUND_UP(chunks, 8);  in __bitmap_resize()
    2462  bitmap->counts.chunks = chunks;  in __bitmap_resize()
    2467  chunks << chunkshift);  in __bitmap_resize()
    [all …]
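The sizing arithmetic here is one bit per chunk, rounded up to whole bytes for storage. A sketch of that conversion (demo_* name is illustrative; the real code also rounds the byte count up to whole on-disk pages and adds superblock space):

```c
#include <linux/kernel.h>

/* Bytes needed to store one dirty bit per chunk. */
static unsigned long demo_bitmap_bytes(unsigned long chunks)
{
	return DIV_ROUND_UP(chunks, 8);	/* 8 chunk bits per byte */
}
```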
|