/linux-6.3-rc2/fs/nfs/

direct.c
    158   cinfo->dreq = dreq;   in nfs_init_cinfo_from_dreq()
    167   if (!dreq)   in nfs_direct_req_alloc()
    185   pnfs_release_ds_info(&dreq->ds_cinfo, dreq->inode);   in nfs_direct_req_free()
    244   dreq->iocb->ki_complete(dreq->iocb, res);   in nfs_direct_complete()
    255   struct nfs_direct_req *dreq = hdr->dreq;   in nfs_direct_read_completion() (local)
    433   dreq->bytes_left = dreq->max_count = count;   in nfs_file_direct_read()
    558   struct nfs_direct_req *dreq = data->dreq;   in nfs_direct_commit_complete() (local)
    600   struct nfs_direct_req *dreq = cinfo->dreq;   in nfs_direct_resched_write() (local)
    674   struct nfs_direct_req *dreq = hdr->dreq;   in nfs_direct_write_completion() (local)
    733   struct nfs_direct_req *dreq = hdr->dreq;   in nfs_direct_write_reschedule_io() (local)
    [all …]

cache_lib.c
    71    kfree(dreq);   in nfs_cache_defer_req_put()
    76    struct nfs_cache_defer_req *dreq;   in nfs_dns_cache_revisit() (local)
    80    complete(&dreq->completion);   in nfs_dns_cache_revisit()
    81    nfs_cache_defer_req_put(dreq);   in nfs_dns_cache_revisit()
    86    struct nfs_cache_defer_req *dreq;   in nfs_dns_cache_defer() (local)
    90    refcount_inc(&dreq->count);   in nfs_dns_cache_defer()
    92    return &dreq->deferred_req;   in nfs_dns_cache_defer()
    99    dreq = kzalloc(sizeof(*dreq), GFP_KERNEL);   in nfs_cache_defer_req_alloc()
    100   if (dreq) {   in nfs_cache_defer_req_alloc()
    102   refcount_set(&dreq->count, 1);   in nfs_cache_defer_req_alloc()
    [all …]

dns_resolve.c
    285   struct nfs_cache_defer_req *dreq)   in do_cache_lookup() (argument)
    291   ret = cache_check(cd, &(*item)->h, &dreq->req);   in do_cache_lookup()
    327   struct nfs_cache_defer_req *dreq;   in do_cache_lookup_wait() (local)
    330   dreq = nfs_cache_defer_req_alloc();   in do_cache_lookup_wait()
    331   if (!dreq)   in do_cache_lookup_wait()
    333   ret = do_cache_lookup(cd, key, item, dreq);   in do_cache_lookup_wait()
    335   ret = nfs_cache_wait_for_upcall(dreq);   in do_cache_lookup_wait()
    339   nfs_cache_defer_req_put(dreq);   in do_cache_lookup_wait()

cache_lib.h
    24    extern void nfs_cache_defer_req_put(struct nfs_cache_defer_req *dreq);
    25    extern int nfs_cache_wait_for_upcall(struct nfs_cache_defer_req *dreq);

nfstrace.h
    1622  const struct nfs_direct_req *dreq
    1625  TP_ARGS(dreq),
    1639  const struct inode *inode = dreq->inode;
    1646  __entry->offset = dreq->io_start;
    1647  __entry->count = dreq->count;
    1648  __entry->bytes_left = dreq->bytes_left;
    1649  __entry->error = dreq->error;
    1650  __entry->flags = dreq->flags;
    1668  const struct nfs_direct_req *dreq \
    1670  TP_ARGS(dreq))

internal.h
    542   struct nfs_direct_req *dreq);
    641   struct nfs_direct_req *dreq);
    642   extern ssize_t nfs_dreq_bytes_left(struct nfs_direct_req *dreq);
    766   if (folio && !cinfo->dreq) {   in nfs_folio_mark_unstable()

write.c
    919   cinfo->dreq = NULL;   in nfs_init_cinfo_from_inode()
    925   struct nfs_direct_req *dreq)   in nfs_init_cinfo() (argument)
    927   if (dreq)   in nfs_init_cinfo()
    928   nfs_init_cinfo_from_dreq(cinfo, dreq);   in nfs_init_cinfo()
    1051  if ((ret == max) && !cinfo->dreq)   in nfs_scan_commit_list()
    1764  data->dreq = cinfo->dreq;   in nfs_init_commit()
    1897  nfs_init_cinfo(&cinfo, data->inode, data->dreq);   in nfs_commit_release_pages()

pagelist.c
    107   hdr->dreq = desc->pg_dreq;   in nfs_pgheader_init()
    1479  desc->pg_dreq = hdr->dreq;   in nfs_pageio_resend()
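Note: the write.c and internal.h hits above show the split between buffered and O_DIRECT writeback: nfs_init_cinfo() points the commit info at the O_DIRECT request when one exists, and later code simply tests cinfo->dreq. A minimal user-space sketch of that dispatch follows; the *_sketch types and helper names are invented stand-ins, not the kernel definitions.

    /*
     * Sketch of the dispatch in write.c lines 919/925-928: commit info
     * points at the O_DIRECT request when one exists, otherwise at the
     * inode.  All *_sketch types are simplified stand-ins.
     */
    #include <stdio.h>

    struct inode_sketch { int ino; };                 /* stand-in for struct inode */
    struct nfs_direct_req_sketch { int id; };         /* stand-in for struct nfs_direct_req */

    struct commit_info_sketch {
        struct inode_sketch *inode;
        struct nfs_direct_req_sketch *dreq;           /* NULL for buffered writeback */
    };

    static void init_cinfo_from_inode(struct commit_info_sketch *cinfo,
                                      struct inode_sketch *inode)
    {
        cinfo->inode = inode;
        cinfo->dreq = NULL;                           /* cf. write.c:919 */
    }

    static void init_cinfo_from_dreq(struct commit_info_sketch *cinfo,
                                     struct nfs_direct_req_sketch *dreq)
    {
        cinfo->inode = NULL;
        cinfo->dreq = dreq;                           /* cf. direct.c:158 */
    }

    /* cf. nfs_init_cinfo(), write.c:925-928 */
    static void init_cinfo(struct commit_info_sketch *cinfo,
                           struct inode_sketch *inode,
                           struct nfs_direct_req_sketch *dreq)
    {
        if (dreq)
            init_cinfo_from_dreq(cinfo, dreq);
        else
            init_cinfo_from_inode(cinfo, inode);
    }

    int main(void)
    {
        struct inode_sketch inode = { .ino = 42 };
        struct nfs_direct_req_sketch odirect = { .id = 1 };
        struct commit_info_sketch cinfo;

        init_cinfo(&cinfo, &inode, NULL);             /* buffered write path */
        printf("buffered: dreq=%p\n", (void *)cinfo.dreq);

        init_cinfo(&cinfo, &inode, &odirect);         /* O_DIRECT write path */
        printf("direct:   dreq=%p\n", (void *)cinfo.dreq);
        return 0;
    }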
/linux-6.3-rc2/net/dccp/

minisocks.c
    117   newdp->dccps_iss = dreq->dreq_iss;   in dccp_create_openreq_child()
    118   newdp->dccps_gss = dreq->dreq_gss;   in dccp_create_openreq_child()
    120   newdp->dccps_isr = dreq->dreq_isr;   in dccp_create_openreq_child()
    121   newdp->dccps_gsr = dreq->dreq_gsr;   in dccp_create_openreq_child()
    155   spin_lock_bh(&dreq->dreq_lock);   in dccp_check_req()
    182   dreq->dreq_iss, dreq->dreq_gss)) {   in dccp_check_req()
    187   (unsigned long long) dreq->dreq_iss,   in dccp_check_req()
    192   if (dccp_parse_options(sk, dreq, skb))   in dccp_check_req()
    209   spin_unlock_bh(&dreq->dreq_lock);   in dccp_check_req()
    262   spin_lock_init(&dreq->dreq_lock);   in dccp_reqsk_init()
    [all …]

options.c
    99    if (dreq != NULL && (opt >= DCCPO_MIN_RX_CCID_SPECIFIC ||   in dccp_parse_options()
    125   rc = dccp_feat_parse_options(sk, dreq, mandatory, opt,   in dccp_parse_options()
    144   if (dreq != NULL) {   in dccp_parse_options()
    145   dreq->dreq_timestamp_echo = ntohl(opt_val);   in dccp_parse_options()
    146   dreq->dreq_timestamp_time = dccp_timestamp();   in dccp_parse_options()
    353   struct dccp_request_sock *dreq,   in dccp_insert_option_timestamp_echo() (argument)
    360   if (dreq != NULL) {   in dccp_insert_option_timestamp_echo()
    362   tstamp_echo = htonl(dreq->dreq_timestamp_echo);   in dccp_insert_option_timestamp_echo()
    363   dreq->dreq_timestamp_echo = 0;   in dccp_insert_option_timestamp_echo()
    596   if (dccp_feat_insert_opts(NULL, dreq, skb))   in dccp_insert_options_rsk()
    [all …]

output.c
    403   struct dccp_request_sock *dreq;   in dccp_make_response() (local)
    422   dreq = dccp_rsk(req);   in dccp_make_response()
    424   dccp_inc_seqno(&dreq->dreq_gss);   in dccp_make_response()
    426   DCCP_SKB_CB(skb)->dccpd_seq = dreq->dreq_gss;   in dccp_make_response()
    429   if (dccp_feat_server_ccid_dependencies(dreq))   in dccp_make_response()
    432   if (dccp_insert_options_rsk(dreq, skb))   in dccp_make_response()
    444   dccp_hdr_set_seq(dh, dreq->dreq_gss);   in dccp_make_response()
    445   dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dreq->dreq_gsr);   in dccp_make_response()
    446   dccp_hdr_response(skb)->dccph_resp_service = dreq->dreq_service;   in dccp_make_response()

ipv4.c
    596   struct dccp_request_sock *dreq;   in dccp_v4_conn_request() (local)
    627   dreq = dccp_rsk(req);   in dccp_v4_conn_request()
    628   if (dccp_parse_options(sk, dreq, skb))   in dccp_v4_conn_request()
    648   dreq->dreq_isr = dcb->dccpd_seq;   in dccp_v4_conn_request()
    649   dreq->dreq_gsr = dreq->dreq_isr;   in dccp_v4_conn_request()
    650   dreq->dreq_iss = dccp_v4_init_sequence(skb);   in dccp_v4_conn_request()
    651   dreq->dreq_gss = dreq->dreq_iss;   in dccp_v4_conn_request()
    652   dreq->dreq_service = service;   in dccp_v4_conn_request()

ipv6.c
    320   struct dccp_request_sock *dreq;   in dccp_v6_conn_request() (local)
    358   dreq = dccp_rsk(req);   in dccp_v6_conn_request()
    359   if (dccp_parse_options(sk, dreq, skb))   in dccp_v6_conn_request()
    391   dreq->dreq_isr = dcb->dccpd_seq;   in dccp_v6_conn_request()
    392   dreq->dreq_gsr = dreq->dreq_isr;   in dccp_v6_conn_request()
    393   dreq->dreq_iss = dccp_v6_init_sequence(skb);   in dccp_v6_conn_request()
    394   dreq->dreq_gss = dreq->dreq_iss;   in dccp_v6_conn_request()
    395   dreq->dreq_service = service;   in dccp_v6_conn_request()

feat.c
    633   int dccp_feat_insert_opts(struct dccp_sock *dp, struct dccp_request_sock *dreq,   in dccp_feat_insert_opts() (argument)
    636   struct list_head *fn = dreq ? &dreq->dreq_featneg : &dp->dccps_featneg;   in dccp_feat_insert_opts()
    1004  int dccp_feat_server_ccid_dependencies(struct dccp_request_sock *dreq)   in dccp_feat_server_ccid_dependencies() (argument)
    1006  struct list_head *fn = &dreq->dreq_featneg;   in dccp_feat_server_ccid_dependencies()
    1404  int dccp_feat_parse_options(struct sock *sk, struct dccp_request_sock *dreq,   in dccp_feat_parse_options() (argument)
    1408  struct list_head *fn = dreq ? &dreq->dreq_featneg : &dp->dccps_featneg;   in dccp_feat_parse_options()

dccp.h
    456   int dccp_feat_server_ccid_dependencies(struct dccp_request_sock *dreq);
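Note: the ipv4.c/ipv6.c hits record the peer's initial sequence number (ISR) and our own (ISS) in the request socket, and minisocks.c later copies all four counters into the accepted child. A simplified stand-alone sketch of that bookkeeping follows; the *_sketch types and the sample values are illustrative, not the kernel's.

    /*
     * Sketch of dreq sequence bookkeeping: conn_request_init() mirrors
     * dccp_v4/v6_conn_request() lines 648-652 / 391-395, and
     * openreq_child_init() mirrors dccp_create_openreq_child() 117-121.
     */
    #include <stdint.h>
    #include <stdio.h>

    struct dccp_request_sketch {                /* stand-in for struct dccp_request_sock */
        uint64_t dreq_iss, dreq_gss;            /* our initial / greatest sent seqno    */
        uint64_t dreq_isr, dreq_gsr;            /* peer's initial / greatest received   */
        uint32_t dreq_service;
    };

    struct dccp_sock_sketch {                   /* stand-in for the accepted child sock */
        uint64_t dccps_iss, dccps_gss, dccps_isr, dccps_gsr;
    };

    static void conn_request_init(struct dccp_request_sketch *dreq,
                                  uint64_t peer_seq, uint64_t our_init_seq,
                                  uint32_t service)
    {
        dreq->dreq_isr = peer_seq;
        dreq->dreq_gsr = dreq->dreq_isr;
        dreq->dreq_iss = our_init_seq;
        dreq->dreq_gss = dreq->dreq_iss;
        dreq->dreq_service = service;
    }

    static void openreq_child_init(struct dccp_sock_sketch *newdp,
                                   const struct dccp_request_sketch *dreq)
    {
        newdp->dccps_iss = dreq->dreq_iss;
        newdp->dccps_gss = dreq->dreq_gss;
        newdp->dccps_isr = dreq->dreq_isr;
        newdp->dccps_gsr = dreq->dreq_gsr;
    }

    int main(void)
    {
        struct dccp_request_sketch dreq;
        struct dccp_sock_sketch child;

        conn_request_init(&dreq, 1000, 5000, 0);
        openreq_child_init(&child, &dreq);
        printf("child iss=%llu isr=%llu\n",
               (unsigned long long)child.dccps_iss,
               (unsigned long long)child.dccps_isr);
        return 0;
    }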
/linux-6.3-rc2/drivers/crypto/marvell/cesa/

tdma.c
    37    void mv_cesa_dma_step(struct mv_cesa_req *dreq)   in mv_cesa_dma_step() (argument)
    39    struct mv_cesa_engine *engine = dreq->engine;   in mv_cesa_dma_step()
    51    writel_relaxed(dreq->chain.first->cur_dma,   in mv_cesa_dma_step()
    62    for (tdma = dreq->chain.first; tdma;) {   in mv_cesa_dma_cleanup()
    75    dreq->chain.first = NULL;   in mv_cesa_dma_cleanup()
    76    dreq->chain.last = NULL;   in mv_cesa_dma_cleanup()
    97    struct mv_cesa_req *dreq)   in mv_cesa_tdma_chain() (argument)
    100   engine->chain.first = dreq->chain.first;   in mv_cesa_tdma_chain()
    101   engine->chain.last = dreq->chain.last;   in mv_cesa_tdma_chain()
    106   last->next = dreq->chain.first;   in mv_cesa_tdma_chain()
    [all …]

cesa.h
    826   void mv_cesa_dma_step(struct mv_cesa_req *dreq);
    828   static inline int mv_cesa_dma_process(struct mv_cesa_req *dreq,   in mv_cesa_dma_process() (argument)
    840   void mv_cesa_dma_prepare(struct mv_cesa_req *dreq,
    842   void mv_cesa_dma_cleanup(struct mv_cesa_req *dreq);
    844   struct mv_cesa_req *dreq);
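Note: the tdma.c hits show a request carrying a first/last pair of DMA descriptors that mv_cesa_tdma_chain() either adopts as the engine's chain or appends to it. A small sketch of that chain-append follows, using invented stand-in structs rather than the driver's types.

    /*
     * Sketch of the chain append in mv_cesa_tdma_chain() (tdma.c ~97-106):
     * adopt the request's chain if the engine has none, otherwise link it
     * onto the end.  Types are simplified stand-ins.
     */
    #include <stdio.h>
    #include <stddef.h>

    struct tdma_desc_sketch { struct tdma_desc_sketch *next; int id; };

    struct chain_sketch { struct tdma_desc_sketch *first, *last; };

    static void chain_append(struct chain_sketch *engine, struct chain_sketch *dreq)
    {
        if (!engine->first) {
            engine->first = dreq->first;
            engine->last  = dreq->last;
        } else {
            engine->last->next = dreq->first;
            engine->last = dreq->last;
        }
    }

    int main(void)
    {
        struct tdma_desc_sketch a = { .id = 1 }, b = { .id = 2 };
        a.next = &b;
        struct chain_sketch req = { .first = &a, .last = &b };
        struct chain_sketch engine = { NULL, NULL };

        chain_append(&engine, &req);
        for (struct tdma_desc_sketch *d = engine.first; d; d = d->next)
            printf("desc %d\n", d->id);
        return 0;
    }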
/linux-6.3-rc2/net/sunrpc/

cache.c
    587   hlist_del_init(&dreq->hash);   in __unhash_deferred_req()
    589   list_del_init(&dreq->recent);   in __unhash_deferred_req()
    598   INIT_LIST_HEAD(&dreq->recent);   in __hash_deferred_req()
    607   dreq->item = item;   in setup_deferral()
    642   setup_deferral(dreq, item, 0);   in cache_wait_req()
    717   dreq = req->defer(req);   in cache_defer_req()
    718   if (dreq == NULL)   in cache_defer_req()
    742   if (dreq->item == item) {   in cache_revisit_request()
    752   dreq->revisit(dreq, 0);   in cache_revisit_request()
    766   if (dreq->owner == owner) {   in cache_clean_deferred()
    [all …]
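Note: the cache.c hits outline the deferral pattern: a request that cannot be answered yet is turned into a deferred-request object keyed by the cache item (dreq = req->defer(req)), and cache_revisit_request() later invokes its ->revisit callback once the upcall completes. A minimal sketch of that callback shape follows, with invented stand-in types.

    /*
     * Sketch of the revisit callback seen at cache.c:742/752: match the
     * deferred request against the cache item, then call ->revisit(dreq, 0).
     */
    #include <stdio.h>

    struct cache_item_sketch { int key; };            /* stand-in for the cache entry */

    struct deferred_req_sketch {
        struct cache_item_sketch *item;
        void (*revisit)(struct deferred_req_sketch *dreq, int too_many);
    };

    static void revisit_if_match(struct deferred_req_sketch *dreq,
                                 struct cache_item_sketch *item)
    {
        if (dreq->item == item)
            dreq->revisit(dreq, 0);
    }

    static void my_revisit(struct deferred_req_sketch *dreq, int too_many)
    {
        (void)dreq;
        printf("upcall answered, retry deferred request (too_many=%d)\n", too_many);
    }

    int main(void)
    {
        struct cache_item_sketch item = { .key = 1 };
        struct deferred_req_sketch dreq = { .item = &item, .revisit = my_revisit };

        revisit_if_match(&dreq, &item);               /* fires the callback */
        return 0;
    }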
/linux-6.3-rc2/drivers/s390/block/

dasd_diag.c
    167   struct dasd_diag_req *dreq;   in dasd_start_diag() (local)
    178   dreq = cqr->data;   in dasd_start_diag()
    183   private->iob.block_count = dreq->block_count;   in dasd_start_diag()
    185   private->iob.bio_list = dreq->bio;   in dasd_start_diag()
    512   struct dasd_diag_req *dreq;   in dasd_diag_build_cp() (local)
    545   cqr = dasd_smalloc_request(DASD_DIAG_MAGIC, 0, struct_size(dreq, bio, count),   in dasd_diag_build_cp()
    550   dreq = (struct dasd_diag_req *) cqr->data;   in dasd_diag_build_cp()
    551   dreq->block_count = count;   in dasd_diag_build_cp()
    552   dbio = dreq->bio;   in dasd_diag_build_cp()
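Note: dasd_diag_build_cp() sizes its request with struct_size(dreq, bio, count) because the request ends in a variable-length bio array. A user-space sketch of that flexible-array sizing follows; the *_sketch types are stand-ins and the size computation mirrors struct_size() only in spirit (no overflow checking).

    /*
     * Sketch of flexible-array sizing as at dasd_diag.c:545: allocate the
     * header plus 'count' trailing bio entries in one block.
     */
    #include <stdlib.h>
    #include <stdio.h>

    struct diag_bio_sketch { unsigned long block; void *buf; };

    struct diag_req_sketch {
        unsigned int block_count;
        struct diag_bio_sketch bio[];             /* flexible array member */
    };

    int main(void)
    {
        unsigned int count = 8;
        /* user-space equivalent of struct_size(dreq, bio, count) */
        size_t sz = sizeof(struct diag_req_sketch) +
                    count * sizeof(struct diag_bio_sketch);

        struct diag_req_sketch *dreq = calloc(1, sz);
        if (!dreq)
            return 1;
        dreq->block_count = count;
        printf("allocated %zu bytes for %u bios\n", sz, dreq->block_count);
        free(dreq);
        return 0;
    }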
/linux-6.3-rc2/drivers/dma/

bcm2835-dma.c
    72    unsigned int dreq;   (member)
    659   if (c->dreq != 0)   in bcm2835_dma_prep_slave_sg()
    660   info |= BCM2835_DMA_PER_MAP(c->dreq);   in bcm2835_dma_prep_slave_sg()
    733   if (c->dreq != 0)   in bcm2835_dma_prep_dma_cyclic()
    734   info |= BCM2835_DMA_PER_MAP(c->dreq);   in bcm2835_dma_prep_dma_cyclic()
    873   to_bcm2835_dma_chan(chan)->dreq = spec->args[0];   in bcm2835_dma_xlate()
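Note: the bcm2835-dma.c hits show the channel caching the peripheral's DREQ number from the devicetree (spec->args[0]) and, when non-zero, ORing BCM2835_DMA_PER_MAP(c->dreq) into the transfer info word so transfers are paced by that peripheral. A sketch of that setup follows; the PER_MAP_SHIFT value below is assumed for illustration and is not taken from the driver.

    /*
     * Sketch of DREQ pacing: build the info word for a channel whose
     * 'dreq' member came from the DT cells (cf. lines 72, 659-660, 873).
     */
    #include <stdint.h>
    #include <stdio.h>

    #define PER_MAP_SHIFT 16u                         /* assumed shift, for the sketch only */
    #define DMA_PER_MAP(x) (((uint32_t)(x) & 0x1f) << PER_MAP_SHIFT)

    struct dma_chan_sketch { unsigned int dreq; };    /* stand-in for the channel's dreq member */

    static uint32_t build_info_word(const struct dma_chan_sketch *c, uint32_t info)
    {
        if (c->dreq != 0)                             /* 0 means "no peripheral pacing" */
            info |= DMA_PER_MAP(c->dreq);
        return info;
    }

    int main(void)
    {
        struct dma_chan_sketch chan = { .dreq = 5 };  /* e.g. taken from spec->args[0] */
        printf("info = 0x%08x\n", build_info_word(&chan, 0));
        return 0;
    }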
/linux-6.3-rc2/fs/nilfs2/

btree.c
    1735  union nilfs_bmap_ptr_req *dreq,   in nilfs_btree_prepare_convert_and_insert() (argument)
    1757  ret = nilfs_bmap_prepare_alloc_ptr(btree, dreq, dat);   in nilfs_btree_prepare_convert_and_insert()
    1764  nreq->bpr_ptr = dreq->bpr_ptr + 1;   in nilfs_btree_prepare_convert_and_insert()
    1784  nilfs_bmap_abort_alloc_ptr(btree, dreq, dat);   in nilfs_btree_prepare_convert_and_insert()
    1795  union nilfs_bmap_ptr_req *dreq,   in nilfs_btree_commit_convert_and_insert() (argument)
    1815  nilfs_bmap_commit_alloc_ptr(btree, dreq, dat);   in nilfs_btree_commit_convert_and_insert()
    1837  nilfs_bmap_commit_alloc_ptr(btree, dreq, dat);   in nilfs_btree_commit_convert_and_insert()
    1851  nilfs_bmap_set_target_v(btree, key, dreq->bpr_ptr);   in nilfs_btree_commit_convert_and_insert()
    1868  union nilfs_bmap_ptr_req dreq, nreq, *di, *ni;   in nilfs_btree_convert_and_insert() (local)
    1873  di = &dreq;   in nilfs_btree_convert_and_insert()
    [all …]
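Note: the btree.c hits follow a prepare/commit/abort shape: the conversion prepares a direct pointer request (dreq) and optionally a node request (nreq, whose bpr_ptr is dreq->bpr_ptr + 1), commits both on success and aborts the allocation on failure. A simplified sketch of that two-phase flow with stand-in types follows; the control flow is condensed, not a copy of the kernel function.

    /*
     * Sketch of the prepare/commit/abort pattern around dreq/nreq
     * (cf. btree.c:1757, 1764, 1784, 1815).
     */
    #include <stdio.h>

    struct ptr_req_sketch { unsigned long bpr_ptr; int prepared; };

    static int prepare_alloc(struct ptr_req_sketch *req, unsigned long ptr)
    {
        req->bpr_ptr = ptr;
        req->prepared = 1;
        return 0;                       /* would return -ENOSPC etc. on failure */
    }

    static void commit_alloc(struct ptr_req_sketch *req) { req->prepared = 0; }
    static void abort_alloc(struct ptr_req_sketch *req)  { req->prepared = 0; }

    static int convert_and_insert(struct ptr_req_sketch *dreq,
                                  struct ptr_req_sketch *nreq)
    {
        int ret = prepare_alloc(dreq, 100);
        if (ret)
            return ret;
        if (nreq) {
            /* cf. btree.c:1764 - the new node's pointer follows dreq's */
            ret = prepare_alloc(nreq, dreq->bpr_ptr + 1);
            if (ret) {
                abort_alloc(dreq);
                return ret;
            }
        }
        /* ... build the new root/child in between ... */
        if (nreq)
            commit_alloc(nreq);
        commit_alloc(dreq);
        return 0;
    }

    int main(void)
    {
        struct ptr_req_sketch dreq = { 0, 0 }, nreq = { 0, 0 };
        printf("convert_and_insert -> %d\n", convert_and_insert(&dreq, &nreq));
        return 0;
    }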
/linux-6.3-rc2/Documentation/devicetree/bindings/dma/

st_fdma.txt
    51    -bit 2-0: Holdoff value, dreq will be masked for
/linux-6.3-rc2/include/linux/

dccp.h
    180   extern int dccp_parse_options(struct sock *sk, struct dccp_request_sock *dreq,

nfs_xdr.h
    1621  struct nfs_direct_req *dreq;   (member)
    1662  struct nfs_direct_req *dreq; /* O_DIRECT request */   (member)
    1674  struct nfs_direct_req *dreq; /* O_DIRECT request */   (member)
/linux-6.3-rc2/drivers/infiniband/core/

cm_trace.h
    183   DEFINE_CM_SEND_EVENT(dreq);
|