/net/sunrpc/

sched.c
     782  trace_rpc_task_timeout(task, task->tk_action);  in __rpc_queue_timer_fn()
     815  task->tk_ops->rpc_call_prepare(task, task->tk_calldata);  in rpc_prepare_task()
     842  trace_rpc_task_end(task, task->tk_action);  in rpc_exit_task()
     845  task->tk_ops->rpc_count_stats(task, task->tk_calldata);  in rpc_exit_task()
     850  task->tk_ops->rpc_call_done(task, task->tk_calldata);  in rpc_exit_task()
     868  trace_rpc_task_signalled(task, task->tk_action);  in rpc_signal_task()
     945  xprt_needs_memalloc(task->tk_xprt, task))  in __rpc_execute()
    1107  memset(task, 0, sizeof(*task));  in rpc_init_task()
    1146  struct rpc_task *task = setup_data->task;  in rpc_new_task() local
    1239  rpc_do_put_task(task, task->tk_workqueue);  in rpc_put_task_async()
    [all …]
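The sched.c hits show the RPC scheduler driving a task's callback table (task->tk_ops) at fixed lifecycle points: rpc_prepare_task() calls ->rpc_call_prepare() and rpc_exit_task() calls ->rpc_count_stats() and ->rpc_call_done(). As a rough, hedged sketch of what such a caller-side callback table looks like (the demo_* names are placeholders, not taken from sched.c):

#include <linux/printk.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/sched.h>

/* Sketch only: callbacks wired into the tk_ops hooks that
 * rpc_prepare_task() and rpc_exit_task() invoke above. */
static void demo_call_prepare(struct rpc_task *task, void *calldata)
{
        /* Reached via task->tk_ops->rpc_call_prepare(); let the call run. */
        rpc_call_start(task);
}

static void demo_call_done(struct rpc_task *task, void *calldata)
{
        /* Reached via task->tk_ops->rpc_call_done(); result is in tk_status. */
        if (task->tk_status < 0)
                pr_debug("demo RPC failed: %d\n", task->tk_status);
}

static const struct rpc_call_ops demo_call_ops = {
        .rpc_call_prepare = demo_call_prepare,
        .rpc_call_done    = demo_call_done,
};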
|
clnt.c
    1956  task->tk_status = rpcauth_wrap_req(task, &xdr);  in rpc_xdr_encode()
    1973  if (task->tk_status == 0 && rpc_reply_expected(task))  in call_encode()
    1974  task->tk_status = xprt_request_enqueue_receive(task);  in call_encode()
    1985  rpc_call_rpcerror(task, task->tk_status);  in call_encode()
    1993  rpc_call_rpcerror(task, task->tk_status);  in call_encode()
    2337  rpc_call_rpcerror(task, task->tk_status);  in call_transmit_status()
    2447  trace_xprt_ping(task->tk_xprt, task->tk_status);  in call_status()
    2629  task->tk_status = rpcauth_unwrap_resp(task, &xdr);  in call_decode()
    2770  task->tk_status = xprt_request_enqueue_receive(task);  in rpc_decode_header()
    3356  task->tk_pid, task->tk_flags, task->tk_status,  in rpc_show_task()
    [all …]
|
xprt.c
     276  xprt->snd_task = task;  in xprt_reserve_xprt()
     286  if (RPC_IS_SOFT(task) || RPC_IS_SOFTCONN(task))  in xprt_reserve_xprt()
     352  if (RPC_IS_SOFT(task) || RPC_IS_SOFTCONN(task))  in xprt_reserve_xprt_cong()
     380  xprt->snd_task = task;  in __xprt_lock_write_func()
    1513  task, 0);  in xprt_prepare_transmit()
    1655  if (task->tk_rqstp)  in xprt_complete_request_init()
    1765  task->tk_status = 0;  in xprt_alloc_slot()
    1889  req->rq_task = task;  in xprt_request_init()
    1902  xprt_init_majortimeo(task, req, task->tk_client->cl_timeout);  in xprt_request_init()
    1927  task->tk_status = 0;  in xprt_reserve()
    [all …]
|
auth.c
     615  if (RPC_IS_ASYNC(task))  in rpcauth_bind_root_cred()
     633  if (RPC_IS_ASYNC(task))  in rpcauth_bind_machine_cred()
     659  if (task->tk_op_cred)  in rpcauth_bindcred()
     728  return ops->crmarshal(task, xdr);  in rpcauth_marshcred()
     743  encode(task->tk_rqstp, xdr, task->tk_msg.rpc_argp);  in rpcauth_wrap_req_encode()
     761  return ops->crwrap_req(task, xdr);  in rpcauth_wrap_req()
     798  return decode(task->tk_rqstp, xdr, task->tk_msg.rpc_resp);  in rpcauth_unwrap_resp_decode()
     833  cred = task->tk_rqstp->rq_cred;  in rpcauth_refreshcred()
     835  err = rpcauth_bindcred(task, task->tk_msg.rpc_cred, task->tk_flags);  in rpcauth_refreshcred()
     838  cred = task->tk_rqstp->rq_cred;  in rpcauth_refreshcred()
    [all …]
|
auth_tls.c
      37  task->tk_flags &= ~RPC_TASK_NO_RETRANS_TIMEOUT;  in rpc_tls_probe_call_prepare()
      38  rpc_call_start(task);  in rpc_tls_probe_call_prepare()
      41  static void rpc_tls_probe_call_done(struct rpc_task *task, void *data)  in rpc_tls_probe_call_done() argument
      62  struct rpc_task *task;  in tls_probe() local
      65  task = rpc_run_task(&task_setup_data);  in tls_probe()
      66  if (IS_ERR(task))  in tls_probe()
      67  return PTR_ERR(task);  in tls_probe()
      68  status = task->tk_status;  in tls_probe()
      69  rpc_put_task(task);  in tls_probe()
     115  static int tls_refresh(struct rpc_task *task)  in tls_refresh() argument
    [all …]
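The tls_probe() hits above show the usual synchronous rpc_run_task()/rpc_put_task() pattern: run the task, read tk_status once it completes, then drop the reference. A minimal sketch of that pattern, assuming the caller already has an rpc_clnt and a fully prepared rpc_message (example_probe and the flag choice are illustrative, not the actual auth_tls.c code):

#include <linux/err.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/sched.h>

/* Sketch only: run one RPC synchronously and return its status.
 * @msg must name a real procedure (cf. the probe message auth_tls.c builds). */
static int example_probe(struct rpc_clnt *clnt, const struct rpc_message *msg)
{
        struct rpc_task_setup task_setup_data = {
                .rpc_client  = clnt,
                .rpc_message = msg,
                .flags       = RPC_TASK_SOFT | RPC_TASK_SOFTCONN,
        };
        struct rpc_task *task;
        int status;

        task = rpc_run_task(&task_setup_data);  /* waits: RPC_TASK_ASYNC not set */
        if (IS_ERR(task))
                return PTR_ERR(task);           /* task was never started */

        status = task->tk_status;               /* RPC result after completion */
        rpc_put_task(task);                     /* drop our reference */
        return status;
}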
|
debugfs.c
      23  struct rpc_task *task = v;  in tasks_show() local
      24  struct rpc_clnt *clnt = task->tk_client;  in tasks_show()
      27  if (RPC_IS_QUEUED(task))  in tasks_show()
      28  rpc_waitq = rpc_qname(task->tk_waitqueue);  in tasks_show()
      30  if (task->tk_rqstp)  in tasks_show()
      34  task->tk_pid, task->tk_flags, task->tk_status,  in tasks_show()
      35  clnt->cl_clid, xid, rpc_task_timeout(task), task->tk_ops,  in tasks_show()
      37  task->tk_action, rpc_waitq);  in tasks_show()
      47  struct rpc_task *task;  in tasks_start() local
      52  return task;  in tasks_start()
    [all …]
|
stats.c
     154  void rpc_count_iostats_metrics(const struct rpc_task *task,  in rpc_count_iostats_metrics() argument
     157  struct rpc_rqst *req = task->tk_rqstp;  in rpc_count_iostats_metrics()
     169  op_metrics->om_timeouts += task->tk_timeouts;  in rpc_count_iostats_metrics()
     176  backlog = ktime_sub(req->rq_xtime, task->tk_start);  in rpc_count_iostats_metrics()
     182  execute = ktime_sub(now, task->tk_start);  in rpc_count_iostats_metrics()
     184  if (task->tk_status < 0)  in rpc_count_iostats_metrics()
     200  void rpc_count_iostats(const struct rpc_task *task, struct rpc_iostats *stats)  in rpc_count_iostats() argument
     202  rpc_count_iostats_metrics(task,  in rpc_count_iostats()
     203  &stats[task->tk_msg.rpc_proc->p_statidx]);  in rpc_count_iostats()
|
auth_unix.c
     111  unx_marshal(struct rpc_task *task, struct xdr_stream *xdr)  in unx_marshal() argument
     113  struct rpc_clnt *clnt = task->tk_client;  in unx_marshal()
     114  struct rpc_cred *cred = task->tk_rqstp->rq_cred;  in unx_marshal()
     166  unx_refresh(struct rpc_task *task)  in unx_refresh() argument
     168  set_bit(RPCAUTH_CRED_UPTODATE, &task->tk_rqstp->rq_cred->cr_flags);  in unx_refresh()
     173  unx_validate(struct rpc_task *task, struct xdr_stream *xdr)  in unx_validate() argument
     175  struct rpc_auth *auth = task->tk_rqstp->rq_cred->cr_auth;  in unx_validate()
|
auth_null.c
      63  nul_marshal(struct rpc_task *task, struct xdr_stream *xdr)  in nul_marshal() argument
      83  nul_refresh(struct rpc_task *task)  in nul_refresh() argument
      85  set_bit(RPCAUTH_CRED_UPTODATE, &task->tk_rqstp->rq_cred->cr_flags);  in nul_refresh()
      90  nul_validate(struct rpc_task *task, struct xdr_stream *xdr)  in nul_validate() argument
|
svc.c
     378  set_cpus_allowed_ptr(task, cpumask_of(node));  in svc_pool_map_set_cpumask()
     802  struct task_struct *task;  in svc_start_kthreads() local
     818  if (IS_ERR(task)) {  in svc_start_kthreads()
     820  return PTR_ERR(task);  in svc_start_kthreads()
     823  rqstp->rq_task = task;  in svc_start_kthreads()
     828  wake_up_process(task);  in svc_start_kthreads()
    1594  struct rpc_task *task;  in svc_process_bc() local
    1650  task = rpc_run_bc_task(req, &timeout);  in svc_process_bc()
    1652  if (IS_ERR(task))  in svc_process_bc()
    1655  WARN_ON_ONCE(atomic_read(&task->tk_count) != 1);  in svc_process_bc()
    [all …]
|
rpcb_clnt.c
     669  void rpcb_getport_async(struct rpc_task *task)  in rpcb_getport_async() argument
     684  clnt = rpcb_find_transport_owner(task->tk_client);  in rpcb_getport_async()
     686  xprt = xprt_get(task->tk_xprt);  in rpcb_getport_async()
     690  rpc_sleep_on_timeout(&xprt->binding, task,  in rpcb_getport_async()
     727  trace_rpcb_getport(clnt, task, bind_version);  in rpcb_getport_async()
     734  task->tk_client->cl_timeout);  in rpcb_getport_async()
     787  task->tk_status = status;  in rpcb_getport_async()
|
xprtsock.c
    1383  struct rpc_task *task;  in xs_udp_data_read_skb() local
    1408  task = rovr->rq_task;  in xs_udp_data_read_skb()
    1422  xprt_adjust_cwnd(xprt, task, copied);  in xs_udp_data_read_skb()
    1425  xprt_complete_rqst(task, copied);  in xs_udp_data_read_skb()
    1711  xprt_adjust_cwnd(xprt, task, -ETIMEDOUT);  in xs_udp_timer()
    1877  xprt_set_bound(task->tk_xprt);  in xs_local_rpcbind()
    2082  if (RPC_IS_ASYNC(task)) {  in xs_local_connect()
    2096  if (ret && !RPC_IS_SOFTCONN(task))  in xs_local_connect()
    2807  transport->clnt = task->tk_client;  in xs_connect()
    2946  struct rpc_rqst *rqst = task->tk_rqstp;  in bc_malloc()
    [all …]
|
/net/sunrpc/auth_gss/

auth_gss.c
    1280  struct rpc_task *task;  in gss_send_destroy_context() local
    1289  if (!IS_ERR(task))  in gss_send_destroy_context()
    1290  rpc_put_task(task);  in gss_send_destroy_context()
    1556  trace_rpcgss_seqno(task);  in gss_marshal()
    1619  task->tk_rqstp->rq_cred = new;  in gss_renew_cred()
    1645  gss_refresh(struct rpc_task *task)  in gss_refresh() argument
    1655  ret = gss_renew_cred(task);  in gss_refresh()
    1658  cred = task->tk_rqstp->rq_cred;  in gss_refresh()
    1662  ret = gss_refresh_upcall(task);  in gss_refresh()
    2049  trace_rpcgss_unwrap_failed(task);  in gss_unwrap_resp_integ()
    [all …]
|
/net/sunrpc/xprtrdma/

transport.c
     423  xprt_rdma_timer(struct rpc_xprt *xprt, struct rpc_task *task)  in xprt_rdma_timer() argument
     479  WARN_ON_ONCE(!xprt_lock_connect(xprt, task, r_xprt));  in xprt_rdma_connect()
     508  task->tk_rqstp = &req->rl_slot;  in xprt_rdma_alloc_slot()
     509  task->tk_status = 0;  in xprt_rdma_alloc_slot()
     513  task->tk_status = -ENOMEM;  in xprt_rdma_alloc_slot()
     514  xprt_add_backlog(xprt, task);  in xprt_rdma_alloc_slot()
     558  xprt_rdma_allocate(struct rpc_task *task)  in xprt_rdma_allocate() argument
     560  struct rpc_rqst *rqst = task->tk_rqstp;  in xprt_rdma_allocate()
     587  xprt_rdma_free(struct rpc_task *task)  in xprt_rdma_free() argument
     589  struct rpc_rqst *rqst = task->tk_rqstp;  in xprt_rdma_free()
    [all …]
|
svc_rdma_backchannel.c
     101  xprt_rdma_bc_allocate(struct rpc_task *task)  in xprt_rdma_bc_allocate() argument
     103  struct rpc_rqst *rqst = task->tk_rqstp;  in xprt_rdma_bc_allocate()
     127  xprt_rdma_bc_free(struct rpc_task *task)  in xprt_rdma_bc_free() argument
     129  struct rpc_rqst *rqst = task->tk_rqstp;  in xprt_rdma_bc_free()
|
/net/netfilter/ipvs/

ip_vs_est.c
     252  if (kd->task)  in ip_vs_est_kthread_start()
     261  if (IS_ERR(kd->task)) {  in ip_vs_est_kthread_start()
     262  ret = PTR_ERR(kd->task);  in ip_vs_est_kthread_start()
     263  kd->task = NULL;  in ip_vs_est_kthread_start()
     267  set_user_nice(kd->task, sysctl_est_nice(ipvs));  in ip_vs_est_kthread_start()
     272  wake_up_process(kd->task);  in ip_vs_est_kthread_start()
     280  if (kd->task) {  in ip_vs_est_kthread_stop()
     282  kthread_stop(kd->task);  in ip_vs_est_kthread_stop()
     283  kd->task = NULL;  in ip_vs_est_kthread_stop()
     512  if (kd->task) {  in ip_vs_est_kthread_destroy()
    [all …]
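The ip_vs_est.c entries (and the cmtp, bnep and hidp ones further down) all follow the standard kthread lifecycle: create the thread, check IS_ERR()/PTR_ERR(), tune and wake it, and later tear it down with kthread_stop(). A generic sketch of that pattern with placeholder names (my_thread_fn, my_task), not the actual ipvs structures:

#include <linux/err.h>
#include <linux/jiffies.h>
#include <linux/kthread.h>
#include <linux/sched.h>

static struct task_struct *my_task;     /* stands in for kd->task and friends */

static int my_thread_fn(void *data)
{
        while (!kthread_should_stop()) {
                /* periodic work would go here */
                schedule_timeout_interruptible(HZ);
        }
        return 0;
}

static int my_start(void *data)
{
        struct task_struct *task;

        task = kthread_create(my_thread_fn, data, "my_worker");
        if (IS_ERR(task))
                return PTR_ERR(task);   /* thread never ran, nothing to undo */

        set_user_nice(task, 0);         /* tune it before it starts */
        my_task = task;
        wake_up_process(task);
        return 0;
}

static void my_stop(void)
{
        if (my_task) {
                kthread_stop(my_task);  /* blocks until my_thread_fn() returns */
                my_task = NULL;
        }
}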
|
ip_vs_sync.c
     198  struct task_struct *task;  member
    1743  struct task_struct *task;  in start_sync_thread() local
    1869  if (IS_ERR(task)) {  in start_sync_thread()
    1870  result = PTR_ERR(task);  in start_sync_thread()
    1873  tinfo->task = task;  in start_sync_thread()
    1899  if (tinfo->task)  in start_sync_thread()
    1900  kthread_stop(tinfo->task);  in start_sync_thread()
    1967  task_pid_nr(tinfo->task));  in stop_sync_thread()
    1969  ret = kthread_stop(tinfo->task);  in stop_sync_thread()
    1989  task_pid_nr(tinfo->task));  in stop_sync_thread()
    [all …]
|
/net/bluetooth/cmtp/

core.c
     384  session->task = kthread_run(cmtp_session, session, "kcmtpd_ctr_%d",  in cmtp_add_connection()
     386  if (IS_ERR(session->task)) {  in cmtp_add_connection()
     388  err = PTR_ERR(session->task);  in cmtp_add_connection()
|
cmtp.h
      85  struct task_struct *task;  member
|
/net/bluetooth/bnep/

core.c
     631  s->task = kthread_run(bnep_session, s, "kbnepd %s", dev->name);  in bnep_add_connection()
     632  if (IS_ERR(s->task)) {  in bnep_add_connection()
     637  err = PTR_ERR(s->task);  in bnep_add_connection()
|
bnep.h
     152  struct task_struct *task;  member
|
/net/bluetooth/hidp/

hidp.h
     145  struct task_struct *task;  member
|
core.c
    1059  session->task = kthread_run(hidp_session_thread, session,  in hidp_session_start_sync()
    1061  if (IS_ERR(session->task))  in hidp_session_start_sync()
    1062  return PTR_ERR(session->task);  in hidp_session_start_sync()
|
/net/core/

net_namespace.c
    1506  static struct ns_common *netns_get(struct task_struct *task)  in netns_get() argument
    1511  task_lock(task);  in netns_get()
    1512  nsproxy = task->nsproxy;  in netns_get()
    1515  task_unlock(task);  in netns_get()
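netns_get() above reads task->nsproxy under task_lock(), which is what keeps the pointer stable against a concurrent exit. A short sketch of that access pattern, assuming the caller wants a counted reference to the task's net namespace (get_task_net is a made-up helper name; the real netns_get() hands back an ns_common for the nsfs code):

#include <linux/nsproxy.h>
#include <linux/sched/task.h>
#include <net/net_namespace.h>

/* Sketch only: take a reference on @task's network namespace, or return
 * NULL if the task is exiting and has already dropped its nsproxy. */
static struct net *get_task_net(struct task_struct *task)
{
        struct nsproxy *nsproxy;
        struct net *net = NULL;

        task_lock(task);                        /* stabilizes task->nsproxy */
        nsproxy = task->nsproxy;
        if (nsproxy)
                net = get_net(nsproxy->net_ns); /* take the ref while locked */
        task_unlock(task);

        return net;
}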
|
/net/

Kconfig
      41  to tasks depending on whether the task is a compat task or not. To
      44  which message to actually pass to the task.
|