/linux/drivers/net/ethernet/intel/ice/

ice_fltr.c
    310  LIST_HEAD(tmp_list);  in ice_fltr_prepare_mac()
    318  result = mac_action(vsi, &tmp_list);  in ice_fltr_prepare_mac()
    319  ice_fltr_free_list(ice_pf_to_dev(vsi->back), &tmp_list);  in ice_fltr_prepare_mac()
    337  LIST_HEAD(tmp_list);  in ice_fltr_prepare_mac_and_broadcast()
    347  result = mac_action(vsi, &tmp_list);  in ice_fltr_prepare_mac_and_broadcast()
    348  ice_fltr_free_list(ice_pf_to_dev(vsi->back), &tmp_list);  in ice_fltr_prepare_mac_and_broadcast()
    362  LIST_HEAD(tmp_list);  in ice_fltr_prepare_vlan()
    365  if (ice_fltr_add_vlan_to_list(vsi, &tmp_list, vlan))  in ice_fltr_prepare_vlan()
    368  result = vlan_action(vsi, &tmp_list);  in ice_fltr_prepare_vlan()
    386  LIST_HEAD(tmp_list);  in ice_fltr_prepare_eth()
    [all …]
|
/linux/tools/perf/arch/x86/util/

iostat.c
    200  struct iio_root_ports_list *tmp_list;  in iio_root_ports_scan() local
    208  tmp_list = calloc(1, sizeof(*tmp_list));  in iio_root_ports_scan()
    209  if (!tmp_list)  in iio_root_ports_scan()
    213  ret = iio_mapping(pmu_idx, tmp_list);  in iio_root_ports_scan()
    219  *list = tmp_list;  in iio_root_ports_scan()
    221  iio_root_ports_list_free(tmp_list);  in iio_root_ports_scan()
    256  struct iio_root_ports_list *tmp_list = calloc(1, sizeof(*tmp_list));  in iio_root_ports_list_filter() local
    258  if (!tmp_list)  in iio_root_ports_list_filter()
    283  if (tmp_list->nr_entries == 0) {  in iio_root_ports_list_filter()
    290  iio_root_ports_list_free(tmp_list);  in iio_root_ports_list_filter()
    [all …]
|
/linux/fs/smb/client/

misc.c
    771  if (tmp_list == NULL)  in cifs_close_deferred_file()
    773  tmp_list->cfile = cfile;  in cifs_close_deferred_file()
    782  list_del(&tmp_list->list);  in cifs_close_deferred_file()
    783  kfree(tmp_list);  in cifs_close_deferred_file()
    803  if (tmp_list == NULL)  in cifs_close_all_deferred_files()
    805  tmp_list->cfile = cfile;  in cifs_close_all_deferred_files()
    814  list_del(&tmp_list->list);  in cifs_close_all_deferred_files()
    815  kfree(tmp_list);  in cifs_close_all_deferred_files()
    839  if (tmp_list == NULL)  in cifs_close_deferred_file_under_dentry()
    851  list_del(&tmp_list->list);  in cifs_close_deferred_file_under_dentry()
    [all …]
|
/linux/net/rds/

loop.c
    180  LIST_HEAD(tmp_list);  in rds_loop_exit()
    186  list_splice(&loop_conns, &tmp_list);  in rds_loop_exit()
    190  list_for_each_entry_safe(lc, _lc, &tmp_list, loop_node) {  in rds_loop_exit()
    199  LIST_HEAD(tmp_list);  in rds_loop_kill_conns()
    207  list_move_tail(&lc->loop_node, &tmp_list);  in rds_loop_kill_conns()
    211  list_for_each_entry_safe(lc, _lc, &tmp_list, loop_node) {  in rds_loop_kill_conns()
|
tcp.c
    429  LIST_HEAD(tmp_list);  in rds_tcp_destroy_conns()
    434  if (!list_has_conn(&tmp_list, tc->t_cpath->cp_conn))  in rds_tcp_destroy_conns()
    435  list_move_tail(&tc->t_tcp_node, &tmp_list);  in rds_tcp_destroy_conns()
    439  list_for_each_entry_safe(tc, _tc, &tmp_list, t_tcp_node)  in rds_tcp_destroy_conns()
    608  LIST_HEAD(tmp_list);  in rds_tcp_kill_sock()
    620  if (!list_has_conn(&tmp_list, tc->t_cpath->cp_conn)) {  in rds_tcp_kill_sock()
    621  list_move_tail(&tc->t_tcp_node, &tmp_list);  in rds_tcp_kill_sock()
    628  list_for_each_entry_safe(tc, _tc, &tmp_list, t_tcp_node)  in rds_tcp_kill_sock()
|
ib_rdma.c
    168  LIST_HEAD(tmp_list);  in rds_ib_destroy_nodev_conns()
    172  list_splice(&ib_nodev_conns, &tmp_list);  in rds_ib_destroy_nodev_conns()
    175  list_for_each_entry_safe(ic, _ic, &tmp_list, ib_node)  in rds_ib_destroy_nodev_conns()
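
The three net/rds hits above (loop.c, tcp.c, ib_rdma.c) are all the same teardown idiom: move every connection from a shared list onto an on-stack tmp_list while the lock is held, then walk and free the entries with the lock dropped. A minimal sketch of that shape follows; it is not taken from rds, and the demo_* names and the spinlock are invented for illustration:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Hypothetical connection object; only the list linkage matters here. */
struct demo_conn {
        struct list_head node;
};

static LIST_HEAD(demo_conns);           /* global list, protected by demo_lock */
static DEFINE_SPINLOCK(demo_lock);

static void demo_destroy_conns(void)
{
        struct demo_conn *c, *tmp;
        LIST_HEAD(tmp_list);            /* on-stack temporary list head */

        /* Detach every entry while holding the lock ... */
        spin_lock_irq(&demo_lock);
        list_splice_init(&demo_conns, &tmp_list);
        spin_unlock_irq(&demo_lock);

        /* ... then walk and free at leisure, with the lock dropped. */
        list_for_each_entry_safe(c, tmp, &tmp_list, node) {
                list_del(&c->node);
                kfree(c);
        }
}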
|
/linux/net/sunrpc/

backchannel_rqst.c
    134  struct list_head tmp_list;  in xprt_setup_bc() local
    150  INIT_LIST_HEAD(&tmp_list);  in xprt_setup_bc()
    161  list_add(&req->rq_bc_pa_list, &tmp_list);  in xprt_setup_bc()
    168  list_splice(&tmp_list, &xprt->bc_pa_list);  in xprt_setup_bc()
    181  while (!list_empty(&tmp_list)) {  in xprt_setup_bc()
    182  req = list_first_entry(&tmp_list,  in xprt_setup_bc()
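
xprt_setup_bc() above shows the complementary construction idiom: allocate into a private tmp_list and splice the whole batch onto the live list only once every allocation has succeeded, so the shared list never exposes a partially built set (the same shape appears in ib_alloc_cqs() and pd_alloc_chan_resources() further down). A hedged, self-contained sketch with invented demo_* names; any locking a real caller needs around the final splice is omitted:

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>

/* Hypothetical request type used only to illustrate the pattern. */
struct demo_req {
        struct list_head entry;
};

/*
 * Allocate 'count' requests onto a private tmp_list first; the batch is
 * committed to the shared list only after every allocation has succeeded.
 */
static int demo_setup_batch(struct list_head *shared, unsigned int count)
{
        struct demo_req *req, *tmp;
        LIST_HEAD(tmp_list);
        unsigned int i;

        for (i = 0; i < count; i++) {
                req = kzalloc(sizeof(*req), GFP_KERNEL);
                if (!req)
                        goto out_free;
                list_add(&req->entry, &tmp_list);
        }

        list_splice(&tmp_list, shared);         /* commit the whole batch */
        return 0;

out_free:
        /* Unwind: free everything staged so far, leaving 'shared' untouched. */
        list_for_each_entry_safe(req, tmp, &tmp_list, entry) {
                list_del(&req->entry);
                kfree(req);
        }
        return -ENOMEM;
}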
|
/linux/tools/perf/util/

cgroup.c
    419  struct evlist *orig_list, *tmp_list;  in evlist__expand_cgroup() local
    433  tmp_list = evlist__new();  in evlist__expand_cgroup()
    434  if (orig_list == NULL || tmp_list == NULL) {  in evlist__expand_cgroup()
    487  evlist__add(tmp_list, evsel);  in evlist__expand_cgroup()
    494  if (metricgroup__copy_metric_events(tmp_list, cgrp,  in evlist__expand_cgroup()
    500  evlist__splice_list_tail(evlist, &tmp_list->core.entries);  in evlist__expand_cgroup()
    501  tmp_list->core.nr_entries = 0;  in evlist__expand_cgroup()
    514  evlist__delete(tmp_list);  in evlist__expand_cgroup()
|
/linux/drivers/infiniband/core/

cq.c
    370  LIST_HEAD(tmp_list);  in ib_alloc_cqs()
    395  list_add_tail(&cq->pool_entry, &tmp_list);  in ib_alloc_cqs()
    399  list_splice(&tmp_list, &dev->cq_pools[poll_ctx]);  in ib_alloc_cqs()
    405  list_for_each_entry_safe(cq, n, &tmp_list, pool_entry) {  in ib_alloc_cqs()
|
/linux/drivers/md/

dm-log-userspace-base.c
    489  LIST_HEAD(tmp_list);  in flush_by_group()
    502  list_move(&fe->list, &tmp_list);  in flush_by_group()
    528  list_splice_init(&tmp_list, flush_list);  in flush_by_group()
    539  list_splice_init(&tmp_list, flush_list);  in flush_by_group()
|
/linux/fs/nfs/

pnfs.c
    655  struct list_head *tmp_list)  in pnfs_lseg_dec_and_remove_zero() argument
    660  list_add(&lseg->pls_list, tmp_list);  in pnfs_lseg_dec_and_remove_zero()
    666  struct list_head *tmp_list)  in mark_lseg_invalid() argument
    721  struct list_head *tmp_list,  in pnfs_mark_matching_lsegs_invalid() argument
    780  LIST_HEAD(tmp_list);  in __pnfs_destroy_layout()
    790  pnfs_free_lseg_list(&tmp_list);  in __pnfs_destroy_layout()
    1439  LIST_HEAD(tmp_list);  in _pnfs_return_layout()
    1487  pnfs_free_lseg_list(&tmp_list);  in _pnfs_return_layout()
    2618  struct list_head *tmp_list,  in pnfs_mark_matching_lsegs_return() argument
    2631  tmp_list = &lo->plh_return_segs;  in pnfs_mark_matching_lsegs_return()
    [all …]
|
pnfs.h
    279  void pnfs_free_lseg_list(struct list_head *tmp_list);
    296  struct list_head *tmp_list,
    300  struct list_head *tmp_list,
|
/linux/drivers/dma/

pch_dma.c
    488  LIST_HEAD(tmp_list);  in pd_alloc_chan_resources()
    508  list_add_tail(&desc->desc_node, &tmp_list);  in pd_alloc_chan_resources()
    512  list_splice(&tmp_list, &pd_chan->free_list);  in pd_alloc_chan_resources()
    527  LIST_HEAD(tmp_list);  in pd_free_chan_resources()
    534  list_splice_init(&pd_chan->free_list, &tmp_list);  in pd_free_chan_resources()
    538  list_for_each_entry_safe(desc, _d, &tmp_list, desc_node)  in pd_free_chan_resources()
|
/linux/drivers/net/wireless/ath/ath6kl/

htc.h
    672  struct list_head *tmp_list;  in get_queue_depth() local
    675  list_for_each(tmp_list, queue)  in get_queue_depth()
|
/linux/drivers/net/ethernet/mellanox/mlx5/core/en/

tc_tun_encap.c
    174  list_for_each_entry(flow, flow_list, tmp_list) {  in mlx5e_tc_encap_flows_add()
    225  list_for_each_entry(flow, flow_list, tmp_list) {  in mlx5e_tc_encap_flows_del()
    281  list_add(&flow->tmp_list, flow_list);  in mlx5e_take_tmp_flow()
    418  list_add(&flow->tmp_list, &flow_list);  in mlx5e_tc_update_neigh_used_value()
    1456  list_for_each_entry(flow, encap_flows, tmp_list) {  in mlx5e_invalidate_encap()
    1505  list_for_each_entry(flow, encap_flows, tmp_list) {  in mlx5e_reoffload_encap()
    1618  list_for_each_entry(flow, flow_list, tmp_list)  in mlx5e_unoffload_flow_list()
    1629  list_for_each_entry(flow, decap_flows, tmp_list) {  in mlx5e_reoffload_decap()
|
tc_priv.h
    106  struct list_head tmp_list; /* temporary flow list used by neigh update */  member
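
In the mlx5e entries, tmp_list is not an on-stack head but a second list_head embedded in the flow object itself (team_option_inst below does the same), so an element that already lives on a long-lived list can also be strung onto a short-lived batch list without disturbing its primary linkage. A small illustration with invented names, not the mlx5e code:

#include <linux/list.h>

struct demo_flow {
        struct list_head node;          /* primary list membership */
        struct list_head tmp_list;      /* temporary grouping for a batched update */
};

/* Collect every flow on 'primary' onto 'batch' for later processing. */
static void demo_collect(struct list_head *primary, struct list_head *batch)
{
        struct demo_flow *flow;

        list_for_each_entry(flow, primary, node)
                list_add_tail(&flow->tmp_list, batch);
}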
|
/linux/include/net/

if_inet6.h
    72  struct list_head tmp_list;  member
|
/linux/drivers/scsi/

scsi_error.c
    1632  LIST_HEAD(tmp_list);  in scsi_eh_target_reset()
    1635  list_splice_init(work_q, &tmp_list);  in scsi_eh_target_reset()
    1637  while (!list_empty(&tmp_list)) {  in scsi_eh_target_reset()
    1645  list_splice_init(&tmp_list, work_q);  in scsi_eh_target_reset()
    1653  scmd = list_entry(tmp_list.next, struct scsi_cmnd, eh_entry);  in scsi_eh_target_reset()
    1667  list_for_each_entry_safe(scmd, next, &tmp_list, eh_entry) {  in scsi_eh_target_reset()
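
scsi_eh_target_reset() uses tmp_list as a staging queue: it claims the whole work queue with list_splice_init(), processes entries one by one, and splices anything unhandled back. The sketch below shows only that give-and-take shape, with invented names, not the actual SCSI error-handler logic:

#include <linux/list.h>
#include <linux/types.h>

/* Hypothetical command type; real code would carry more state. */
struct demo_cmd {
        struct list_head entry;
};

static void demo_drain_queue(struct list_head *work_q, struct list_head *done_q,
                             bool (*handle)(struct demo_cmd *))
{
        struct demo_cmd *cmd, *next;
        LIST_HEAD(tmp_list);

        /* Take ownership of everything currently queued. */
        list_splice_init(work_q, &tmp_list);

        list_for_each_entry_safe(cmd, next, &tmp_list, entry) {
                if (!handle(cmd)) {
                        /* Hand the unprocessed remainder back to the caller. */
                        list_splice_init(&tmp_list, work_q);
                        return;
                }
                list_move_tail(&cmd->entry, done_q);
        }
}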
|
/linux/fs/ocfs2/

extent_map.c
    105  LIST_HEAD(tmp_list);  in ocfs2_extent_map_trunc()
    112  list_move(&emi->ei_list, &tmp_list);  in ocfs2_extent_map_trunc()
    126  list_for_each_entry_safe(emi, n, &tmp_list, ei_list) {  in ocfs2_extent_map_trunc()
|
/linux/drivers/net/team/

team_core.c
    111  struct list_head tmp_list;  member
    2438  struct team_option_inst, tmp_list);  in team_nl_send_options_get()
    2460  list_for_each_entry_from(opt_inst, sel_opt_inst_list, tmp_list) {  in team_nl_send_options_get()
    2509  list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list);  in team_nl_options_get_doit()
    2654  list_add(&opt_inst->tmp_list, &opt_inst_list);  in team_nl_options_set_doit()
    2866  list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list);  in __team_options_change_check()
|
/linux/drivers/net/ethernet/broadcom/bnx2x/

bnx2x_main.c
    10564  return tmp_list;  in bnx2x_prev_path_get_entry()
    10581  if (tmp_list) {  in bnx2x_prev_path_mark_eeh()
    10582  tmp_list->aer = 1;  in bnx2x_prev_path_mark_eeh()
    10603  if (tmp_list) {  in bnx2x_prev_is_path_marked()
    10604  if (tmp_list->aer) {  in bnx2x_prev_is_path_marked()
    10647  if (tmp_list) {  in bnx2x_prev_mark_path()
    10648  if (!tmp_list->aer) {  in bnx2x_prev_mark_path()
    10653  tmp_list->aer = 0;  in bnx2x_prev_mark_path()
    10662  if (!tmp_list) {  in bnx2x_prev_mark_path()
    10670  tmp_list->aer = 0;  in bnx2x_prev_mark_path()
    [all …]
|
/linux/drivers/net/wireless/ath/carl9170/

main.c
    196  list_add_tail(&tid_info->tmp_list, &tid_gc);  in carl9170_ampdu_gc()
    209  tmp_list);  in carl9170_ampdu_gc()
    214  list_del_init(&tid_info->tmp_list);  in carl9170_ampdu_gc()
    1428  INIT_LIST_HEAD(&tid_info->tmp_list);  in carl9170_op_ampdu_action()
|
carl9170.h
    122  struct list_head tmp_list;  member
|
/linux/drivers/xen/

xen-scsiback.c
    1238  struct list_head tmp_list;  in scsiback_release_translation_entry() local
    1243  list_cut_before(&tmp_list, head, head);  in scsiback_release_translation_entry()
    1247  list_for_each_entry_safe(entry, tmp, &tmp_list, l) {  in scsiback_release_translation_entry()
|
/linux/drivers/char/

virtio_console.c
    385  LIST_HEAD(tmp_list);  in reclaim_dma_bufs()
    392  list_cut_position(&tmp_list, &pending_free_dma_bufs,  in reclaim_dma_bufs()
    397  list_for_each_entry_safe(buf, tmp, &tmp_list, list) {  in reclaim_dma_bufs()
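
reclaim_dma_bufs() here, like scsiback_release_translation_entry() above, detaches only a leading segment of the list with list_cut_position()/list_cut_before() rather than splicing everything. A minimal sketch of the list_cut_position() variant, with invented names and an invented lock; entries after 'last' stay queued:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_buf {
        struct list_head list;
};

static LIST_HEAD(pending_bufs);         /* protected by pending_lock */
static DEFINE_SPINLOCK(pending_lock);

/*
 * Detach the leading segment of pending_bufs, up to and including 'last'
 * (which must currently be on pending_bufs), in O(1), then free it with
 * the lock dropped.
 */
static void demo_reclaim_up_to(struct demo_buf *last)
{
        struct demo_buf *buf, *tmp;
        LIST_HEAD(tmp_list);

        spin_lock_irq(&pending_lock);
        list_cut_position(&tmp_list, &pending_bufs, &last->list);
        spin_unlock_irq(&pending_lock);

        list_for_each_entry_safe(buf, tmp, &tmp_list, list) {
                list_del(&buf->list);
                kfree(buf);
        }
}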
|