Lines matching refs: qs

731 static void init_qset_cntxt(struct sge_qset *qs, unsigned int id)  in init_qset_cntxt()  argument
733 qs->rspq.cntxt_id = id; in init_qset_cntxt()
734 qs->fl[0].cntxt_id = 2 * id; in init_qset_cntxt()
735 qs->fl[1].cntxt_id = 2 * id + 1; in init_qset_cntxt()
736 qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id; in init_qset_cntxt()
737 qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id; in init_qset_cntxt()
738 qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id; in init_qset_cntxt()
739 qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id; in init_qset_cntxt()
740 qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id; in init_qset_cntxt()
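
These references appear to come from the cxgb3 SGE implementation (sge.c). Lines 731-740 show how one queue-set index fans out into hardware context IDs: the response queue takes the index itself, the two free lists take 2*id and 2*id+1, and each TX queue adds the index to a firmware-defined base (FW_TUNNEL_SGEEC_START and friends), with the tunnel and control queues also receiving a TID token. A minimal user-space sketch of that mapping, with illustrative stand-in values for the firmware constants:

    #include <stdio.h>

    /* Stand-ins for the firmware-defined bases; the values are illustrative only. */
    enum {
        FW_TUNNEL_SGEEC_START = 0x100,
        FW_OFLD_SGEEC_START   = 0x200,
        FW_CTRL_SGEEC_START   = 0x300,
    };

    struct qset_ids {
        unsigned int rspq;      /* response queue context id    */
        unsigned int fl[2];     /* free-list context ids        */
        unsigned int txq_eth;   /* Ethernet TX queue context id */
        unsigned int txq_ofld;  /* offload TX queue context id  */
        unsigned int txq_ctrl;  /* control TX queue context id  */
    };

    /* Mirrors the arithmetic in init_qset_cntxt(): one qset index yields
     * one response queue, two free lists and three TX queue contexts. */
    static void layout_qset_ids(struct qset_ids *ids, unsigned int id)
    {
        ids->rspq     = id;
        ids->fl[0]    = 2 * id;
        ids->fl[1]    = 2 * id + 1;
        ids->txq_eth  = FW_TUNNEL_SGEEC_START + id;
        ids->txq_ofld = FW_OFLD_SGEEC_START + id;
        ids->txq_ctrl = FW_CTRL_SGEEC_START + id;
    }

    int main(void)
    {
        struct qset_ids ids;

        layout_qset_ids(&ids, 3);
        printf("rspq=%u fl0=%u fl1=%u eth=0x%x\n",
               ids.rspq, ids.fl[0], ids.fl[1], ids.txq_eth);
        return 0;
    }
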
1251 struct sge_qset *qs, struct sge_txq *q) in t3_stop_tx_queue() argument
1254 set_bit(TXQ_ETH, &qs->txq_stopped); in t3_stop_tx_queue()
1272 struct sge_qset *qs; in t3_eth_xmit() local
1286 qs = &pi->qs[qidx]; in t3_eth_xmit()
1287 q = &qs->txq[TXQ_ETH]; in t3_eth_xmit()
1296 t3_stop_tx_queue(txq, qs, q); in t3_eth_xmit()
1313 t3_stop_tx_queue(txq, qs, q); in t3_eth_xmit()
1316 test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) { in t3_eth_xmit()
1335 qs->port_stats[SGE_PSTAT_TX_CSUM]++; in t3_eth_xmit()
1337 qs->port_stats[SGE_PSTAT_TSO]++; in t3_eth_xmit()
1339 qs->port_stats[SGE_PSTAT_VLANINS]++; in t3_eth_xmit()
1432 struct sge_qset *qs = txq_to_qset(q, qid); in check_desc_avail() local
1434 set_bit(qid, &qs->txq_stopped); in check_desc_avail()
1438 test_and_clear_bit(qid, &qs->txq_stopped)) in check_desc_avail()
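
Lines 1251-1316 and 1432-1438 repeat one idiom: when a TX queue runs low on descriptors, the sender sets that queue's bit in qs->txq_stopped, then re-checks the descriptor count and, if reclaim freed enough space in the meantime, takes the bit back with test_and_clear_bit() so a concurrent completion cannot leave the queue stopped forever. A rough user-space model of that stop-then-recheck handshake; the names and the threshold are assumptions, and C11 atomics stand in for the kernel bitops:

    #include <stdatomic.h>
    #include <stdbool.h>

    #define STOP_THRES 4  /* illustrative low-water mark, not the driver's value */

    struct model_txq {
        atomic_uint stopped;   /* models the queue's bit in qs->txq_stopped */
        atomic_int  in_use;    /* descriptors currently in flight           */
        int size;              /* ring size                                 */
    };

    /* Returns true if the caller must stop transmitting on this queue. */
    static bool txq_maybe_stop(struct model_txq *q, int ndesc)
    {
        if (q->size - atomic_load(&q->in_use) >= ndesc + STOP_THRES)
            return false;                   /* enough room, keep going   */

        atomic_store(&q->stopped, 1);       /* publish "stopped" first   */

        /* Re-check: reclaim may have freed descriptors after the first test. */
        if (q->size - atomic_load(&q->in_use) >= ndesc + STOP_THRES &&
            atomic_exchange(&q->stopped, 0))
            return false;                   /* lost the race, keep going */

        return true;                        /* genuinely out of room     */
    }
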
1528 struct sge_qset *qs = container_of(w, struct sge_qset, in restart_ctrlq() local
1530 struct sge_txq *q = &qs->txq[TXQ_CTRL]; in restart_ctrlq()
1548 set_bit(TXQ_CTRL, &qs->txq_stopped); in restart_ctrlq()
1552 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) in restart_ctrlq()
1559 t3_write_reg(qs->adap, A_SG_KDOORBELL, in restart_ctrlq()
1570 ret = ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb); in t3_mgmt_tx()
1748 struct sge_qset *qs = container_of(w, struct sge_qset, in restart_offloadq() local
1750 struct sge_txq *q = &qs->txq[TXQ_OFLD]; in restart_offloadq()
1751 const struct port_info *pi = netdev_priv(qs->netdev); in restart_offloadq()
1763 set_bit(TXQ_OFLD, &qs->txq_stopped); in restart_offloadq()
1767 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) in restart_offloadq()
1841 struct sge_qset *qs = &adap->sge.qs[queue_set(skb)]; in t3_offload_tx() local
1844 return ctrl_xmit(adap, &qs->txq[TXQ_CTRL], skb); in t3_offload_tx()
1846 return ofld_xmit(adap, &qs->txq[TXQ_OFLD], skb); in t3_offload_tx()
1865 struct sge_qset *qs = rspq_to_qset(q); in offload_enqueue() local
1867 napi_schedule(&qs->napi); in offload_enqueue()
1903 struct sge_qset *qs = container_of(napi, struct sge_qset, napi); in ofld_poll() local
1904 struct sge_rspq *q = &qs->rspq; in ofld_poll()
1905 struct adapter *adapter = qs->adap; in ofld_poll()
1990 static void restart_tx(struct sge_qset *qs) in restart_tx() argument
1992 if (test_bit(TXQ_ETH, &qs->txq_stopped) && in restart_tx()
1993 should_restart_tx(&qs->txq[TXQ_ETH]) && in restart_tx()
1994 test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) { in restart_tx()
1995 qs->txq[TXQ_ETH].restarts++; in restart_tx()
1996 if (netif_running(qs->netdev)) in restart_tx()
1997 netif_tx_wake_queue(qs->tx_q); in restart_tx()
2000 if (test_bit(TXQ_OFLD, &qs->txq_stopped) && in restart_tx()
2001 should_restart_tx(&qs->txq[TXQ_OFLD]) && in restart_tx()
2002 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) { in restart_tx()
2003 qs->txq[TXQ_OFLD].restarts++; in restart_tx()
2006 queue_work(cxgb3_wq, &qs->txq[TXQ_OFLD].qresume_task); in restart_tx()
2008 if (test_bit(TXQ_CTRL, &qs->txq_stopped) && in restart_tx()
2009 should_restart_tx(&qs->txq[TXQ_CTRL]) && in restart_tx()
2010 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) { in restart_tx()
2011 qs->txq[TXQ_CTRL].restarts++; in restart_tx()
2014 queue_work(cxgb3_wq, &qs->txq[TXQ_CTRL].qresume_task); in restart_tx()
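
restart_tx() (lines 1990-2014) is the other half of that handshake: for each of the three TX queues it checks the stopped bit, asks should_restart_tx() whether enough descriptors were reclaimed, atomically clears the bit, bumps the restarts counter, and then either wakes the netdev TX queue (Ethernet) or queues the qresume_task on cxgb3_wq (offload and control). A simplified, non-atomic skeleton of that per-queue resume policy, with hypothetical callbacks standing in for the wake/queue_work calls:

    #include <stdbool.h>

    enum txq_id { TXQ_ETH, TXQ_OFLD, TXQ_CTRL, TXQ_MAX };

    struct model_qset {
        unsigned long txq_stopped;          /* one bit per TX queue          */
        unsigned int restarts[TXQ_MAX];
        bool (*can_restart)(enum txq_id);   /* models should_restart_tx()    */
        void (*resume)(enum txq_id);        /* wake netdev queue / queue work */
    };

    static void model_restart_tx(struct model_qset *qs)
    {
        for (enum txq_id id = TXQ_ETH; id < TXQ_MAX; id++) {
            if (!(qs->txq_stopped & (1UL << id)))
                continue;
            if (!qs->can_restart(id))
                continue;
            qs->txq_stopped &= ~(1UL << id); /* test_and_clear_bit() in the driver */
            qs->restarts[id]++;
            qs->resume(id);  /* ETH: wake the netdev TX queue; OFLD/CTRL: queue_work() */
        }
    }

Only the Ethernet queue is backed by a netdev TX queue; the offload and control queues are refilled later from workqueue context via their qresume_task.
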
2093 struct sge_qset *qs = rspq_to_qset(rq); in rx_eth() local
2101 qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++; in rx_eth()
2105 skb_record_rx_queue(skb, qs - &adap->sge.qs[pi->first_qset]); in rx_eth()
2108 qs->port_stats[SGE_PSTAT_VLANEX]++; in rx_eth()
2113 napi_gro_receive(&qs->napi, skb); in rx_eth()
2139 static void lro_add_page(struct adapter *adap, struct sge_qset *qs, in lro_add_page() argument
2143 struct port_info *pi = netdev_priv(qs->netdev); in lro_add_page()
2150 if (!qs->nomem) { in lro_add_page()
2151 skb = napi_get_frags(&qs->napi); in lro_add_page()
2152 qs->nomem = !skb; in lro_add_page()
2169 qs->nomem = 0; in lro_add_page()
2178 cpl = qs->lro_va = sd->pg_chunk.va + 2; in lro_add_page()
2180 if ((qs->netdev->features & NETIF_F_RXCSUM) && in lro_add_page()
2183 qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++; in lro_add_page()
2187 cpl = qs->lro_va; in lro_add_page()
2204 skb_record_rx_queue(skb, qs - &adap->sge.qs[pi->first_qset]); in lro_add_page()
2207 qs->port_stats[SGE_PSTAT_VLANEX]++; in lro_add_page()
2210 napi_gro_frags(&qs->napi); in lro_add_page()
2222 static inline void handle_rsp_cntrl_info(struct sge_qset *qs, u32 flags) in handle_rsp_cntrl_info() argument
2228 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_ETH].flags); in handle_rsp_cntrl_info()
2233 qs->txq[TXQ_ETH].processed += credits; in handle_rsp_cntrl_info()
2237 qs->txq[TXQ_CTRL].processed += credits; in handle_rsp_cntrl_info()
2241 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_OFLD].flags); in handle_rsp_cntrl_info()
2245 qs->txq[TXQ_OFLD].processed += credits; in handle_rsp_cntrl_info()
2258 static void check_ring_db(struct adapter *adap, struct sge_qset *qs, in check_ring_db() argument
2262 struct sge_txq *txq = &qs->txq[TXQ_ETH]; in check_ring_db()
2273 struct sge_txq *txq = &qs->txq[TXQ_OFLD]; in check_ring_db()
2328 static int process_responses(struct adapter *adap, struct sge_qset *qs, in process_responses() argument
2331 struct sge_rspq *q = &qs->rspq; in process_responses()
2342 int lro = !!(qs->netdev->features & NETIF_F_GRO); in process_responses()
2379 fl = (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0]; in process_responses()
2386 lro_add_page(adap, qs, fl, in process_responses()
2414 handle_rsp_cntrl_info(qs, flags); in process_responses()
2456 check_ring_db(adap, qs, sleeping); in process_responses()
2459 if (unlikely(qs->txq_stopped != 0)) in process_responses()
2460 restart_tx(qs); in process_responses()
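
process_responses() (lines 2328-2460) is the main RX loop: each response either carries data inline or points at a free-list buffer, and the F_RSPD_FLQ bit in the length field selects which of the qset's two free lists the buffer came from; GRO/LRO is used when the netdev has NETIF_F_GRO, TX credit updates are folded in through handle_rsp_cntrl_info(), doorbells are rung via check_ring_db(), and any stopped TX queues are restarted at the end. A tiny sketch of just the free-list selection; the flag value here is hypothetical, the driver uses F_RSPD_FLQ:

    #include <stdint.h>

    #define MODEL_RSPD_FLQ (1u << 31)  /* illustrative bit, stands in for F_RSPD_FLQ */

    struct model_freelist { unsigned int credits; /* ring details omitted */ };

    struct model_qset_rx {
        struct model_freelist fl[2];   /* the qset's two free-list rings */
    };

    /* Pick the free list a buffer-carrying response refers to, as in
     * "fl = (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];" (line 2379). */
    static struct model_freelist *pick_fl(struct model_qset_rx *qs, uint32_t len_flags)
    {
        return (len_flags & MODEL_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
    }
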
2482 struct sge_qset *qs = container_of(napi, struct sge_qset, napi); in napi_rx_handler() local
2483 struct adapter *adap = qs->adap; in napi_rx_handler()
2484 int work_done = process_responses(adap, qs, budget); in napi_rx_handler()
2503 t3_write_reg(adap, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) | in napi_rx_handler()
2504 V_NEWTIMER(qs->rspq.next_holdoff) | in napi_rx_handler()
2505 V_NEWINDEX(qs->rspq.cidx)); in napi_rx_handler()
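
After processing responses, napi_rx_handler() (lines 2482-2505) re-arms the response queue by writing A_SG_GTS with three fields packed into one word: the response-queue context ID, the next interrupt holdoff timer, and the new consumer index. The sketch below only shows the shape of such a pack; the field positions are invented for illustration, and the real layout comes from the driver's V_RSPQ()/V_NEWTIMER()/V_NEWINDEX() macros:

    #include <stdint.h>

    /* Hypothetical field positions, NOT the real T3 register layout. */
    #define PACK_FIELD(val, shift, width) \
        ((((uint32_t)(val)) & ((1u << (width)) - 1)) << (shift))

    static uint32_t pack_gts(unsigned int cntxt_id, unsigned int holdoff,
                             unsigned int cidx)
    {
        return PACK_FIELD(cntxt_id, 0, 8) |  /* which response queue         */
               PACK_FIELD(holdoff, 8, 8)  |  /* next interrupt holdoff timer */
               PACK_FIELD(cidx, 16, 16);     /* new consumer index           */
    }
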
2532 static int process_pure_responses(struct adapter *adap, struct sge_qset *qs, in process_pure_responses() argument
2535 struct sge_rspq *q = &qs->rspq; in process_pure_responses()
2551 handle_rsp_cntrl_info(qs, flags); in process_pure_responses()
2565 check_ring_db(adap, qs, sleeping); in process_pure_responses()
2568 if (unlikely(qs->txq_stopped != 0)) in process_pure_responses()
2569 restart_tx(qs); in process_pure_responses()
2591 struct sge_qset *qs = rspq_to_qset(q); in handle_responses() local
2597 if (is_pure_response(r) && process_pure_responses(adap, qs, r) == 0) { in handle_responses()
2602 napi_schedule(&qs->napi); in handle_responses()
2612 struct sge_qset *qs = cookie; in t3_sge_intr_msix() local
2613 struct adapter *adap = qs->adap; in t3_sge_intr_msix()
2614 struct sge_rspq *q = &qs->rspq; in t3_sge_intr_msix()
2617 if (process_responses(adap, qs, -1) == 0) in t3_sge_intr_msix()
2631 struct sge_qset *qs = cookie; in t3_sge_intr_msix_napi() local
2632 struct sge_rspq *q = &qs->rspq; in t3_sge_intr_msix_napi()
2636 if (handle_responses(qs->adap, q) < 0) in t3_sge_intr_msix_napi()
2652 struct sge_rspq *q = &adap->sge.qs[0].rspq; in t3_intr_msi()
2656 if (process_responses(adap, &adap->sge.qs[0], -1)) { in t3_intr_msi()
2663 process_responses(adap, &adap->sge.qs[1], -1)) { in t3_intr_msi()
2664 struct sge_rspq *q1 = &adap->sge.qs[1].rspq; in t3_intr_msi()
2679 static int rspq_check_napi(struct sge_qset *qs) in rspq_check_napi() argument
2681 struct sge_rspq *q = &qs->rspq; in rspq_check_napi()
2683 if (!napi_is_scheduled(&qs->napi) && in rspq_check_napi()
2685 napi_schedule(&qs->napi); in rspq_check_napi()
2702 struct sge_rspq *q = &adap->sge.qs[0].rspq; in t3_intr_msi_napi()
2706 new_packets = rspq_check_napi(&adap->sge.qs[0]); in t3_intr_msi_napi()
2708 new_packets += rspq_check_napi(&adap->sge.qs[1]); in t3_intr_msi_napi()
2740 struct sge_rspq *q0 = &adap->sge.qs[0].rspq; in t3_intr()
2741 struct sge_rspq *q1 = &adap->sge.qs[1].rspq; in t3_intr()
2778 struct sge_rspq *q0 = &adap->sge.qs[0].rspq; in t3b_intr()
2795 process_responses_gts(adap, &adap->sge.qs[1].rspq); in t3b_intr()
2812 struct sge_qset *qs0 = &adap->sge.qs[0]; in t3b_intr_napi()
2830 napi_schedule(&adap->sge.qs[1].napi); in t3b_intr_napi()
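
Lines 2612-2830 contrast the interrupt paths: with MSI-X every queue set has its own vector and its own cookie (struct sge_qset *qs = cookie), so the handler only touches that qset and schedules its NAPI instance, while the MSI and legacy INTx handlers start from qs[0] and may also poll qs[1]. A minimal model of the per-vector MSI-X dispatch, with hypothetical types and callback:

    /* Per-vector dispatch as in t3_sge_intr_msix_napi(): the cookie registered
     * with the vector is the queue set itself, so no lookup is needed and all
     * work is deferred to that qset's NAPI poll. Types are illustrative. */
    struct model_msix_qset {
        int responses_pending;                     /* new entries in the rspq */
        void (*schedule_napi)(struct model_msix_qset *);
    };

    static int model_msix_isr(void *cookie)
    {
        struct model_msix_qset *qs = cookie;       /* one queue set per vector */

        if (qs->responses_pending)
            qs->schedule_napi(qs);                 /* like napi_schedule(&qs->napi) */
        return 1;                                  /* handled */
    }
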
2928 struct sge_qset *qs = from_timer(qs, t, tx_reclaim_timer); in sge_timer_tx() local
2929 struct port_info *pi = netdev_priv(qs->netdev); in sge_timer_tx()
2934 if (__netif_tx_trylock(qs->tx_q)) { in sge_timer_tx()
2935 tbd[TXQ_ETH] = reclaim_completed_tx(adap, &qs->txq[TXQ_ETH], in sge_timer_tx()
2937 __netif_tx_unlock(qs->tx_q); in sge_timer_tx()
2940 if (spin_trylock(&qs->txq[TXQ_OFLD].lock)) { in sge_timer_tx()
2941 tbd[TXQ_OFLD] = reclaim_completed_tx(adap, &qs->txq[TXQ_OFLD], in sge_timer_tx()
2943 spin_unlock(&qs->txq[TXQ_OFLD].lock); in sge_timer_tx()
2949 mod_timer(&qs->tx_reclaim_timer, jiffies + next_period); in sge_timer_tx()
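
sge_timer_tx() (lines 2928-2949) reclaims completed TX descriptors in the background: it takes the Ethernet queue's netdev TX lock and the offload queue's spinlock only with trylock, so the timer never blocks the transmit path, then rearms itself with mod_timer(); anything it could not reclaim is picked up on a later period. A user-space model of the trylock-and-reclaim step, with a hypothetical reclaim callback:

    #include <pthread.h>

    struct model_timer_txq {
        pthread_mutex_t lock;
        int (*reclaim)(struct model_timer_txq *);  /* models reclaim_completed_tx() */
    };

    /* Reclaim only if the lock is free; never wait on the TX fast path. */
    static int timer_reclaim_one(struct model_timer_txq *q)
    {
        int reclaimed = 0;

        if (pthread_mutex_trylock(&q->lock) == 0) {
            reclaimed = q->reclaim(q);
            pthread_mutex_unlock(&q->lock);
        }
        return reclaimed;  /* caller may shorten the next period if work remains */
    }
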
2969 struct sge_qset *qs = from_timer(qs, t, rx_reclaim_timer); in sge_timer_rx() local
2970 struct port_info *pi = netdev_priv(qs->netdev); in sge_timer_rx()
2975 &qs->rspq.lock : &adap->sge.qs[0].rspq.lock; in sge_timer_rx()
2980 if (napi_is_scheduled(&qs->napi)) in sge_timer_rx()
2986 if (status & (1 << qs->rspq.cntxt_id)) { in sge_timer_rx()
2987 qs->rspq.starved++; in sge_timer_rx()
2988 if (qs->rspq.credits) { in sge_timer_rx()
2989 qs->rspq.credits--; in sge_timer_rx()
2990 refill_rspq(adap, &qs->rspq, 1); in sge_timer_rx()
2991 qs->rspq.restarted++; in sge_timer_rx()
2993 1 << qs->rspq.cntxt_id); in sge_timer_rx()
2998 if (qs->fl[0].credits < qs->fl[0].size) in sge_timer_rx()
2999 __refill_fl(adap, &qs->fl[0]); in sge_timer_rx()
3000 if (qs->fl[1].credits < qs->fl[1].size) in sge_timer_rx()
3001 __refill_fl(adap, &qs->fl[1]); in sge_timer_rx()
3006 mod_timer(&qs->rx_reclaim_timer, jiffies + RX_RECLAIM_PERIOD); in sge_timer_rx()
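
sge_timer_rx() (lines 2969-3006) guards against RX starvation: if the SGE status register flags this response queue as starved and the queue still holds credits, one credit is handed back through refill_rspq() and the starved/restarted counters are bumped; both free lists are topped up whenever their credits fall below the ring size, and the timer rearms with RX_RECLAIM_PERIOD. A small model of those two checks, with the hardware status read reduced to a flag and allocation failure ignored:

    struct model_rspq { unsigned int credits, starved, restarted; };
    struct model_fl   { unsigned int credits, size; };

    static void timer_rx_check(struct model_rspq *rspq, struct model_fl *fl,
                               int hw_says_starved)
    {
        if (hw_says_starved) {
            rspq->starved++;
            if (rspq->credits) {         /* give one credit back to hardware */
                rspq->credits--;
                rspq->restarted++;
            }
        }
        if (fl->credits < fl->size)
            fl->credits = fl->size;      /* models __refill_fl(), ignoring nomem */
    }
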
3017 void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p) in t3_update_qset_coalesce() argument
3019 qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U);/* can't be 0 */ in t3_update_qset_coalesce()
3020 qs->rspq.polling = p->polling; in t3_update_qset_coalesce()
3021 qs->napi.poll = p->polling ? napi_rx_handler : ofld_poll; in t3_update_qset_coalesce()
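
t3_update_qset_coalesce() (lines 3017-3021) turns the ethtool coalescing setting into the queue's interrupt holdoff timer by scaling the requested microseconds by ten and clamping to at least 1 (the comment on line 3019 notes the value can't be 0), and it selects the NAPI poll callback (napi_rx_handler vs ofld_poll) according to the polling mode. The clamp in isolation:

    /* Mirrors "holdoff_tmr = max(p->coalesce_usecs * 10, 1U)" from line 3019:
     * scale by ten, but never program the hardware timer with zero. */
    static unsigned int holdoff_from_usecs(unsigned int coalesce_usecs)
    {
        unsigned int tmr = coalesce_usecs * 10;

        return tmr ? tmr : 1;
    }
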
3046 struct sge_qset *q = &adapter->sge.qs[id]; in t3_sge_alloc_qset()
3219 struct sge_qset *q = &adap->sge.qs[i]; in t3_start_sge_timers()
3242 struct sge_qset *q = &adap->sge.qs[i]; in t3_stop_sge_timers()
3262 t3_free_qset(adap, &adap->sge.qs[i]); in t3_free_sge_resources()
3311 struct sge_qset *qs = &adap->sge.qs[i]; in t3_sge_stop() local
3313 cancel_work_sync(&qs->txq[TXQ_OFLD].qresume_task); in t3_sge_stop()
3314 cancel_work_sync(&qs->txq[TXQ_CTRL].qresume_task); in t3_sge_stop()