Lines Matching refs: fl

247 static inline unsigned int fl_cap(const struct sge_fl *fl)  in fl_cap()  argument
249 return fl->size - FL_PER_EQ_UNIT; in fl_cap()
262 const struct sge_fl *fl) in fl_starving() argument
266 return fl->avail - fl->pend_cred <= s->fl_starve_thres; in fl_starving()
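
Note: fl_cap() and fl_starving() above define the free list's usable capacity and its starvation test. A minimal standalone sketch of that arithmetic follows; struct demo_fl, the FL_PER_EQ_UNIT value and the threshold argument are illustrative assumptions, not the driver's real definitions.

/* Illustrative model only -- field names mirror the listing. */
#define FL_PER_EQ_UNIT 8		/* assumed: FL descriptors per EQ unit */

struct demo_fl {
	unsigned int size;		/* total descriptors in the ring */
	unsigned int avail;		/* buffers currently allocated */
	unsigned int pend_cred;		/* buffers not yet credited to HW */
};

/* Usable capacity: one EQ unit is always left unused so the hardware
 * never sees a completely full ring. */
static inline unsigned int demo_fl_cap(const struct demo_fl *fl)
{
	return fl->size - FL_PER_EQ_UNIT;
}

/* Starving: only buffers already credited to the hardware count, so
 * pending credit is subtracted before comparing with the threshold. */
static inline int demo_fl_starving(const struct demo_fl *fl,
				   unsigned int fl_starve_thres)
{
	return fl->avail - fl->pend_cred <= fl_starve_thres;
}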
473 static void free_rx_bufs(struct adapter *adapter, struct sge_fl *fl, int n) in free_rx_bufs() argument
476 struct rx_sw_desc *sdesc = &fl->sdesc[fl->cidx]; in free_rx_bufs()
484 if (++fl->cidx == fl->size) in free_rx_bufs()
485 fl->cidx = 0; in free_rx_bufs()
486 fl->avail--; in free_rx_bufs()
503 static void unmap_rx_buf(struct adapter *adapter, struct sge_fl *fl) in unmap_rx_buf() argument
505 struct rx_sw_desc *sdesc = &fl->sdesc[fl->cidx]; in unmap_rx_buf()
512 if (++fl->cidx == fl->size) in unmap_rx_buf()
513 fl->cidx = 0; in unmap_rx_buf()
514 fl->avail--; in unmap_rx_buf()
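
Note: free_rx_bufs() and unmap_rx_buf() above share the same consumer-side bookkeeping: unmap (and optionally free) the buffer at cidx, advance cidx with wraparound, and drop avail. A sketch of just that bookkeeping, with an assumed struct layout:

struct demo_fl {
	unsigned int size, avail, cidx;	/* mirrors the fields used above */
};

static void demo_fl_consume_one(struct demo_fl *fl)
{
	/* ...DMA unmap / free of the buffer at fl->cidx elided... */
	if (++fl->cidx == fl->size)	/* wrap the consumer index */
		fl->cidx = 0;
	fl->avail--;			/* one fewer buffer owned by HW */
}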
525 static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl) in ring_fl_db() argument
533 if (fl->pend_cred >= FL_PER_EQ_UNIT) { in ring_fl_db()
535 val |= PIDX_V(fl->pend_cred / FL_PER_EQ_UNIT); in ring_fl_db()
537 val |= PIDX_T5_V(fl->pend_cred / FL_PER_EQ_UNIT); in ring_fl_db()
548 if (unlikely(fl->bar2_addr == NULL)) { in ring_fl_db()
551 QID_V(fl->cntxt_id) | val); in ring_fl_db()
553 writel(val | QID_V(fl->bar2_qid), in ring_fl_db()
554 fl->bar2_addr + SGE_UDB_KDOORBELL); in ring_fl_db()
561 fl->pend_cred %= FL_PER_EQ_UNIT; in ring_fl_db()
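
Note: ring_fl_db() above hands new buffers to the hardware only in whole EQ units: the pending credit is divided by FL_PER_EQ_UNIT for the PIDX doorbell write (via BAR2, or the indirect kernel doorbell register when bar2_addr is NULL) and the remainder is kept pending. A sketch of that credit math, with the doorbell write stubbed out and the constant assumed:

#define FL_PER_EQ_UNIT 8		/* assumed value */

struct demo_fl {
	unsigned int pend_cred;		/* buffers not yet credited to HW */
};

static void demo_doorbell_write(unsigned int eq_units)
{
	(void)eq_units;			/* stand-in for the PIDX doorbell */
}

static void demo_ring_fl_db(struct demo_fl *fl)
{
	if (fl->pend_cred >= FL_PER_EQ_UNIT) {
		demo_doorbell_write(fl->pend_cred / FL_PER_EQ_UNIT);
		fl->pend_cred %= FL_PER_EQ_UNIT;	/* keep the remainder */
	}
}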
604 static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl, in refill_fl() argument
610 unsigned int cred = fl->avail; in refill_fl()
611 __be64 *d = &fl->desc[fl->pidx]; in refill_fl()
612 struct rx_sw_desc *sdesc = &fl->sdesc[fl->pidx]; in refill_fl()
619 BUG_ON(fl->avail + n > fl->size - FL_PER_EQ_UNIT); in refill_fl()
640 fl->large_alloc_failed++; in refill_fl()
666 fl->avail++; in refill_fl()
667 if (++fl->pidx == fl->size) { in refill_fl()
668 fl->pidx = 0; in refill_fl()
669 sdesc = fl->sdesc; in refill_fl()
670 d = fl->desc; in refill_fl()
679 fl->alloc_failed++; in refill_fl()
695 fl->avail++; in refill_fl()
696 if (++fl->pidx == fl->size) { in refill_fl()
697 fl->pidx = 0; in refill_fl()
698 sdesc = fl->sdesc; in refill_fl()
699 d = fl->desc; in refill_fl()
709 cred = fl->avail - cred; in refill_fl()
710 fl->pend_cred += cred; in refill_fl()
711 ring_fl_db(adapter, fl); in refill_fl()
713 if (unlikely(fl_starving(adapter, fl))) { in refill_fl()
715 set_bit(fl->cntxt_id, adapter->sge.starving_fl); in refill_fl()
725 static inline void __refill_fl(struct adapter *adapter, struct sge_fl *fl) in __refill_fl() argument
727 refill_fl(adapter, fl, in __refill_fl()
728 min((unsigned int)MAX_RX_REFILL, fl_cap(fl) - fl->avail), in __refill_fl()
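
Note: refill_fl() above is the producer side: each newly allocated buffer bumps avail and advances pidx with wraparound, the buffers added in this call become pending credit, the doorbell is rung, and the list is flagged as starving if it is still too low. __refill_fl() simply tops the list up towards capacity in chunks of at most MAX_RX_REFILL. A sketch of the index/credit bookkeeping, with allocation, DMA mapping and the starvation bit elided; the demo_ names and constant values are assumptions:

#define FL_PER_EQ_UNIT	8		/* assumed */
#define MAX_RX_REFILL	16		/* assumed */

struct demo_fl {
	unsigned int size, avail, pidx, pend_cred;
};

static unsigned int demo_fl_cap(const struct demo_fl *fl)
{
	return fl->size - FL_PER_EQ_UNIT;
}

static unsigned int demo_refill_fl(struct demo_fl *fl, unsigned int n)
{
	unsigned int cred = fl->avail;

	while (n--) {
		/* ...allocate a buffer and write its DMA address into the
		 * descriptor at fl->pidx (elided)... */
		fl->avail++;
		if (++fl->pidx == fl->size)	/* wrap the producer index */
			fl->pidx = 0;
	}

	cred = fl->avail - cred;	/* buffers actually added */
	fl->pend_cred += cred;		/* credited on the next doorbell */
	/* ring_fl_db() would be called here */
	return cred;
}

static void demo_refill_fl_to_cap(struct demo_fl *fl)
{
	unsigned int need = demo_fl_cap(fl) - fl->avail;

	demo_refill_fl(fl, need < MAX_RX_REFILL ? need : MAX_RX_REFILL);
}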
1708 static void restore_rx_bufs(const struct pkt_gl *gl, struct sge_fl *fl, in restore_rx_bufs() argument
1714 if (fl->cidx == 0) in restore_rx_bufs()
1715 fl->cidx = fl->size - 1; in restore_rx_bufs()
1717 fl->cidx--; in restore_rx_bufs()
1718 sdesc = &fl->sdesc[fl->cidx]; in restore_rx_bufs()
1721 fl->avail++; in restore_rx_bufs()
1793 free_rx_bufs(rspq->adapter, &rxq->fl, in process_responses()
1806 BUG_ON(rxq->fl.avail == 0); in process_responses()
1807 sdesc = &rxq->fl.sdesc[rxq->fl.cidx]; in process_responses()
1815 unmap_rx_buf(rspq->adapter, &rxq->fl); in process_responses()
1839 restore_rx_bufs(&gl, &rxq->fl, frag); in process_responses()
1869 fl_cap(&rxq->fl) - rxq->fl.avail >= 2*FL_PER_EQ_UNIT) in process_responses()
1870 __refill_fl(rspq->adapter, &rxq->fl); in process_responses()
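
Note: in process_responses() above, buffers are unmapped from cidx as a packet's fragments are gathered; restore_rx_bufs() can hand unconsumed fragments back by stepping cidx backwards, and the queue is only topped up once at least two EQ units of headroom have accumulated. A sketch of the rewind and of that refill trigger; the layout and the 2*FL_PER_EQ_UNIT threshold follow the listing, everything else is illustrative:

#define FL_PER_EQ_UNIT 8		/* assumed */

struct demo_fl {
	unsigned int size, avail, cidx;
};

static unsigned int demo_fl_cap(const struct demo_fl *fl)
{
	return fl->size - FL_PER_EQ_UNIT;
}

/* Give 'frags' just-consumed buffers back by stepping cidx backwards. */
static void demo_restore_rx_bufs(struct demo_fl *fl, int frags)
{
	while (frags--) {
		if (fl->cidx == 0)
			fl->cidx = fl->size - 1;	/* wrap backwards */
		else
			fl->cidx--;
		fl->avail++;
	}
}

/* Refill only once two EQ units' worth of descriptors are free. */
static int demo_should_refill(const struct demo_fl *fl)
{
	return demo_fl_cap(fl) - fl->avail >= 2 * FL_PER_EQ_UNIT;
}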
2082 struct sge_fl *fl = s->egr_map[id]; in sge_rx_timer_cb() local
2093 if (fl_starving(adapter, fl)) { in sge_rx_timer_cb()
2096 rxq = container_of(fl, struct sge_eth_rxq, fl); in sge_rx_timer_cb()
2098 fl->starving++; in sge_rx_timer_cb()
2204 struct sge_fl *fl, rspq_handler_t hnd) in t4vf_sge_alloc_rxq() argument
2269 if (fl) { in t4vf_sge_alloc_rxq()
2280 if (fl->size < s->fl_starve_thres - 1 + 2 * FL_PER_EQ_UNIT) in t4vf_sge_alloc_rxq()
2281 fl->size = s->fl_starve_thres - 1 + 2 * FL_PER_EQ_UNIT; in t4vf_sge_alloc_rxq()
2282 fl->size = roundup(fl->size, FL_PER_EQ_UNIT); in t4vf_sge_alloc_rxq()
2283 fl->desc = alloc_ring(adapter->pdev_dev, fl->size, in t4vf_sge_alloc_rxq()
2285 &fl->addr, &fl->sdesc, s->stat_len); in t4vf_sge_alloc_rxq()
2286 if (!fl->desc) { in t4vf_sge_alloc_rxq()
2296 flsz = (fl->size / FL_PER_EQ_UNIT + in t4vf_sge_alloc_rxq()
2328 cmd.fl0addr = cpu_to_be64(fl->addr); in t4vf_sge_alloc_rxq()
2356 rspq->offset = fl ? 0 : -1; in t4vf_sge_alloc_rxq()
2358 if (fl) { in t4vf_sge_alloc_rxq()
2359 fl->cntxt_id = be16_to_cpu(rpl.fl0id); in t4vf_sge_alloc_rxq()
2360 fl->avail = 0; in t4vf_sge_alloc_rxq()
2361 fl->pend_cred = 0; in t4vf_sge_alloc_rxq()
2362 fl->pidx = 0; in t4vf_sge_alloc_rxq()
2363 fl->cidx = 0; in t4vf_sge_alloc_rxq()
2364 fl->alloc_failed = 0; in t4vf_sge_alloc_rxq()
2365 fl->large_alloc_failed = 0; in t4vf_sge_alloc_rxq()
2366 fl->starving = 0; in t4vf_sge_alloc_rxq()
2371 fl->bar2_addr = bar2_address(adapter, in t4vf_sge_alloc_rxq()
2372 fl->cntxt_id, in t4vf_sge_alloc_rxq()
2374 &fl->bar2_qid); in t4vf_sge_alloc_rxq()
2376 refill_fl(adapter, fl, fl_cap(fl), GFP_KERNEL); in t4vf_sge_alloc_rxq()
2391 if (fl && fl->desc) { in t4vf_sge_alloc_rxq()
2392 kfree(fl->sdesc); in t4vf_sge_alloc_rxq()
2393 fl->sdesc = NULL; in t4vf_sge_alloc_rxq()
2395 fl->desc, fl->addr); in t4vf_sge_alloc_rxq()
2396 fl->desc = NULL; in t4vf_sge_alloc_rxq()
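
Note: t4vf_sge_alloc_rxq() above sizes the free list before allocating it: the size is raised to at least fl_starve_thres - 1 + 2*FL_PER_EQ_UNIT, so that even a completely full list stays above the starvation threshold, and then rounded up to a whole number of EQ units (the firmware-visible ring length, including the status page, is derived from that). A sketch of the sizing math only; constants and names are illustrative:

#define FL_PER_EQ_UNIT 8		/* assumed */

static unsigned int demo_fl_ring_size(unsigned int requested,
				      unsigned int fl_starve_thres)
{
	unsigned int min_size = fl_starve_thres - 1 + 2 * FL_PER_EQ_UNIT;
	unsigned int size = requested;

	if (size < min_size)		/* enforce the starvation floor */
		size = min_size;

	/* round up to a whole number of EQ units, as roundup() does */
	return (size + FL_PER_EQ_UNIT - 1) / FL_PER_EQ_UNIT * FL_PER_EQ_UNIT;
}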
2528 struct sge_fl *fl) in free_rspq_fl() argument
2531 unsigned int flid = fl ? fl->cntxt_id : 0xffff; in free_rspq_fl()
2543 if (fl) { in free_rspq_fl()
2544 free_rx_bufs(adapter, fl, fl->avail); in free_rspq_fl()
2546 fl->size * sizeof(*fl->desc) + s->stat_len, in free_rspq_fl()
2547 fl->desc, fl->addr); in free_rspq_fl()
2548 kfree(fl->sdesc); in free_rspq_fl()
2549 fl->sdesc = NULL; in free_rspq_fl()
2550 fl->cntxt_id = 0; in free_rspq_fl()
2551 fl->desc = NULL; in free_rspq_fl()
2572 free_rspq_fl(adapter, &rxq->rspq, &rxq->fl); in t4vf_free_sge_resources()