
Searched refs:sch (Results 1 – 25 of 101) sorted by relevance


/linux/drivers/s390/cio/
cio.c
106 sch->lpm = 0; in cio_start_handle_notoper()
223 if (!sch) in cio_halt()
257 if (!sch) in cio_clear()
292 if (!sch) in cio_cancel()
488 sch->config.isc = sch->isc; in cio_enable_subchannel()
554 if (sch->driver && sch->driver->irq) in do_cio_interrupt()
555 sch->driver->irq(sch); in do_cio_interrupt()
599 if (sch->driver && sch->driver->irq) in cio_tsch()
600 sch->driver->irq(sch); in cio_tsch()
668 sch->config.intparm = (u32)virt_to_phys(sch); in cio_probe_console()
[all …]
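
The do_cio_interrupt() and cio_tsch() hits above repeat one small pattern: call into the bound subchannel driver only if a driver is attached and it registered an irq handler. A minimal sketch of that guard, using only the struct subchannel fields visible in the listing (the helper name is hypothetical, not a function in cio.c):

    static void sch_dispatch_irq(struct subchannel *sch)
    {
            /* A subchannel may have no driver bound, and a bound driver may
             * not implement ->irq, so both are checked before the indirect
             * call into the driver. */
            if (sch->driver && sch->driver->irq)
                    sch->driver->irq(sch);
    }
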
eadm_sch.c
71 EADM_LOG_HEX(6, &sch->schid, sizeof(sch->schid)); in eadm_subchannel_start()
91 cc = csch(sch->schid); in eadm_subchannel_clear()
102 struct subchannel *sch = private->sch; in eadm_subchannel_timeout() local
106 EADM_LOG_HEX(1, &sch->schid, sizeof(sch->schid)); in eadm_subchannel_timeout()
165 sch = private->sch; in eadm_get_idle_sch()
173 return sch; in eadm_get_idle_sch()
190 if (!sch) in eadm_start_aob()
227 private->sch = sch; in eadm_subchannel_probe()
229 ret = cio_enable_subchannel(sch, (u32)virt_to_phys(sch)); in eadm_subchannel_probe()
283 eadm_quiesce(sch); in eadm_subchannel_remove()
[all …]
css.c
99 if (sch) { in call_fn_all_sch()
207 sch = kzalloc(sizeof(*sch), GFP_KERNEL | GFP_DMA); in css_alloc_subchannel()
208 if (!sch) in css_alloc_subchannel()
219 sch->dev.dma_mask = &sch->dma_mask; in css_alloc_subchannel()
302 ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw); in css_update_ssd_info()
449 sch->schid.ssid, sch->schid.sch_no, ret); in css_register_subchannel()
534 ret = sch->driver->sch_event(sch, slow); in css_evaluate_known_subchannel()
1381 ret = sch->driver->probe ? sch->driver->probe(sch) : 0; in css_probe()
1393 sch->driver->remove(sch); in css_remove()
1402 if (sch->driver && sch->driver->shutdown) in css_shutdown()
[all …]
device.c
577 sch->schid.ssid, sch->schid.sch_no, rc); in initiate_logging()
581 sch->schid.ssid, sch->schid.sch_no); in initiate_logging()
1002 memset(&sch->config, 0, sizeof(sch->config)); in io_subchannel_init_config()
1011 sch->opm = chp_get_sch_opm(sch); in io_subchannel_init_fields()
1012 sch->lpm = sch->schib.pmcw.pam & sch->opm; in io_subchannel_init_fields()
1017 sch->schib.pmcw.dev, sch->schid.ssid, in io_subchannel_init_fields()
1018 sch->schid.sch_no, sch->schib.pmcw.pim, in io_subchannel_init_fields()
1019 sch->schib.pmcw.pam, sch->schib.pmcw.pom); in io_subchannel_init_fields()
1183 sch->lpm |= mask & sch->opm; in io_subchannel_chp_event()
1260 if ((sch->schib.pmcw.pam & sch->opm) == sch->vpm) in recovery_check()
[all …]
vfio_ccw_drv.c
59 sch->schid.ssid, sch->schid.sch_no); in vfio_ccw_sch_quiesce()
144 sch->schid.cssid, sch->schid.ssid, in vfio_ccw_sch_irq()
197 sch->schid.cssid, sch->schid.ssid, in vfio_ccw_sch_probe()
219 sch->schid.cssid, sch->schid.ssid, in vfio_ccw_sch_remove()
312 sch->schid.cssid, in vfio_ccw_chp_event()
313 sch->schid.ssid, sch->schid.sch_no, in vfio_ccw_chp_event()
322 sch->opm &= ~mask; in vfio_ccw_chp_event()
323 sch->lpm &= ~mask; in vfio_ccw_chp_event()
336 sch->opm |= mask; in vfio_ccw_chp_event()
337 sch->lpm |= mask; in vfio_ccw_chp_event()
[all …]
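
The vfio_ccw_chp_event() hits show the usual channel-path mask bookkeeping: a path that disappears has its bit cleared in both the operational (opm) and logical (lpm) path masks, and a returning path has it set again. A small illustration of that update; the helper and its bool parameter are invented for clarity and are not part of vfio_ccw_drv.c:

    static void update_path_masks(struct subchannel *sch, u8 mask, bool path_online)
    {
            if (path_online) {
                    /* Path came back: make it operational and usable again. */
                    sch->opm |= mask;
                    sch->lpm |= mask;
            } else {
                    /* Path gone: stop considering it for I/O. */
                    sch->opm &= ~mask;
                    sch->lpm &= ~mask;
            }
    }
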
vfio_ccw_fsm.c
61 sch->lpm &= ~lpm; in fsm_io_helper()
63 sch->lpm = 0; in fsm_io_helper()
65 if (cio_update_schib(sch)) in fsm_io_helper()
92 ccode = hsch(sch->schid); in fsm_do_halt()
131 ccode = csch(sch->schid); in fsm_do_clear()
160 sch->schid.cssid, in fsm_notoper()
161 sch->schid.ssid, in fsm_notoper()
162 sch->schid.sch_no, in fsm_notoper()
237 return sch->schid; in get_schid()
380 sch->isc = VFIO_CCW_ISC; in fsm_open()
[all …]
device_fsm.c
41 struct subchannel *sch; in ccw_timeout_log() local
61 "vpm: %02x\n", sch->lpm, sch->opm, sch->vpm); in ccw_timeout_log()
202 old_lpm = sch->lpm; in ccw_device_recog_done()
208 sch->lpm = sch->schib.pmcw.pam & sch->opm; in ccw_device_recog_done()
401 if (cio_enable_subchannel(sch, (u32)virt_to_phys(sch))) { in ccw_device_recognition()
476 u8 broken_paths = (sch->schib.pmcw.pam & sch->opm) ^ sch->vpm; in ccw_device_handle_broken_paths()
495 sch->lpm = sch->vpm; in ccw_device_verify_done()
557 ret = cio_enable_subchannel(sch, (u32)virt_to_phys(sch)); in ccw_device_online()
700 if (cio_enable_subchannel(sch, (u32)virt_to_phys(sch))) in ccw_device_boxed_verify()
931 if (cio_enable_subchannel(sch, (u32)virt_to_phys(sch)) != 0) in ccw_device_start_id()
[all …]
/linux/drivers/gpio/
gpio-sch.c
56 gpio -= sch->resume_base; in sch_gpio_offset()
295 acpi_remove_gpe_handler(NULL, sch->gpe, sch->gpe_handler); in sch_gpio_remove_gpe_handler()
304 sch->gpe_handler, sch); in sch_gpio_install_gpe_handler()
315 acpi_remove_gpe_handler(NULL, sch->gpe, sch->gpe_handler); in sch_gpio_install_gpe_handler()
326 struct sch_gpio *sch; in sch_gpio_probe() local
331 sch = devm_kzalloc(dev, sizeof(*sch), GFP_KERNEL); in sch_gpio_probe()
332 if (!sch) in sch_gpio_probe()
343 sch->regs = regs; in sch_gpio_probe()
353 sch->chip.ngpio = 14; in sch_gpio_probe()
381 sch->chip.ngpio = 8; in sch_gpio_probe()
[all …]
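
The sch_gpio_probe() hits follow the common devm probe shape: allocate the per-device state with devm_kzalloc(), fail with -ENOMEM on NULL, then fill in the register base and chip geometry (ngpio is 14 or 8 depending on the chipset variant, per the listing). A trimmed sketch under the assumption that this is the driver's platform_device probe, not the full function:

    static int sch_gpio_probe_sketch(struct platform_device *pdev)
    {
            struct sch_gpio *sch;

            /* Managed allocation: freed automatically on driver detach. */
            sch = devm_kzalloc(&pdev->dev, sizeof(*sch), GFP_KERNEL);
            if (!sch)
                    return -ENOMEM;

            /* ... fill sch->regs and sch->chip (ngpio 14 or 8 per variant,
             * as the hits above show), then register the gpiochip ... */
            return 0;
    }
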
/linux/net/sched/
sch_fifo.c
23 READ_ONCE(sch->limit))) in bfifo_enqueue()
32 if (likely(sch->q.qlen < READ_ONCE(sch->limit))) in pfifo_enqueue()
43 if (likely(sch->q.qlen < READ_ONCE(sch->limit))) in pfifo_tail_enqueue()
48 __qdisc_queue_drop_head(sch, &sch->q, to_free); in pfifo_tail_enqueue()
49 qdisc_qstats_drop(sch); in pfifo_tail_enqueue()
52 qdisc_tree_reduce_backlog(sch, 0, prev_backlog - sch->qstats.backlog); in pfifo_tail_enqueue()
65 qopt.handle = sch->handle; in fifo_offload_init()
66 qopt.parent = sch->parent; in fifo_offload_init()
120 bypass = sch->limit >= psched_mtu(qdisc_dev(sch)); in __fifo_init()
141 fifo_offload_init(sch); in fifo_init()
[all …]
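
The bfifo/pfifo enqueue hits reduce to a single check: accept the packet while the queue is below sch->limit (read with READ_ONCE() because the limit can change while packets flow), otherwise drop it. A minimal pfifo-style sketch of that pattern, not the verbatim kernel code:

    static int pfifo_like_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                                  struct sk_buff **to_free)
    {
            /* Packet-count limited FIFO: compare queue length, not bytes. */
            if (likely(sch->q.qlen < READ_ONCE(sch->limit)))
                    return qdisc_enqueue_tail(skb, sch);

            /* Over limit: account the drop, hand the skb back for freeing. */
            return qdisc_drop(skb, sch, to_free);
    }

The byte-limited bfifo variant at line 23 above differs only in comparing the queued backlog plus the new packet's length against the limit instead of the packet count.
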
sch_mq.c
29 .handle = sch->handle, in mq_offload()
42 .handle = sch->handle, in mq_offload_stats()
44 .bstats = &sch->bstats, in mq_offload_stats()
45 .qstats = &sch->qstats, in mq_offload_stats()
58 mq_offload(sch, TC_MQ_DESTROY); in mq_destroy()
76 if (sch->parent != TC_H_ROOT) in mq_init()
100 sch->flags |= TCQ_F_MQROOT; in mq_init()
134 sch->q.qlen = 0; in mq_dump()
136 memset(&sch->qstats, 0, sizeof(sch->qstats)); in mq_dump()
196 qdisc_offload_graft_helper(qdisc_dev(sch), sch, new, *old, in mq_graft()
[all …]
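
The mq_offload() and mq_offload_stats() hits, like the matching ones in sch_prio.c and sch_tbf.c below, share the same offload boilerplate: fill a small command structure with the qdisc's handle and parent before handing it to the driver. A generic illustration with a hypothetical command struct, since each qdisc actually uses its own tc_*_qopt_offload type:

    struct qdisc_offload_cmd {          /* hypothetical, for illustration */
            u32 command;
            u32 handle;
            u32 parent;
    };

    static void fill_offload_cmd(struct Qdisc *sch,
                                 struct qdisc_offload_cmd *cmd, u32 command)
    {
            cmd->command = command;
            cmd->handle  = sch->handle;   /* this qdisc's identifier */
            cmd->parent  = sch->parent;   /* where it is attached */
    }
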
sch_prio.c
80 qdisc_qstats_drop(sch); in prio_enqueue()
89 sch->q.qlen++; in prio_enqueue()
93 qdisc_qstats_drop(sch); in prio_enqueue()
122 sch->q.qlen--; in prio_dequeue()
144 .handle = sch->handle, in prio_offload()
145 .parent = sch->parent, in prio_offload()
209 sch_tree_lock(sch); in prio_tune()
222 sch_tree_unlock(sch); in prio_tune()
249 .handle = sch->handle, in prio_dump_offload()
250 .parent = sch->parent, in prio_dump_offload()
[all …]
sch_codel.c
41 struct Qdisc *sch = ctx; in dequeue_func() local
56 qdisc_qstats_drop(sch); in drop_func()
64 skb = codel_dequeue(sch, &sch->qstats.backlog, &q->params, &q->vars, in codel_qdisc_dequeue()
86 if (likely(qdisc_qlen(sch) < sch->limit)) { in codel_qdisc_enqueue()
90 q = qdisc_priv(sch); in codel_qdisc_enqueue()
116 sch_tree_lock(sch); in codel_change()
147 qlen = sch->q.qlen; in codel_change()
148 while (sch->q.qlen > sch->limit) { in codel_change()
155 qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped); in codel_change()
157 sch_tree_unlock(sch); in codel_change()
[all …]
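
The codel_change() hits (mirrored in sch_pie.c and sch_choke.c further down) show how a qdisc shrinks after its limit is lowered: under sch_tree_lock(), drop from the head until the queue fits, then report the removed packets and bytes upward with qdisc_tree_reduce_backlog(). A simplified sketch of that loop, built only from helpers that appear elsewhere in this listing (the function name is invented):

    static void shrink_to_limit(struct Qdisc *sch)
    {
            unsigned int qlen = sch->q.qlen;
            unsigned int dropped = 0;

            while (sch->q.qlen > sch->limit) {
                    struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);

                    dropped += qdisc_pkt_len(skb);
                    qdisc_qstats_backlog_dec(sch, skb);
                    rtnl_qdisc_drop(skb, sch);
            }
            /* Tell ancestor qdiscs how many packets/bytes just disappeared. */
            qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);
    }
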
sch_ingress.c
37 return ingress_find(sch, classid); in ingress_bind_filter()
51 struct ingress_sched_data *q = qdisc_priv(sch); in ingress_tcf_block()
81 struct net_device *dev = qdisc_dev(sch); in ingress_init()
86 if (sch->parent != TC_H_INGRESS) in ingress_init()
115 struct net_device *dev = qdisc_dev(sch); in ingress_destroy()
118 if (sch->parent != TC_H_INGRESS) in ingress_destroy()
196 return clsact_find(sch, classid); in clsact_bind_filter()
246 struct net_device *dev = qdisc_dev(sch); in clsact_init()
251 if (sch->parent != TC_H_CLSACT) in clsact_init()
294 struct net_device *dev = qdisc_dev(sch); in clsact_destroy()
[all …]
sch_red.c
44 struct Qdisc *sch; member
134 sch->q.qlen++; in red_enqueue()
160 sch->q.qlen--; in red_dequeue()
189 .handle = sch->handle, in red_offload()
190 .parent = sch->parent, in red_offload()
275 sch_tree_lock(sch); in __red_change()
305 sch_tree_unlock(sch); in __red_change()
314 sch_tree_unlock(sch); in __red_change()
323 struct Qdisc *sch = q->sch; in red_adaptative_timer() local
343 q->sch = sch; in red_init()
[all …]
sch_ets.c
122 qopt.handle = sch->handle; in ets_offload_change()
123 qopt.parent = sch->parent; in ets_offload_change()
155 qopt.handle = sch->handle; in ets_offload_destroy()
156 qopt.parent = sch->parent; in ets_offload_destroy()
239 sch_tree_lock(sch); in ets_class_change()
241 sch_tree_unlock(sch); in ets_class_change()
243 ets_offload_change(sch); in ets_class_change()
443 sch->q.qlen++; in ets_qdisc_enqueue()
452 sch->q.qlen--; in ets_qdisc_dequeue_skb()
647 sch_tree_lock(sch); in ets_qdisc_change()
[all …]
sch_multiq.c
71 qdisc_qstats_drop(sch); in multiq_enqueue()
79 sch->q.qlen++; in multiq_enqueue()
83 qdisc_qstats_drop(sch); in multiq_enqueue()
109 sch->q.qlen--; in multiq_dequeue()
148 multiq_reset(struct Qdisc *sch) in multiq_reset() argument
159 multiq_destroy(struct Qdisc *sch) in multiq_destroy() argument
193 sch_tree_lock(sch); in multiq_tune()
205 sch_tree_unlock(sch); in multiq_tune()
216 TC_H_MAKE(sch->handle, in multiq_tune()
219 sch_tree_lock(sch); in multiq_tune()
[all …]
sch_tbf.c
152 qopt.handle = sch->handle; in tbf_offload_change()
153 qopt.parent = sch->parent; in tbf_offload_change()
192 .handle = sch->handle, in tbf_offload_graft()
193 .parent = sch->parent, in tbf_offload_graft()
198 qdisc_offload_graft_helper(qdisc_dev(sch), sch, new, old, in tbf_offload_graft()
232 sch->q.qlen += nb; in tbf_segment()
260 sch->q.qlen++; in tbf_enqueue()
305 sch->q.qlen--; in tbf_dequeue()
447 sch_tree_lock(sch); in tbf_change()
469 sch_tree_unlock(sch); in tbf_change()
[all …]
sch_drr.c
95 sch_tree_lock(sch); in drr_change_class()
98 sch_tree_unlock(sch); in drr_change_class()
129 sch_tree_lock(sch); in drr_change_class()
131 sch_tree_unlock(sch); in drr_change_class()
157 sch_tree_lock(sch); in drr_delete_class()
162 sch_tree_unlock(sch); in drr_delete_class()
164 drr_destroy_class(sch, cl); in drr_delete_class()
344 qdisc_qstats_drop(sch); in drr_enqueue()
354 qdisc_qstats_drop(sch); in drr_enqueue()
365 sch->q.qlen++; in drr_enqueue()
[all …]
sch_etf.c
192 sch->q.qlen++; in etf_enqueue_timesortedlist()
195 reset_watchdog(sch); in etf_enqueue_timesortedlist()
218 skb->dev = qdisc_dev(sch); in timesortedlist_drop()
224 qdisc_qstats_overlimit(sch); in timesortedlist_drop()
225 sch->q.qlen--; in timesortedlist_drop()
242 skb->dev = qdisc_dev(sch); in timesortedlist_remove()
250 sch->q.qlen--; in timesortedlist_remove()
291 reset_watchdog(sch); in etf_dequeue_timesortedlist()
429 sch->q.qlen--; in timesortedlist_clear()
438 if (q->watchdog.qdisc == sch) in etf_reset()
[all …]
sch_cbs.c
99 sch->qstats.backlog += len; in cbs_child_enqueue()
100 sch->q.qlen++; in cbs_child_enqueue()
136 return q->enqueue(skb, sch, to_free); in cbs_enqueue()
169 qdisc_qstats_backlog_dec(sch, skb); in cbs_child_dequeue()
170 qdisc_bstats_update(sch, skb); in cbs_child_dequeue()
171 sch->q.qlen--; in cbs_child_dequeue()
207 skb = cbs_child_dequeue(sch, qdisc); in cbs_dequeue_soft()
243 return q->dequeue(sch); in cbs_dequeue()
413 sch->handle, extack); in cbs_init()
430 return cbs_change(sch, opt, extack); in cbs_init()
[all …]
sch_skbprio.c
84 if (sch->q.qlen < READ_ONCE(sch->limit)) { in skbprio_enqueue()
86 qdisc_qstats_backlog_inc(sch, skb); in skbprio_enqueue()
96 sch->q.qlen++; in skbprio_enqueue()
109 qdisc_qstats_backlog_inc(sch, skb); in skbprio_enqueue()
117 qdisc_drop(to_drop, sch, to_free); in skbprio_enqueue()
127 BUG_ON(sch->q.qlen != 1); in skbprio_enqueue()
150 sch->q.qlen--; in skbprio_dequeue()
152 qdisc_bstats_update(sch, skb); in skbprio_dequeue()
159 BUG_ON(sch->q.qlen); in skbprio_dequeue()
194 sch->limit = 64; in skbprio_init()
[all …]
sch_fq_codel.c
180 sch->q.qlen -= i; in fq_codel_drop()
217 if (++sch->q.qlen <= sch->limit && !memory_limited) in fq_codel_enqueue()
267 sch->q.qlen--; in dequeue_func()
303 skb = codel_dequeue(sch, &sch->qstats.backlog, &q->cparams, in fq_codel_dequeue()
394 sch_tree_lock(sch); in fq_codel_change()
443 while (sch->q.qlen > sch->limit || in fq_codel_change()
455 sch_tree_unlock(sch); in fq_codel_change()
475 sch->limit = 10*1024; in fq_codel_init()
517 if (sch->limit >= 1) in fq_codel_init()
594 sch_tree_lock(sch); in fq_codel_dump_stats()
[all …]
sch_pie.c
30 struct Qdisc *sch; member
91 if (unlikely(qdisc_qlen(sch) >= sch->limit)) { in pie_qdisc_enqueue()
96 if (!pie_drop_early(sch, &q->params, &q->vars, sch->qstats.backlog, in pie_qdisc_enqueue()
151 sch_tree_lock(sch); in pie_change()
193 qlen = sch->q.qlen; in pie_change()
194 while (sch->q.qlen > sch->limit) { in pie_change()
201 qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped); in pie_change()
203 sch_tree_unlock(sch); in pie_change()
425 struct Qdisc *sch = q->sch; in pie_timer() local
449 q->sch = sch; in pie_init()
[all …]
sch_choke.c
128 qdisc_drop(skb, sch, to_free); in choke_drop_by_idx()
129 --sch->q.qlen; in choke_drop_by_idx()
267 if (sch->q.qlen < q->limit) { in choke_enqueue()
270 ++sch->q.qlen; in choke_enqueue()
297 --sch->q.qlen; in choke_dequeue()
314 rtnl_qdisc_drop(skb, sch); in choke_reset()
377 sch_tree_lock(sch); in choke_change()
395 --sch->q.qlen; in choke_change()
398 qdisc_tree_reduce_backlog(sch, oqlen - sch->q.qlen, dropped); in choke_change()
406 sch_tree_lock(sch); in choke_change()
[all …]
/linux/include/net/
sch_generic.h
851 return sch->enqueue(skb, sch, to_free); in qdisc_enqueue()
956 return gnet_stats_copy_queue(d, sch->cpu_qstats, &sch->qstats, qlen); in qdisc_qstats_copy()
964 gnet_stats_add_queue(&qstats, sch->cpu_qstats, &sch->qstats); in qdisc_qstats_qlen_backlog()
982 qdisc_reset(sch); in qdisc_purge_queue()
1128 skb = sch->dequeue(sch); in qdisc_peek_dequeued()
1134 sch->q.qlen++; in qdisc_peek_dequeued()
1151 sch->q.qlen--; in qdisc_update_stats_at_dequeue()
1163 sch->q.qlen++; in qdisc_update_stats_at_enqueue()
1179 sch->q.qlen--; in qdisc_dequeue_peeked()
1182 skb = sch->dequeue(sch); in qdisc_dequeue_peeked()
[all …]
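
The qdisc_peek_dequeued()/qdisc_dequeue_peeked() hits point at the peek-by-dequeue idea: peeking actually pulls a packet out of the qdisc and parks it, so sch->q.qlen is bumped back up until the packet really leaves. A simplified, self-contained sketch of that bookkeeping; the peek_cache struct is hypothetical, the kernel parks the skb in the Qdisc itself:

    struct peek_cache {                       /* hypothetical parking spot */
            struct sk_buff *cached_skb;
    };

    static struct sk_buff *peek_dequeued(struct Qdisc *sch, struct peek_cache *pc)
    {
            if (!pc->cached_skb) {
                    pc->cached_skb = sch->dequeue(sch);  /* pull one packet out */
                    if (pc->cached_skb)
                            sch->q.qlen++;               /* still logically queued */
            }
            return pc->cached_skb;
    }

    static struct sk_buff *dequeue_peeked(struct Qdisc *sch, struct peek_cache *pc)
    {
            struct sk_buff *skb = pc->cached_skb;

            if (skb) {
                    pc->cached_skb = NULL;
                    sch->q.qlen--;                       /* now really leaving */
            } else {
                    skb = sch->dequeue(sch);
            }
            return skb;
    }
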

