Lines Matching refs:trig (references to the identifier trig; the surrounding code appears to be the Linux kernel's IIO trigger core, drivers/iio/industrialio-trigger.c)
53 struct iio_trigger *trig = to_iio_trigger(dev); in iio_trigger_read_name() local
54 return sysfs_emit(buf, "%s\n", trig->name); in iio_trigger_read_name()
118 int iio_trigger_set_immutable(struct iio_dev *indio_dev, struct iio_trigger *trig) in iio_trigger_set_immutable() argument
122 if (!indio_dev || !trig) in iio_trigger_set_immutable()
129 indio_dev->trig = iio_trigger_get(trig); in iio_trigger_set_immutable()
151 struct iio_trigger *trig = NULL, *iter; in iio_trigger_acquire_by_name() local
156 trig = iter; in iio_trigger_acquire_by_name()
157 iio_trigger_get(trig); in iio_trigger_acquire_by_name()
162 return trig; in iio_trigger_acquire_by_name()
165 void iio_trigger_poll(struct iio_trigger *trig) in iio_trigger_poll() argument
169 if (!atomic_read(&trig->use_count)) { in iio_trigger_poll()
170 atomic_set(&trig->use_count, CONFIG_IIO_CONSUMERS_PER_TRIGGER); in iio_trigger_poll()
173 if (trig->subirqs[i].enabled) in iio_trigger_poll()
174 generic_handle_irq(trig->subirq_base + i); in iio_trigger_poll()
176 iio_trigger_notify_done(trig); in iio_trigger_poll()
189 void iio_trigger_poll_chained(struct iio_trigger *trig) in iio_trigger_poll_chained() argument
193 if (!atomic_read(&trig->use_count)) { in iio_trigger_poll_chained()
194 atomic_set(&trig->use_count, CONFIG_IIO_CONSUMERS_PER_TRIGGER); in iio_trigger_poll_chained()
197 if (trig->subirqs[i].enabled) in iio_trigger_poll_chained()
198 handle_nested_irq(trig->subirq_base + i); in iio_trigger_poll_chained()
200 iio_trigger_notify_done(trig); in iio_trigger_poll_chained()
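
iio_trigger_poll() dispatches the consumers' poll functions with generic_handle_irq(), so it is meant to be called from hard interrupt context; iio_trigger_poll_chained() uses handle_nested_irq() and is for threaded or otherwise nested contexts. A minimal sketch of how a hypothetical trigger driver's data-ready handlers could call them (the my_drdy_* names are illustrative, not from this listing):

#include <linux/interrupt.h>
#include <linux/iio/trigger.h>

/* Hard-IRQ path: the data-ready line fires and we poke the trigger directly. */
static irqreturn_t my_drdy_hard_handler(int irq, void *private)
{
	struct iio_trigger *trig = private;

	iio_trigger_poll(trig);
	return IRQ_HANDLED;
}

/* Threaded path, e.g. when the event first has to be confirmed over I2C. */
static irqreturn_t my_drdy_thread_handler(int irq, void *private)
{
	struct iio_trigger *trig = private;

	iio_trigger_poll_chained(trig);
	return IRQ_HANDLED;
}

Such handlers would typically be wired up with devm_request_irq() or devm_request_threaded_irq(), passing the trigger as the dev_id cookie.
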
206 void iio_trigger_notify_done(struct iio_trigger *trig) in iio_trigger_notify_done() argument
208 if (atomic_dec_and_test(&trig->use_count) && trig->ops && in iio_trigger_notify_done()
209 trig->ops->reenable) in iio_trigger_notify_done()
210 trig->ops->reenable(trig); in iio_trigger_notify_done()
215 static int iio_trigger_get_irq(struct iio_trigger *trig) in iio_trigger_get_irq() argument
219 mutex_lock(&trig->pool_lock); in iio_trigger_get_irq()
220 ret = bitmap_find_free_region(trig->pool, in iio_trigger_get_irq()
223 mutex_unlock(&trig->pool_lock); in iio_trigger_get_irq()
225 ret += trig->subirq_base; in iio_trigger_get_irq()
230 static void iio_trigger_put_irq(struct iio_trigger *trig, int irq) in iio_trigger_put_irq() argument
232 mutex_lock(&trig->pool_lock); in iio_trigger_put_irq()
233 clear_bit(irq - trig->subirq_base, trig->pool); in iio_trigger_put_irq()
234 mutex_unlock(&trig->pool_lock); in iio_trigger_put_irq()
244 int iio_trigger_attach_poll_func(struct iio_trigger *trig, in iio_trigger_attach_poll_func() argument
249 bitmap_empty(trig->pool, CONFIG_IIO_CONSUMERS_PER_TRIGGER); in iio_trigger_attach_poll_func()
256 pf->irq = iio_trigger_get_irq(trig); in iio_trigger_attach_poll_func()
259 trig->name, CONFIG_IIO_CONSUMERS_PER_TRIGGER); in iio_trigger_attach_poll_func()
271 if (trig->ops && trig->ops->set_trigger_state && notinuse) { in iio_trigger_attach_poll_func()
272 ret = trig->ops->set_trigger_state(trig, true); in iio_trigger_attach_poll_func()
282 if (pf->indio_dev->dev.parent == trig->dev.parent) in iio_trigger_attach_poll_func()
283 trig->attached_own_device = true; in iio_trigger_attach_poll_func()
290 iio_trigger_put_irq(trig, pf->irq); in iio_trigger_attach_poll_func()
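
Consumers normally do not call iio_trigger_attach_poll_func() themselves; the IIO core does it when a triggered buffer is enabled, handing the consumer one sub-IRQ from the trigger's pool. A hedged consumer-side sketch, assuming the usual triggered-buffer setup (my_trigger_handler is an illustrative name; iio_pollfunc_store_time and devm_iio_triggered_buffer_setup are the standard helpers from the IIO headers):

#include <linux/interrupt.h>
#include <linux/iio/iio.h>
#include <linux/iio/trigger.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>

/* Bottom half that runs on the sub-IRQ this consumer got from the trigger. */
static irqreturn_t my_trigger_handler(int irq, void *p)
{
	struct iio_poll_func *pf = p;
	struct iio_dev *indio_dev = pf->indio_dev;

	/* A real driver would read a sample and push it to the buffer here. */

	iio_trigger_notify_done(indio_dev->trig);
	return IRQ_HANDLED;
}

/*
 * In probe(), the poll function is typically created and attached for the
 * driver by:
 *	devm_iio_triggered_buffer_setup(dev, indio_dev,
 *					iio_pollfunc_store_time,
 *					my_trigger_handler, NULL);
 */
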
296 int iio_trigger_detach_poll_func(struct iio_trigger *trig, in iio_trigger_detach_poll_func() argument
301 bitmap_weight(trig->pool, CONFIG_IIO_CONSUMERS_PER_TRIGGER) == 1; in iio_trigger_detach_poll_func()
304 if (trig->ops && trig->ops->set_trigger_state && no_other_users) { in iio_trigger_detach_poll_func()
305 ret = trig->ops->set_trigger_state(trig, false); in iio_trigger_detach_poll_func()
309 if (pf->indio_dev->dev.parent == trig->dev.parent) in iio_trigger_detach_poll_func()
310 trig->attached_own_device = false; in iio_trigger_detach_poll_func()
311 iio_trigger_put_irq(trig, pf->irq); in iio_trigger_detach_poll_func()
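
On the provider side, set_trigger_state() is only invoked for the first consumer to attach and the last one to detach (the notinuse / no_other_users bitmap checks above), so it is the natural place to start and stop the hardware event source. A hedged sketch of the ops a trigger driver might supply (all my_* names are illustrative; the drvdata is assumed to have been set with iio_trigger_set_drvdata()):

#include <linux/iio/trigger.h>

struct my_trig_state {
	bool enabled;	/* stand-in for real hardware state */
};

/* Called for the first consumer to attach and the last one to detach. */
static int my_trig_set_state(struct iio_trigger *trig, bool state)
{
	struct my_trig_state *st = iio_trigger_get_drvdata(trig);

	/* A real driver would start or stop its event source here. */
	st->enabled = state;
	return 0;
}

static const struct iio_trigger_ops my_trigger_ops = {
	.set_trigger_state = my_trig_set_state,
};
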
383 if (indio_dev->trig) in iio_trigger_read_current()
384 return sysfs_emit(buf, "%s\n", indio_dev->trig->name); in iio_trigger_read_current()
409 struct iio_trigger *oldtrig = indio_dev->trig; in iio_trigger_write_current()
410 struct iio_trigger *trig; in iio_trigger_write_current() local
424 trig = iio_trigger_acquire_by_name(buf); in iio_trigger_write_current()
425 if (oldtrig == trig) { in iio_trigger_write_current()
430 if (trig && indio_dev->info->validate_trigger) { in iio_trigger_write_current()
431 ret = indio_dev->info->validate_trigger(indio_dev, trig); in iio_trigger_write_current()
436 if (trig && trig->ops && trig->ops->validate_device) { in iio_trigger_write_current()
437 ret = trig->ops->validate_device(trig, indio_dev); in iio_trigger_write_current()
442 indio_dev->trig = trig; in iio_trigger_write_current()
450 if (indio_dev->trig) { in iio_trigger_write_current()
452 iio_trigger_attach_poll_func(indio_dev->trig, in iio_trigger_write_current()
459 if (trig) in iio_trigger_write_current()
460 iio_trigger_put(trig); in iio_trigger_write_current()
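
iio_trigger_write_current() is the store handler behind the current_trigger sysfs attribute: it looks up the named trigger, then lets both the device (iio_info->validate_trigger) and the trigger (ops->validate_device) veto the pairing before swapping indio_dev->trig. A hedged sketch of a device-side validate_trigger that only accepts the trigger the driver itself registered (the my_* names and the st->trig field are illustrative, with st being the private state allocated alongside the iio_dev):

#include <linux/iio/iio.h>
#include <linux/iio/trigger.h>

struct my_state {
	struct iio_trigger *trig;	/* trigger this driver registered */
};

/* Device-side check: refuse any trigger other than the driver's own. */
static int my_validate_trigger(struct iio_dev *indio_dev,
			       struct iio_trigger *trig)
{
	struct my_state *st = iio_priv(indio_dev);

	return trig == st->trig ? 0 : -EINVAL;
}

static const struct iio_info my_info = {
	.validate_trigger = my_validate_trigger,
};

From userspace the pairing is then requested by writing the trigger's name to /sys/bus/iio/devices/iio:deviceX/trigger/current_trigger.
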
480 struct iio_trigger *trig = to_iio_trigger(device); in iio_trig_release() local
483 if (trig->subirq_base) { in iio_trig_release()
485 irq_modify_status(trig->subirq_base + i, in iio_trig_release()
488 irq_set_chip(trig->subirq_base + i, in iio_trig_release()
490 irq_set_handler(trig->subirq_base + i, in iio_trig_release()
494 irq_free_descs(trig->subirq_base, in iio_trig_release()
497 kfree(trig->name); in iio_trig_release()
498 kfree(trig); in iio_trig_release()
509 struct iio_trigger *trig = container_of(chip, struct iio_trigger, subirq_chip); in iio_trig_subirqmask() local
511 trig->subirqs[d->irq - trig->subirq_base].enabled = false; in iio_trig_subirqmask()
517 struct iio_trigger *trig = container_of(chip, struct iio_trigger, subirq_chip); in iio_trig_subirqunmask() local
519 trig->subirqs[d->irq - trig->subirq_base].enabled = true; in iio_trig_subirqunmask()
527 struct iio_trigger *trig; in viio_trigger_alloc() local
530 trig = kzalloc(sizeof *trig, GFP_KERNEL); in viio_trigger_alloc()
531 if (!trig) in viio_trigger_alloc()
534 trig->dev.parent = parent; in viio_trigger_alloc()
535 trig->dev.type = &iio_trig_type; in viio_trigger_alloc()
536 trig->dev.bus = &iio_bus_type; in viio_trigger_alloc()
537 device_initialize(&trig->dev); in viio_trigger_alloc()
539 mutex_init(&trig->pool_lock); in viio_trigger_alloc()
540 trig->subirq_base = irq_alloc_descs(-1, 0, in viio_trigger_alloc()
543 if (trig->subirq_base < 0) in viio_trigger_alloc()
546 trig->name = kvasprintf(GFP_KERNEL, fmt, vargs); in viio_trigger_alloc()
547 if (trig->name == NULL) in viio_trigger_alloc()
550 trig->subirq_chip.name = trig->name; in viio_trigger_alloc()
551 trig->subirq_chip.irq_mask = &iio_trig_subirqmask; in viio_trigger_alloc()
552 trig->subirq_chip.irq_unmask = &iio_trig_subirqunmask; in viio_trigger_alloc()
554 irq_set_chip(trig->subirq_base + i, &trig->subirq_chip); in viio_trigger_alloc()
555 irq_set_handler(trig->subirq_base + i, &handle_simple_irq); in viio_trigger_alloc()
556 irq_modify_status(trig->subirq_base + i, in viio_trigger_alloc()
560 return trig; in viio_trigger_alloc()
563 irq_free_descs(trig->subirq_base, CONFIG_IIO_CONSUMERS_PER_TRIGGER); in viio_trigger_alloc()
565 kfree(trig); in viio_trigger_alloc()
581 struct iio_trigger *trig; in iio_trigger_alloc() local
585 trig = viio_trigger_alloc(parent, fmt, vargs); in iio_trigger_alloc()
588 return trig; in iio_trigger_alloc()
592 void iio_trigger_free(struct iio_trigger *trig) in iio_trigger_free() argument
594 if (trig) in iio_trigger_free()
595 put_device(&trig->dev); in iio_trigger_free()
620 struct iio_trigger **ptr, *trig; in devm_iio_trigger_alloc() local
630 trig = viio_trigger_alloc(parent, fmt, vargs); in devm_iio_trigger_alloc()
632 if (trig) { in devm_iio_trigger_alloc()
633 *ptr = trig; in devm_iio_trigger_alloc()
639 return trig; in devm_iio_trigger_alloc()
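
viio_trigger_alloc() builds the trigger, formats its name from the printf-style arguments, and reserves CONFIG_IIO_CONSUMERS_PER_TRIGGER sub-IRQ descriptors; iio_trigger_alloc() and devm_iio_trigger_alloc() are thin wrappers around it. A hedged probe-time sketch using the managed variant (the my_* names and the "%s-trigger" naming scheme are illustrative; devm_iio_trigger_register() and iio_trigger_set_drvdata() are the usual companion helpers from the IIO headers):

#include <linux/device.h>
#include <linux/iio/iio.h>
#include <linux/iio/trigger.h>

/* Hedged probe-time sketch; the ops struct is assumed to exist elsewhere. */
static int my_register_trigger(struct device *dev, struct iio_dev *indio_dev,
			       const struct iio_trigger_ops *ops)
{
	struct iio_trigger *trig;

	trig = devm_iio_trigger_alloc(dev, "%s-trigger", dev_name(dev));
	if (!trig)
		return -ENOMEM;

	trig->ops = ops;
	iio_trigger_set_drvdata(trig, indio_dev);

	return devm_iio_trigger_register(dev, trig);
}
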
678 return indio_dev->trig->attached_own_device; in iio_trigger_using_own()
694 int iio_trigger_validate_own_device(struct iio_trigger *trig, in iio_trigger_validate_own_device() argument
697 if (indio_dev->dev.parent != trig->dev.parent) in iio_trigger_validate_own_device()
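
iio_trigger_validate_own_device() simply compares the parent devices (line 697 above), which makes it a drop-in validate_device callback for triggers that should only ever be used by the device that created them, for example:

#include <linux/iio/trigger.h>

/* Reuse the core helper so only the trigger's own parent device may
 * select this trigger. */
static const struct iio_trigger_ops my_own_device_trigger_ops = {
	.validate_device = iio_trigger_validate_own_device,
};
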
712 if (indio_dev->trig) in iio_device_unregister_trigger_consumer()
713 iio_trigger_put(indio_dev->trig); in iio_device_unregister_trigger_consumer()