Lines matching refs: idev
218 struct uio_device *idev = dev_get_drvdata(dev); in name_show() local
221 mutex_lock(&idev->info_lock); in name_show()
222 if (!idev->info) { in name_show()
228 ret = sprintf(buf, "%s\n", idev->info->name); in name_show()
231 mutex_unlock(&idev->info_lock); in name_show()
239 struct uio_device *idev = dev_get_drvdata(dev); in version_show() local
242 mutex_lock(&idev->info_lock); in version_show()
243 if (!idev->info) { in version_show()
249 ret = sprintf(buf, "%s\n", idev->info->version); in version_show()
252 mutex_unlock(&idev->info_lock); in version_show()
260 struct uio_device *idev = dev_get_drvdata(dev); in event_show() local
261 return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event)); in event_show()
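The three show functions above back the per-device sysfs attributes name, version and event. A minimal userspace sketch that reads them; the /sys/class/uio/uio0 path is an assumption, since the device number depends on registration order:

#include <stdio.h>
#include <string.h>

/* Read one attribute of a uio device from sysfs into buf (newline stripped). */
static int read_attr(const char *dev, const char *attr, char *buf, size_t len)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path), "/sys/class/uio/%s/%s", dev, attr);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (!fgets(buf, len, f)) {
		fclose(f);
		return -1;
	}
	fclose(f);
	buf[strcspn(buf, "\n")] = '\0';
	return 0;
}

int main(void)
{
	char name[64], version[64], event[64];

	/* "event" is the same counter that event_show() prints: interrupts seen so far */
	if (read_attr("uio0", "name", name, sizeof(name)) ||
	    read_attr("uio0", "version", version, sizeof(version)) ||
	    read_attr("uio0", "event", event, sizeof(event)))
		return 1;
	printf("name=%s version=%s events=%s\n", name, version, event);
	return 0;
}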
284 static int uio_dev_add_attributes(struct uio_device *idev) in uio_dev_add_attributes() argument
296 mem = &idev->info->mem[mi]; in uio_dev_add_attributes()
301 idev->map_dir = kobject_create_and_add("maps", in uio_dev_add_attributes()
302 &idev->dev.kobj); in uio_dev_add_attributes()
303 if (!idev->map_dir) { in uio_dev_add_attributes()
316 ret = kobject_add(&map->kobj, idev->map_dir, "map%d", mi); in uio_dev_add_attributes()
325 port = &idev->info->port[pi]; in uio_dev_add_attributes()
330 idev->portio_dir = kobject_create_and_add("portio", in uio_dev_add_attributes()
331 &idev->dev.kobj); in uio_dev_add_attributes()
332 if (!idev->portio_dir) { in uio_dev_add_attributes()
345 ret = kobject_add(&portio->kobj, idev->portio_dir, in uio_dev_add_attributes()
360 port = &idev->info->port[pi]; in uio_dev_add_attributes()
364 kobject_put(idev->portio_dir); in uio_dev_add_attributes()
369 mem = &idev->info->mem[mi]; in uio_dev_add_attributes()
373 kobject_put(idev->map_dir); in uio_dev_add_attributes()
374 dev_err(&idev->dev, "error creating sysfs files (%d)\n", ret); in uio_dev_add_attributes()
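uio_dev_add_attributes() exports each memory map as /sys/class/uio/uioN/maps/mapM/{name,addr,size,offset} and each port region under portio/. A hedged userspace sketch that reads the size of map 0 of uio0; the path is an assumption and the value is expected in the hex form the kernel prints:

#include <stdio.h>

int main(void)
{
	unsigned long long size;
	FILE *f = fopen("/sys/class/uio/uio0/maps/map0/size", "r");

	if (!f)
		return 1;
	/* the attribute is printed as hex with a 0x prefix, which %llx accepts */
	if (fscanf(f, "%llx", &size) != 1) {
		fclose(f);
		return 1;
	}
	fclose(f);
	printf("map0 spans 0x%llx bytes\n", size);
	return 0;
}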
378 static void uio_dev_del_attributes(struct uio_device *idev) in uio_dev_del_attributes() argument
385 mem = &idev->info->mem[i]; in uio_dev_del_attributes()
390 kobject_put(idev->map_dir); in uio_dev_del_attributes()
393 port = &idev->info->port[i]; in uio_dev_del_attributes()
398 kobject_put(idev->portio_dir); in uio_dev_del_attributes()
401 static int uio_get_minor(struct uio_device *idev) in uio_get_minor() argument
406 retval = idr_alloc(&uio_idr, idev, 0, UIO_MAX_DEVICES, GFP_KERNEL); in uio_get_minor()
408 idev->minor = retval; in uio_get_minor()
411 dev_err(&idev->dev, "too many uio devices\n"); in uio_get_minor()
431 struct uio_device *idev = info->uio_dev; in uio_event_notify() local
433 atomic_inc(&idev->event); in uio_event_notify()
434 wake_up_interruptible(&idev->wait); in uio_event_notify()
435 kill_fasync(&idev->async_queue, SIGIO, POLL_IN); in uio_event_notify()
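uio_event_notify() is exactly the inc/wake/SIGIO sequence shown above and is exported for drivers that detect events outside the generic interrupt path. A kernel-side sketch, assuming a driver that registered its uio_info with irq = UIO_IRQ_CUSTOM and requested its own interrupt; the register offsets and the struct my_priv layout are hypothetical:

#include <linux/bits.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/uio_driver.h>

struct my_priv {				/* hypothetical driver state */
	struct uio_info uinfo;
	void __iomem *regs;
};

/* registered by the driver itself via request_irq(..., my_hw_irq, ..., priv) */
static irqreturn_t my_hw_irq(int irq, void *data)
{
	struct my_priv *priv = data;
	u32 stat = readl(priv->regs + 0x04);	/* assumed status register */

	if (!(stat & BIT(0)))
		return IRQ_NONE;
	writel(stat, priv->regs + 0x04);	/* assumed write-1-to-clear ack */

	uio_event_notify(&priv->uinfo);		/* wakes uio_read()/uio_poll() waiters */
	return IRQ_HANDLED;
}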
446 struct uio_device *idev = (struct uio_device *)dev_id; in uio_interrupt() local
449 ret = idev->info->handler(irq, idev->info); in uio_interrupt()
451 uio_event_notify(idev->info); in uio_interrupt()
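When the uio core owns the interrupt line, uio_interrupt() invokes the driver's handler and only counts an event when it returns IRQ_HANDLED. A sketch of such a handler; the signature is the one declared for struct uio_info, while everything device-specific here is an assumption:

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/uio_driver.h>

static irqreturn_t my_uio_handler(int irq, struct uio_info *info)
{
	void __iomem *regs = info->priv;	/* driver stashed its MMIO base in .priv */
	u32 stat = readl(regs);			/* hypothetical status register */

	if (!stat)
		return IRQ_NONE;		/* not ours: uio_interrupt() counts nothing */

	writel(stat, regs);			/* silence the line (assumed write-1-to-clear) */
	return IRQ_HANDLED;			/* uio_interrupt() now calls uio_event_notify() */
}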
463 struct uio_device *idev; in uio_open() local
468 idev = idr_find(&uio_idr, iminor(inode)); in uio_open()
470 if (!idev) { in uio_open()
475 get_device(&idev->dev); in uio_open()
477 if (!try_module_get(idev->owner)) { in uio_open()
488 listener->dev = idev; in uio_open()
489 listener->event_count = atomic_read(&idev->event); in uio_open()
492 mutex_lock(&idev->info_lock); in uio_open()
493 if (!idev->info) { in uio_open()
494 mutex_unlock(&idev->info_lock); in uio_open()
499 if (idev->info->open) in uio_open()
500 ret = idev->info->open(idev->info, inode); in uio_open()
501 mutex_unlock(&idev->info_lock); in uio_open()
511 module_put(idev->owner); in uio_open()
514 put_device(&idev->dev); in uio_open()
523 struct uio_device *idev = listener->dev; in uio_fasync() local
525 return fasync_helper(fd, filep, on, &idev->async_queue); in uio_fasync()
532 struct uio_device *idev = listener->dev; in uio_release() local
534 mutex_lock(&idev->info_lock); in uio_release()
535 if (idev->info && idev->info->release) in uio_release()
536 ret = idev->info->release(idev->info, inode); in uio_release()
537 mutex_unlock(&idev->info_lock); in uio_release()
539 module_put(idev->owner); in uio_release()
541 put_device(&idev->dev); in uio_release()
548 struct uio_device *idev = listener->dev; in uio_poll() local
551 mutex_lock(&idev->info_lock); in uio_poll()
552 if (!idev->info || !idev->info->irq) in uio_poll()
554 mutex_unlock(&idev->info_lock); in uio_poll()
559 poll_wait(filep, &idev->wait, wait); in uio_poll()
560 if (listener->event_count != atomic_read(&idev->event)) in uio_poll()
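uio_poll() reports readability whenever the per-open event_count lags idev->event. A userspace sketch of the matching loop: poll() for POLLIN, then read() the 4-byte counter so the next poll() blocks again; /dev/uio0 is an assumption:

#include <fcntl.h>
#include <poll.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/uio0", O_RDONLY);
	struct pollfd pfd = { .fd = fd, .events = POLLIN };
	uint32_t count;

	if (fd < 0)
		return 1;
	while (poll(&pfd, 1, -1) == 1 && (pfd.revents & POLLIN)) {
		/* reading resets the per-open count so poll() can block again */
		if (read(fd, &count, sizeof(count)) != sizeof(count))
			break;
		printf("interrupt, total events: %u\n", count);
	}
	close(fd);
	return 0;
}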
569 struct uio_device *idev = listener->dev; in uio_read() local
577 add_wait_queue(&idev->wait, &wait); in uio_read()
580 mutex_lock(&idev->info_lock); in uio_read()
581 if (!idev->info || !idev->info->irq) { in uio_read()
583 mutex_unlock(&idev->info_lock); in uio_read()
586 mutex_unlock(&idev->info_lock); in uio_read()
590 event_count = atomic_read(&idev->event); in uio_read()
615 remove_wait_queue(&idev->wait, &wait); in uio_read()
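The simplest consumer of uio_read() just issues a blocking 4-byte read; the call sleeps on idev->wait until uio_event_notify() changes the counter and then returns the new count. A minimal sketch, device path assumed:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uint32_t events;
	int fd = open("/dev/uio0", O_RDONLY);	/* path is an assumption */

	if (fd < 0)
		return 1;
	/* read() blocks in uio_read() until the event counter changes;
	 * the buffer must be exactly 4 bytes or the kernel returns EINVAL */
	while (read(fd, &events, sizeof(events)) == sizeof(events))
		printf("interrupts so far: %u\n", events);
	close(fd);
	return 0;
}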
624 struct uio_device *idev = listener->dev; in uio_write() local
634 mutex_lock(&idev->info_lock); in uio_write()
635 if (!idev->info) { in uio_write()
640 if (!idev->info->irq) { in uio_write()
645 if (!idev->info->irqcontrol) { in uio_write()
650 retval = idev->info->irqcontrol(idev->info, irq_on); in uio_write()
653 mutex_unlock(&idev->info_lock); in uio_write()
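uio_write() accepts exactly sizeof(s32) bytes and forwards the value to the driver's irqcontrol() hook, so it only works for drivers that implement it. A userspace sketch that re-arms the interrupt by writing 1 (the 1 = enable / 0 = mask convention is the usual one; the device path is assumed):

#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>

/* Write the 4-byte control word that lands in the driver's irqcontrol(). */
static int uio_irq_set(int fd, int enable)
{
	uint32_t val = enable ? 1 : 0;

	return write(fd, &val, sizeof(val)) == sizeof(val) ? 0 : -1;
}

int main(void)
{
	int fd = open("/dev/uio0", O_RDWR);	/* needs write access */

	if (fd < 0)
		return 1;
	uio_irq_set(fd, 1);	/* re-enable the interrupt before waiting again */
	close(fd);
	return 0;
}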
659 struct uio_device *idev = vma->vm_private_data; in uio_find_mem_index() local
662 if (idev->info->mem[vma->vm_pgoff].size == 0) in uio_find_mem_index()
671 struct uio_device *idev = vmf->vma->vm_private_data; in uio_vma_fault() local
678 mutex_lock(&idev->info_lock); in uio_vma_fault()
679 if (!idev->info) { in uio_vma_fault()
696 addr = (void *)(unsigned long)idev->info->mem[mi].addr + offset; in uio_vma_fault()
697 if (idev->info->mem[mi].memtype == UIO_MEM_LOGICAL) in uio_vma_fault()
705 mutex_unlock(&idev->info_lock); in uio_vma_fault()
729 struct uio_device *idev = vma->vm_private_data; in uio_mmap_physical() local
735 mem = idev->info->mem + mi; in uio_mmap_physical()
743 if (idev->info->mem[mi].memtype == UIO_MEM_PHYS) in uio_mmap_physical()
765 struct uio_device *idev = listener->dev; in uio_mmap() local
773 vma->vm_private_data = idev; in uio_mmap()
775 mutex_lock(&idev->info_lock); in uio_mmap()
776 if (!idev->info) { in uio_mmap()
788 actual_pages = ((idev->info->mem[mi].addr & ~PAGE_MASK) in uio_mmap()
789 + idev->info->mem[mi].size + PAGE_SIZE -1) >> PAGE_SHIFT; in uio_mmap()
795 if (idev->info->mmap) { in uio_mmap()
796 ret = idev->info->mmap(idev->info, vma); in uio_mmap()
800 switch (idev->info->mem[mi].memtype) { in uio_mmap()
814 mutex_unlock(&idev->info_lock); in uio_mmap()
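uio_mmap() interprets the mmap offset as a map index: offset M * page_size selects idev->info->mem[M]. A userspace sketch mapping map 0 of /dev/uio0 for one page; it assumes map 0 exists and is at least a page long:

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long pagesz = sysconf(_SC_PAGESIZE);
	int fd = open("/dev/uio0", O_RDWR);
	void *regs;

	if (fd < 0)
		return 1;
	/* offset = map index * page size, so 0 * pagesz selects mem[0] */
	regs = mmap(NULL, pagesz, PROT_READ | PROT_WRITE, MAP_SHARED, fd,
		    0 * pagesz);
	if (regs == MAP_FAILED)
		return 1;
	printf("mapped map0 at %p\n", regs);
	munmap(regs, pagesz);
	close(fd);
	return 0;
}

For UIO_MEM_PHYS maps the kernel additionally requires mem->addr to be page aligned; uio_mmap_physical() rejects the mapping otherwise.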
905 struct uio_device *idev = dev_get_drvdata(dev); in uio_device_release() local
907 kfree(idev); in uio_device_release()
922 struct uio_device *idev; in __uio_register_device() local
933 idev = kzalloc(sizeof(*idev), GFP_KERNEL); in __uio_register_device()
934 if (!idev) { in __uio_register_device()
938 idev->owner = owner; in __uio_register_device()
939 idev->info = info; in __uio_register_device()
940 mutex_init(&idev->info_lock); in __uio_register_device()
941 init_waitqueue_head(&idev->wait); in __uio_register_device()
942 atomic_set(&idev->event, 0); in __uio_register_device()
944 ret = uio_get_minor(idev); in __uio_register_device()
946 kfree(idev); in __uio_register_device()
950 device_initialize(&idev->dev); in __uio_register_device()
951 idev->dev.devt = MKDEV(uio_major, idev->minor); in __uio_register_device()
952 idev->dev.class = &uio_class; in __uio_register_device()
953 idev->dev.parent = parent; in __uio_register_device()
954 idev->dev.release = uio_device_release; in __uio_register_device()
955 dev_set_drvdata(&idev->dev, idev); in __uio_register_device()
957 ret = dev_set_name(&idev->dev, "uio%d", idev->minor); in __uio_register_device()
961 ret = device_add(&idev->dev); in __uio_register_device()
965 ret = uio_dev_add_attributes(idev); in __uio_register_device()
969 info->uio_dev = idev; in __uio_register_device()
981 info->irq_flags, info->name, idev); in __uio_register_device()
991 uio_dev_del_attributes(idev); in __uio_register_device()
993 device_del(&idev->dev); in __uio_register_device()
995 uio_free_minor(idev->minor); in __uio_register_device()
996 put_device(&idev->dev); in __uio_register_device()
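__uio_register_device() above is normally reached through the uio_register_device() wrapper, which supplies THIS_MODULE as the owner. A kernel-side sketch of a caller, assuming a platform device with one MMIO resource and one interrupt; all driver-local names are hypothetical:

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/uio_driver.h>

static irqreturn_t my_uio_handler(int irq, struct uio_info *info)
{
	return IRQ_HANDLED;	/* real drivers must ack/mask the device here */
}

static int my_uio_probe(struct platform_device *pdev)
{
	struct uio_info *info;
	struct resource *res;
	int irq;

	info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;
	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	info->name = "my-uio";			/* shows up as the sysfs "name" attribute */
	info->version = "0.1";			/* sysfs "version" */
	info->irq = irq;
	info->handler = my_uio_handler;
	info->mem[0].memtype = UIO_MEM_PHYS;	/* served by uio_mmap_physical() */
	info->mem[0].addr = res->start;		/* must be page aligned for UIO_MEM_PHYS */
	info->mem[0].size = resource_size(res);

	platform_set_drvdata(pdev, info);
	return uio_register_device(&pdev->dev, info);
}

The uio_info must stay valid until uio_unregister_device(); allocating it with devm_kzalloc() ties its lifetime to the platform device here.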
1046 struct uio_device *idev; in uio_unregister_device() local
1052 idev = info->uio_dev; in uio_unregister_device()
1053 minor = idev->minor; in uio_unregister_device()
1055 mutex_lock(&idev->info_lock); in uio_unregister_device()
1056 uio_dev_del_attributes(idev); in uio_unregister_device()
1059 free_irq(info->irq, idev); in uio_unregister_device()
1061 idev->info = NULL; in uio_unregister_device()
1062 mutex_unlock(&idev->info_lock); in uio_unregister_device()
1064 wake_up_interruptible(&idev->wait); in uio_unregister_device()
1065 kill_fasync(&idev->async_queue, SIGIO, POLL_HUP); in uio_unregister_device()
1067 device_unregister(&idev->dev); in uio_unregister_device()
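The matching teardown for the probe sketch above: the remove path hands the same uio_info back to uio_unregister_device(), which clears idev->info, frees the irq, wakes any blocked readers and drops the device. The int-returning remove prototype follows the older platform_driver convention; the names are the hypothetical ones from the probe sketch:

#include <linux/platform_device.h>
#include <linux/uio_driver.h>

static int my_uio_remove(struct platform_device *pdev)
{
	struct uio_info *info = platform_get_drvdata(pdev);

	uio_unregister_device(info);	/* wakes readers, sends POLL_HUP, unregisters the device */
	return 0;
}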