Lines matching refs: md
111 struct mport_dev *md; member
179 struct mport_dev *md; member
243 struct rio_mport *mport = priv->md->mport; in rio_mport_maint_rd()
288 struct rio_mport *mport = priv->md->mport; in rio_mport_maint_wr()
341 rio_mport_create_outbound_mapping(struct mport_dev *md, struct file *filp, in rio_mport_create_outbound_mapping() argument
345 struct rio_mport *mport = md->mport; in rio_mport_create_outbound_mapping()
365 map->md = md; in rio_mport_create_outbound_mapping()
367 list_add_tail(&map->node, &md->mappings); in rio_mport_create_outbound_mapping()
375 rio_mport_get_outbound_mapping(struct mport_dev *md, struct file *filp, in rio_mport_get_outbound_mapping() argument
382 mutex_lock(&md->buf_mutex); in rio_mport_get_outbound_mapping()
383 list_for_each_entry(map, &md->mappings, node) { in rio_mport_get_outbound_mapping()
401 err = rio_mport_create_outbound_mapping(md, filp, rioid, raddr, in rio_mport_get_outbound_mapping()
403 mutex_unlock(&md->buf_mutex); in rio_mport_get_outbound_mapping()
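The rio_mport_get_outbound_mapping() lines above trace a lookup-or-create shape: walk md->mappings under buf_mutex and fall back to the create helper on a miss. A minimal sketch of that shape follows; the struct layouts, the match criteria, and the create_mapping() stub are illustrative assumptions, not the driver's actual definitions.

```c
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/errno.h>

struct mport_dev {
	struct mutex buf_mutex;		/* guards the mappings list */
	struct list_head mappings;
	/* ... */
};

struct rio_mport_mapping {
	struct list_head node;
	u16 rioid;
	u64 rio_addr;
	u32 size;
	/* ... */
};

/* stand-in for rio_mport_create_outbound_mapping(); the real helper
 * programs an outbound window and links the new map into md->mappings */
static int create_mapping(struct mport_dev *md, u16 rioid, u64 raddr,
			  u32 size, struct rio_mport_mapping **out)
{
	return -ENODEV;			/* body elided in this sketch */
}

static int get_outbound_mapping(struct mport_dev *md, u16 rioid, u64 raddr,
				u32 size, struct rio_mport_mapping **out)
{
	struct rio_mport_mapping *map;
	int err = 0;

	mutex_lock(&md->buf_mutex);
	list_for_each_entry(map, &md->mappings, node) {
		if (map->rioid == rioid && map->rio_addr == raddr &&
		    map->size == size) {
			*out = map;	/* reuse an existing window */
			goto out;
		}
	}
	/* miss: create a new mapping while still holding buf_mutex */
	err = create_mapping(md, rioid, raddr, size, out);
out:
	mutex_unlock(&md->buf_mutex);
	return err;
}
```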
410 struct mport_dev *data = priv->md; in rio_mport_obw_map()
444 struct mport_dev *md = priv->md; in rio_mport_obw_free() local
448 if (!md->mport->ops->unmap_outb) in rio_mport_obw_free()
456 mutex_lock(&md->buf_mutex); in rio_mport_obw_free()
457 list_for_each_entry_safe(map, _map, &md->mappings, node) { in rio_mport_obw_free()
467 mutex_unlock(&md->buf_mutex); in rio_mport_obw_free()
479 struct mport_dev *md = priv->md; in maint_hdid_set() local
485 md->mport->host_deviceid = hdid; in maint_hdid_set()
486 md->properties.hdid = hdid; in maint_hdid_set()
487 rio_local_set_device_id(md->mport, hdid); in maint_hdid_set()
501 struct mport_dev *md = priv->md; in maint_comptag_set() local
507 rio_local_write_config_32(md->mport, RIO_COMPONENT_TAG_CSR, comptag); in maint_comptag_set()
535 struct mport_dev *md = in mport_release_def_dma() local
538 rmcd_debug(EXIT, "DMA_%d", md->dma_chan->chan_id); in mport_release_def_dma()
539 rio_release_dma(md->dma_chan); in mport_release_def_dma()
540 md->dma_chan = NULL; in mport_release_def_dma()
567 mutex_lock(&req->map->md->buf_mutex); in dma_req_free()
569 mutex_unlock(&req->map->md->buf_mutex); in dma_req_free()
637 priv->dmach = rio_request_mport_dma(priv->md->mport); in get_dma_channel()
640 if (priv->md->dma_chan) { in get_dma_channel()
641 priv->dmach = priv->md->dma_chan; in get_dma_channel()
642 kref_get(&priv->md->dma_ref); in get_dma_channel()
648 } else if (!priv->md->dma_chan) { in get_dma_channel()
650 priv->md->dma_chan = priv->dmach; in get_dma_channel()
651 kref_init(&priv->md->dma_ref); in get_dma_channel()
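get_dma_channel() shares one default DMA channel per device: the first opener requests it via rio_request_mport_dma() (available when CONFIG_RAPIDIO_DMA_ENGINE is set) and starts the refcount with kref_init(); later openers take the published channel with kref_get(), and mport_release_def_dma() above is the kref release that finally hands the channel back. A sketch of that logic, with simplified structs and the serialisation against concurrent openers omitted:

```c
#include <linux/dmaengine.h>
#include <linux/kref.h>
#include <linux/errno.h>
#include <linux/rio_drv.h>

/* simplified: the real structs carry far more state */
struct mport_dev {
	struct rio_mport *mport;
	struct dma_chan *dma_chan;	/* shared default DMA channel */
	struct kref dma_ref;		/* its refcount */
};

struct mport_cdev_priv {
	struct mport_dev *md;
	struct dma_chan *dmach;		/* channel this open file uses */
};

static int get_dma_channel(struct mport_cdev_priv *priv)
{
	struct mport_dev *md = priv->md;

	if (md->dma_chan) {
		/* default channel already requested: share it */
		priv->dmach = md->dma_chan;
		kref_get(&md->dma_ref);
		return 0;
	}

	priv->dmach = rio_request_mport_dma(md->mport);
	if (!priv->dmach)
		return -ENODEV;

	/* first user: publish the channel and start the refcount at 1 */
	md->dma_chan = priv->dmach;
	kref_init(&md->dma_ref);
	return 0;
}
```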
796 struct mport_dev *md = priv->md; in rio_dma_transfer() local
884 mutex_lock(&md->buf_mutex); in rio_dma_transfer()
885 list_for_each_entry(map, &md->mappings, node) { in rio_dma_transfer()
893 mutex_unlock(&md->buf_mutex); in rio_dma_transfer()
959 priv->md->properties.transfer_mode) == 0) in rio_mport_transfer_ioctl()
1068 static int rio_mport_create_dma_mapping(struct mport_dev *md, struct file *filp, in rio_mport_create_dma_mapping() argument
1077 map->virt_addr = dma_alloc_coherent(md->mport->dev.parent, size, in rio_mport_create_dma_mapping()
1087 map->md = md; in rio_mport_create_dma_mapping()
1089 mutex_lock(&md->buf_mutex); in rio_mport_create_dma_mapping()
1090 list_add_tail(&map->node, &md->mappings); in rio_mport_create_dma_mapping()
1091 mutex_unlock(&md->buf_mutex); in rio_mport_create_dma_mapping()
1100 struct mport_dev *md = priv->md; in rio_mport_alloc_dma() local
1108 ret = rio_mport_create_dma_mapping(md, filp, map.length, &mapping); in rio_mport_alloc_dma()
1115 mutex_lock(&md->buf_mutex); in rio_mport_alloc_dma()
1117 mutex_unlock(&md->buf_mutex); in rio_mport_alloc_dma()
1127 struct mport_dev *md = priv->md; in rio_mport_free_dma() local
1136 mutex_lock(&md->buf_mutex); in rio_mport_free_dma()
1137 list_for_each_entry_safe(map, _map, &md->mappings, node) { in rio_mport_free_dma()
1145 mutex_unlock(&md->buf_mutex); in rio_mport_free_dma()
1181 rio_mport_create_inbound_mapping(struct mport_dev *md, struct file *filp, in rio_mport_create_inbound_mapping() argument
1185 struct rio_mport *mport = md->mport; in rio_mport_create_inbound_mapping()
1214 map->md = md; in rio_mport_create_inbound_mapping()
1216 mutex_lock(&md->buf_mutex); in rio_mport_create_inbound_mapping()
1217 list_add_tail(&map->node, &md->mappings); in rio_mport_create_inbound_mapping()
1218 mutex_unlock(&md->buf_mutex); in rio_mport_create_inbound_mapping()
1231 rio_mport_get_inbound_mapping(struct mport_dev *md, struct file *filp, in rio_mport_get_inbound_mapping() argument
1241 mutex_lock(&md->buf_mutex); in rio_mport_get_inbound_mapping()
1242 list_for_each_entry(map, &md->mappings, node) { in rio_mport_get_inbound_mapping()
1256 mutex_unlock(&md->buf_mutex); in rio_mport_get_inbound_mapping()
1262 return rio_mport_create_inbound_mapping(md, filp, raddr, size, mapping); in rio_mport_get_inbound_mapping()
1268 struct mport_dev *md = priv->md; in rio_mport_map_inbound() local
1273 if (!md->mport->ops->map_inb) in rio_mport_map_inbound()
1278 rmcd_debug(IBW, "%s filp=%p", dev_name(&priv->md->dev), filp); in rio_mport_map_inbound()
1280 ret = rio_mport_get_inbound_mapping(md, filp, map.rio_addr, in rio_mport_map_inbound()
1291 mutex_lock(&md->buf_mutex); in rio_mport_map_inbound()
1293 mutex_unlock(&md->buf_mutex); in rio_mport_map_inbound()
1310 struct mport_dev *md = priv->md; in rio_mport_inbound_free() local
1314 rmcd_debug(IBW, "%s filp=%p", dev_name(&priv->md->dev), filp); in rio_mport_inbound_free()
1316 if (!md->mport->ops->unmap_inb) in rio_mport_inbound_free()
1322 mutex_lock(&md->buf_mutex); in rio_mport_inbound_free()
1323 list_for_each_entry_safe(map, _map, &md->mappings, node) { in rio_mport_inbound_free()
1332 mutex_unlock(&md->buf_mutex); in rio_mport_inbound_free()
1344 struct mport_dev *md = priv->md; in maint_port_idx_get() local
1345 u32 port_idx = md->mport->index; in maint_port_idx_get()
1372 dev_warn(&priv->md->dev, DRV_NAME ": event fifo overflow\n"); in rio_mport_add_event()
1415 struct mport_dev *md = priv->md; in rio_mport_add_db_filter() local
1427 ret = rio_request_inb_dbell(md->mport, md, filter.low, filter.high, in rio_mport_add_db_filter()
1431 dev_name(&md->dev), ret); in rio_mport_add_db_filter()
1437 rio_release_inb_dbell(md->mport, filter.low, filter.high); in rio_mport_add_db_filter()
1443 spin_lock_irqsave(&md->db_lock, flags); in rio_mport_add_db_filter()
1445 list_add_tail(&db_filter->data_node, &md->doorbells); in rio_mport_add_db_filter()
1446 spin_unlock_irqrestore(&md->db_lock, flags); in rio_mport_add_db_filter()
1472 spin_lock_irqsave(&priv->md->db_lock, flags); in rio_mport_remove_db_filter()
1482 spin_unlock_irqrestore(&priv->md->db_lock, flags); in rio_mport_remove_db_filter()
1485 rio_release_inb_dbell(priv->md->mport, filter.low, filter.high); in rio_mport_remove_db_filter()
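The doorbell lines pair rio_request_inb_dbell()/rio_release_inb_dbell() with a per-device filter list under the db_lock spinlock; the irqsave variants are used because the inbound-doorbell callback can run in interrupt context. A sketch of the add path, assuming a simplified filter struct (the callback prototype follows rio_request_inb_dbell()'s in <linux/rio_drv.h>):

```c
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/rio_drv.h>

struct mport_dev {
	struct rio_mport *mport;
	spinlock_t db_lock;		/* may be taken from IRQ context */
	struct list_head doorbells;
};

struct rio_mport_db_filter {
	struct list_head data_node;
	u16 low, high;
};

/* inbound doorbell callback; walkers of the doorbells list must
 * hold db_lock with interrupts disabled */
static void mport_db_callback(struct rio_mport *mport, void *dev_id,
			      u16 src, u16 dst, u16 info)
{
	/* match `info` against registered filters here */
}

static int add_db_filter(struct mport_dev *md, u16 low, u16 high)
{
	struct rio_mport_db_filter *f;
	unsigned long flags;
	int ret;

	ret = rio_request_inb_dbell(md->mport, md, low, high,
				    mport_db_callback);
	if (ret)
		return ret;

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f) {
		/* undo the hardware-side registration on failure */
		rio_release_inb_dbell(md->mport, low, high);
		return -ENOMEM;
	}
	f->low = low;
	f->high = high;

	spin_lock_irqsave(&md->db_lock, flags);
	list_add_tail(&f->data_node, &md->doorbells);
	spin_unlock_irqrestore(&md->db_lock, flags);
	return 0;
}
```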
1502 struct mport_dev *md = context; in rio_mport_pw_handler() local
1512 spin_lock(&md->pw_lock); in rio_mport_pw_handler()
1513 list_for_each_entry(pw_filter, &md->portwrites, md_node) { in rio_mport_pw_handler()
1520 spin_unlock(&md->pw_lock); in rio_mport_pw_handler()
1534 struct mport_dev *md = priv->md; in rio_mport_add_pw_filter() local
1549 spin_lock_irqsave(&md->pw_lock, flags); in rio_mport_add_pw_filter()
1550 if (list_empty(&md->portwrites)) in rio_mport_add_pw_filter()
1553 list_add_tail(&pw_filter->md_node, &md->portwrites); in rio_mport_add_pw_filter()
1554 spin_unlock_irqrestore(&md->pw_lock, flags); in rio_mport_add_pw_filter()
1559 ret = rio_add_mport_pw_handler(md->mport, md, in rio_mport_add_pw_filter()
1562 dev_err(&md->dev, in rio_mport_add_pw_filter()
1567 rio_pw_enable(md->mport, 1); in rio_mport_add_pw_filter()
1591 struct mport_dev *md = priv->md; in rio_mport_remove_pw_filter() local
1601 spin_lock_irqsave(&md->pw_lock, flags); in rio_mport_remove_pw_filter()
1610 if (list_empty(&md->portwrites)) in rio_mport_remove_pw_filter()
1612 spin_unlock_irqrestore(&md->pw_lock, flags); in rio_mport_remove_pw_filter()
1615 rio_del_mport_pw_handler(md->mport, priv->md, in rio_mport_remove_pw_filter()
1617 rio_pw_enable(md->mport, 0); in rio_mport_remove_pw_filter()
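The port-write lines amortise registration across filters: only when md->portwrites goes from empty to non-empty does the driver call rio_add_mport_pw_handler() and rio_pw_enable(mport, 1), and the remove path at lines 1610-1617 mirrors that on the last removal. A sketch of the add side; the pwcback prototype here is my reading of the RapidIO core's, so treat it as an assumption:

```c
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/rio_drv.h>

struct mport_dev {
	struct rio_mport *mport;
	spinlock_t pw_lock;		/* guards portwrites */
	struct list_head portwrites;
};

struct rio_mport_pw_filter {
	struct list_head md_node;
	/* match criteria elided */
};

/* assumed pwcback shape; the real handler walks md->portwrites
 * under pw_lock and queues matching events to readers */
static int rio_mport_pw_handler(struct rio_mport *mport, void *context,
				union rio_pw_msg *msg, int step)
{
	return 0;
}

static int add_pw_filter(struct mport_dev *md,
			 struct rio_mport_pw_filter *pw_filter)
{
	unsigned long flags;
	bool first;
	int ret = 0;

	spin_lock_irqsave(&md->pw_lock, flags);
	first = list_empty(&md->portwrites);
	list_add_tail(&pw_filter->md_node, &md->portwrites);
	spin_unlock_irqrestore(&md->pw_lock, flags);

	/* only the first filter registers the mport-level handler
	 * and turns port-write capture on */
	if (first) {
		ret = rio_add_mport_pw_handler(md->mport, md,
					       rio_mport_pw_handler);
		if (!ret)
			rio_pw_enable(md->mport, 1);
	}
	return ret;
}
```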
1661 struct mport_dev *md = priv->md; in rio_mport_add_riodev() local
1689 mport = md->mport; in rio_mport_add_riodev()
1815 mport = priv->md->mport; in rio_mport_del_riodev()
1888 priv->md = chdev; in mport_cdev_open()
1932 struct mport_dev *md; in mport_cdev_release_dma() local
1946 md = priv->md; in mport_cdev_release_dma()
1975 if (priv->dmach != priv->md->dma_chan) { in mport_cdev_release_dma()
1981 kref_put(&md->dma_ref, mport_release_def_dma); in mport_cdev_release_dma()
2002 rmcd_debug(EXIT, "%s filp=%p", dev_name(&priv->md->dev), filp); in mport_cdev_release()
2004 chdev = priv->md; in mport_cdev_release()
2054 struct mport_dev *md = data->md; in mport_cdev_ioctl() local
2056 if (atomic_read(&md->active) == 0) in mport_cdev_ioctl()
2075 md->properties.hdid = md->mport->host_deviceid; in mport_cdev_ioctl()
2076 if (copy_to_user((void __user *)arg, &(md->properties), in mport_cdev_ioctl()
2077 sizeof(md->properties))) in mport_cdev_ioctl()
2133 struct rio_mport *mport = map->md->mport; in mport_release_mapping()
2169 mutex_lock(&map->md->buf_mutex); in mport_mm_close()
2171 mutex_unlock(&map->md->buf_mutex); in mport_mm_close()
2182 struct mport_dev *md; in mport_cdev_mmap() local
2192 md = priv->md; in mport_cdev_mmap()
2195 mutex_lock(&md->buf_mutex); in mport_cdev_mmap()
2196 list_for_each_entry(map, &md->mappings, node) { in mport_cdev_mmap()
2203 mutex_unlock(&md->buf_mutex); in mport_cdev_mmap()
2217 ret = dma_mmap_coherent(md->mport->dev.parent, vma, in mport_cdev_mmap()
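The buffer paths meet at mmap time: rio_mport_create_dma_mapping() obtains the buffer with dma_alloc_coherent() against md->mport->dev.parent, and mport_cdev_mmap() later exposes the same buffer through dma_mmap_coherent() with the same device and addresses. A minimal sketch of that pairing (the mapping struct is simplified, and the vma-size validation the real driver performs is elided):

```c
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/errno.h>

struct rio_mport_mapping {
	void *virt_addr;		/* kernel-side view */
	dma_addr_t phys_addr;		/* device-side handle */
	size_t size;
};

/* allocation side, as in rio_mport_create_dma_mapping() */
static int alloc_dma_buf(struct device *dma_dev,
			 struct rio_mport_mapping *map, size_t size)
{
	map->virt_addr = dma_alloc_coherent(dma_dev, size,
					    &map->phys_addr, GFP_KERNEL);
	if (!map->virt_addr)
		return -ENOMEM;
	map->size = size;
	return 0;
}

/* mmap side, as in mport_cdev_mmap(): the same device and the same
 * virt/dma addresses must be passed back for a coherent mapping */
static int mmap_dma_buf(struct device *dma_dev,
			struct rio_mport_mapping *map,
			struct vm_area_struct *vma)
{
	return dma_mmap_coherent(dma_dev, vma, map->virt_addr,
				 map->phys_addr, map->size);
}
```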
2286 struct rio_mport *mport = priv->md->mport; in mport_write()
2335 struct mport_dev *md; in mport_device_release() local
2338 md = container_of(dev, struct mport_dev, dev); in mport_device_release()
2339 kfree(md); in mport_device_release()
2349 struct mport_dev *md; in mport_cdev_add() local
2352 md = kzalloc(sizeof(*md), GFP_KERNEL); in mport_cdev_add()
2353 if (!md) { in mport_cdev_add()
2358 md->mport = mport; in mport_cdev_add()
2359 mutex_init(&md->buf_mutex); in mport_cdev_add()
2360 mutex_init(&md->file_mutex); in mport_cdev_add()
2361 INIT_LIST_HEAD(&md->file_list); in mport_cdev_add()
2363 device_initialize(&md->dev); in mport_cdev_add()
2364 md->dev.devt = MKDEV(MAJOR(dev_number), mport->id); in mport_cdev_add()
2365 md->dev.class = &dev_class; in mport_cdev_add()
2366 md->dev.parent = &mport->dev; in mport_cdev_add()
2367 md->dev.release = mport_device_release; in mport_cdev_add()
2368 dev_set_name(&md->dev, DEV_NAME "%d", mport->id); in mport_cdev_add()
2369 atomic_set(&md->active, 1); in mport_cdev_add()
2371 cdev_init(&md->cdev, &mport_fops); in mport_cdev_add()
2372 md->cdev.owner = THIS_MODULE; in mport_cdev_add()
2374 INIT_LIST_HEAD(&md->doorbells); in mport_cdev_add()
2375 spin_lock_init(&md->db_lock); in mport_cdev_add()
2376 INIT_LIST_HEAD(&md->portwrites); in mport_cdev_add()
2377 spin_lock_init(&md->pw_lock); in mport_cdev_add()
2378 INIT_LIST_HEAD(&md->mappings); in mport_cdev_add()
2380 md->properties.id = mport->id; in mport_cdev_add()
2381 md->properties.sys_size = mport->sys_size; in mport_cdev_add()
2382 md->properties.hdid = mport->host_deviceid; in mport_cdev_add()
2383 md->properties.index = mport->index; in mport_cdev_add()
2389 md->properties.transfer_mode |= RIO_TRANSFER_MODE_MAPPED; in mport_cdev_add()
2391 md->properties.transfer_mode |= RIO_TRANSFER_MODE_TRANSFER; in mport_cdev_add()
2394 ret = cdev_device_add(&md->cdev, &md->dev); in mport_cdev_add()
2402 md->properties.flags = attr.flags; in mport_cdev_add()
2403 md->properties.link_speed = attr.link_speed; in mport_cdev_add()
2404 md->properties.link_width = attr.link_width; in mport_cdev_add()
2405 md->properties.dma_max_sge = attr.dma_max_sge; in mport_cdev_add()
2406 md->properties.dma_max_size = attr.dma_max_size; in mport_cdev_add()
2407 md->properties.dma_align = attr.dma_align; in mport_cdev_add()
2408 md->properties.cap_sys_size = 0; in mport_cdev_add()
2409 md->properties.cap_transfer_mode = 0; in mport_cdev_add()
2410 md->properties.cap_addr_size = 0; in mport_cdev_add()
2416 list_add_tail(&md->node, &mport_devs); in mport_cdev_add()
2422 return md; in mport_cdev_add()
2425 put_device(&md->dev); in mport_cdev_add()
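The mport_cdev_add() tail is the canonical cdev-plus-device lifecycle: device_initialize() makes the refcount live, cdev_device_add() publishes cdev and device together, and the error path uses put_device() rather than kfree() so mport_device_release() does the freeing. A condensed sketch, with the mport-specific fields dropped and a hypothetical device name:

```c
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>

/* simplified: only the lifecycle-relevant members */
struct mport_dev {
	struct device dev;
	struct cdev cdev;
};

static void mport_device_release(struct device *dev)
{
	kfree(container_of(dev, struct mport_dev, dev));
}

static struct mport_dev *mport_add(const struct file_operations *fops,
				   dev_t devt)
{
	struct mport_dev *md;
	int ret;

	md = kzalloc(sizeof(*md), GFP_KERNEL);
	if (!md)
		return NULL;

	device_initialize(&md->dev);		/* refcount now live */
	md->dev.devt = devt;
	md->dev.release = mport_device_release;
	dev_set_name(&md->dev, "mport_sketch");	/* hypothetical name */

	cdev_init(&md->cdev, fops);
	md->cdev.owner = THIS_MODULE;

	/* publish the char device and the device node atomically */
	ret = cdev_device_add(&md->cdev, &md->dev);
	if (ret) {
		/* never kfree() after device_initialize(): drop the
		 * reference and let the release callback free md */
		put_device(&md->dev);
		return NULL;
	}
	return md;
}
```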
2433 static void mport_cdev_terminate_dma(struct mport_dev *md) in mport_cdev_terminate_dma() argument
2438 rmcd_debug(DMA, "%s", dev_name(&md->dev)); in mport_cdev_terminate_dma()
2440 mutex_lock(&md->file_mutex); in mport_cdev_terminate_dma()
2441 list_for_each_entry(client, &md->file_list, list) { in mport_cdev_terminate_dma()
2447 mutex_unlock(&md->file_mutex); in mport_cdev_terminate_dma()
2449 if (md->dma_chan) { in mport_cdev_terminate_dma()
2450 dmaengine_terminate_all(md->dma_chan); in mport_cdev_terminate_dma()
2451 rio_release_dma(md->dma_chan); in mport_cdev_terminate_dma()
2452 md->dma_chan = NULL; in mport_cdev_terminate_dma()
2462 static int mport_cdev_kill_fasync(struct mport_dev *md) in mport_cdev_kill_fasync() argument
2467 mutex_lock(&md->file_mutex); in mport_cdev_kill_fasync()
2468 list_for_each_entry(client, &md->file_list, list) { in mport_cdev_kill_fasync()
2473 mutex_unlock(&md->file_mutex); in mport_cdev_kill_fasync()
2481 static void mport_cdev_remove(struct mport_dev *md) in mport_cdev_remove() argument
2485 rmcd_debug(EXIT, "Remove %s cdev", md->mport->name); in mport_cdev_remove()
2486 atomic_set(&md->active, 0); in mport_cdev_remove()
2487 mport_cdev_terminate_dma(md); in mport_cdev_remove()
2488 rio_del_mport_pw_handler(md->mport, md, rio_mport_pw_handler); in mport_cdev_remove()
2489 cdev_device_del(&md->cdev, &md->dev); in mport_cdev_remove()
2490 mport_cdev_kill_fasync(md); in mport_cdev_remove()
2500 mutex_lock(&md->buf_mutex); in mport_cdev_remove()
2501 list_for_each_entry_safe(map, _map, &md->mappings, node) { in mport_cdev_remove()
2504 mutex_unlock(&md->buf_mutex); in mport_cdev_remove()
2506 if (!list_empty(&md->mappings)) in mport_cdev_remove()
2508 md->mport->name); in mport_cdev_remove()
2510 rio_release_inb_dbell(md->mport, 0, 0x0fff); in mport_cdev_remove()
2512 put_device(&md->dev); in mport_cdev_remove()
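mport_cdev_remove() releases any mappings still outstanding with the _safe list iterator, since dropping each map unlinks it from md->mappings; the listing does not show the loop body, but pairing it with the mport_release_mapping() kref release seen at line 2133 is the natural reading. A sketch of that drain, with the map's kref field assumed:

```c
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/mutex.h>

struct rio_mport_mapping {
	struct list_head node;
	struct kref ref;		/* assumed; not shown in the listing */
};

static void mport_release_mapping(struct kref *ref)
{
	/* real release: container_of(ref, ...), unmap, list_del, free */
}

/* drop every remaining mapping at device removal; the _safe variant
 * is required because the release callback unlinks map->node */
static void drain_mappings(struct mutex *buf_mutex,
			   struct list_head *mappings)
{
	struct rio_mport_mapping *map, *_map;

	mutex_lock(buf_mutex);
	list_for_each_entry_safe(map, _map, mappings, node)
		kref_put(&map->ref, mport_release_mapping);
	mutex_unlock(buf_mutex);
}
```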