Lines matching refs:lu (all references to the identifier lu)
142 static void sbp2_queue_work(struct sbp2_logical_unit *lu, unsigned long delay) in sbp2_queue_work() argument
144 queue_delayed_work(fw_workqueue, &lu->work, delay); in sbp2_queue_work()
179 static const struct device *lu_dev(const struct sbp2_logical_unit *lu) in lu_dev() argument
181 return &lu->tgt->unit->device; in lu_dev()
261 struct sbp2_logical_unit *lu; member
410 struct sbp2_logical_unit *lu = callback_data; in sbp2_status_write() local
428 dev_notice(lu_dev(lu), in sbp2_status_write()
435 spin_lock_irqsave(&lu->tgt->lock, flags); in sbp2_status_write()
436 list_for_each_entry(orb, &lu->orb_list, link) { in sbp2_status_write()
444 spin_unlock_irqrestore(&lu->tgt->lock, flags); in sbp2_status_write()
446 if (&orb->link != &lu->orb_list) { in sbp2_status_write()
450 dev_err(lu_dev(lu), "status write for unknown ORB\n"); in sbp2_status_write()
471 spin_lock_irqsave(&orb->lu->tgt->lock, flags); in complete_transaction()
477 spin_unlock_irqrestore(&orb->lu->tgt->lock, flags); in complete_transaction()
482 spin_unlock_irqrestore(&orb->lu->tgt->lock, flags); in complete_transaction()
488 static void sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu, in sbp2_send_orb() argument
491 struct fw_device *device = target_parent_device(lu->tgt); in sbp2_send_orb()
498 orb->lu = lu; in sbp2_send_orb()
499 spin_lock_irqsave(&lu->tgt->lock, flags); in sbp2_send_orb()
500 list_add_tail(&orb->link, &lu->orb_list); in sbp2_send_orb()
501 spin_unlock_irqrestore(&lu->tgt->lock, flags); in sbp2_send_orb()
511 static int sbp2_cancel_orbs(struct sbp2_logical_unit *lu) in sbp2_cancel_orbs() argument
513 struct fw_device *device = target_parent_device(lu->tgt); in sbp2_cancel_orbs()
519 spin_lock_irq(&lu->tgt->lock); in sbp2_cancel_orbs()
520 list_splice_init(&lu->orb_list, &list); in sbp2_cancel_orbs()
521 spin_unlock_irq(&lu->tgt->lock); in sbp2_cancel_orbs()
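
The sbp2_send_orb() and sbp2_cancel_orbs() lines above show how pending ORBs are tracked per logical unit: an ORB is linked onto lu->orb_list under lu->tgt->lock, and cancellation splices the whole list onto a private head under the same lock before walking it. A minimal sketch of that locking pattern follows; my_tgt, my_lu, my_orb and the two functions are illustrative placeholders, not the driver's real types or code.

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_tgt { spinlock_t lock; };
struct my_lu  { struct my_tgt *tgt; struct list_head orb_list; };
struct my_orb { struct list_head link; };

/* Queue an ORB: link it onto the per-LU pending list under the target lock. */
static void my_send_orb(struct my_orb *orb, struct my_lu *lu)
{
	unsigned long flags;

	spin_lock_irqsave(&lu->tgt->lock, flags);
	list_add_tail(&orb->link, &lu->orb_list);
	spin_unlock_irqrestore(&lu->tgt->lock, flags);
	/* the real driver then sends the ORB pointer to the target */
}

/* Cancel all pending ORBs: detach the whole list while holding the lock,
 * then walk the private copy with the lock released. */
static void my_cancel_orbs(struct my_lu *lu)
{
	struct my_orb *orb, *next;
	LIST_HEAD(list);

	spin_lock_irq(&lu->tgt->lock);
	list_splice_init(&lu->orb_list, &list);
	spin_unlock_irq(&lu->tgt->lock);

	list_for_each_entry_safe(orb, next, &list, link)
		kfree(orb);	/* the real driver completes each cancelled ORB */
}
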
547 static int sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id, in sbp2_send_management_orb() argument
551 struct fw_device *device = target_parent_device(lu->tgt); in sbp2_send_management_orb()
581 cpu_to_be32(lu->address_handler.offset >> 32); in sbp2_send_management_orb()
583 cpu_to_be32(lu->address_handler.offset); in sbp2_send_management_orb()
590 timeout = lu->tgt->mgt_orb_timeout; in sbp2_send_management_orb()
604 sbp2_send_orb(&orb->base, lu, node_id, generation, in sbp2_send_management_orb()
605 lu->tgt->management_agent_address); in sbp2_send_management_orb()
610 if (sbp2_cancel_orbs(lu) == 0) { in sbp2_send_management_orb()
611 dev_err(lu_dev(lu), "ORB reply timed out, rcode 0x%02x\n", in sbp2_send_management_orb()
617 dev_err(lu_dev(lu), "management write failed, rcode 0x%02x\n", in sbp2_send_management_orb()
624 dev_err(lu_dev(lu), "error status: %d:%d\n", in sbp2_send_management_orb()
645 static void sbp2_agent_reset(struct sbp2_logical_unit *lu) in sbp2_agent_reset() argument
647 struct fw_device *device = target_parent_device(lu->tgt); in sbp2_agent_reset()
651 lu->tgt->node_id, lu->generation, device->max_speed, in sbp2_agent_reset()
652 lu->command_block_agent_address + SBP2_AGENT_RESET, in sbp2_agent_reset()
662 static void sbp2_agent_reset_no_wait(struct sbp2_logical_unit *lu) in sbp2_agent_reset_no_wait() argument
664 struct fw_device *device = target_parent_device(lu->tgt); in sbp2_agent_reset_no_wait()
673 lu->tgt->node_id, lu->generation, device->max_speed, in sbp2_agent_reset_no_wait()
674 lu->command_block_agent_address + SBP2_AGENT_RESET, in sbp2_agent_reset_no_wait()
695 static void sbp2_conditionally_block(struct sbp2_logical_unit *lu) in sbp2_conditionally_block() argument
697 struct sbp2_target *tgt = lu->tgt; in sbp2_conditionally_block()
704 if (!tgt->dont_block && !lu->blocked && in sbp2_conditionally_block()
705 lu->generation != card->generation) { in sbp2_conditionally_block()
706 lu->blocked = true; in sbp2_conditionally_block()
719 static void sbp2_conditionally_unblock(struct sbp2_logical_unit *lu) in sbp2_conditionally_unblock() argument
721 struct sbp2_target *tgt = lu->tgt; in sbp2_conditionally_unblock()
728 if (lu->blocked && lu->generation == card->generation) { in sbp2_conditionally_unblock()
729 lu->blocked = false; in sbp2_conditionally_unblock()
783 static void sbp2_set_busy_timeout(struct sbp2_logical_unit *lu) in sbp2_set_busy_timeout() argument
785 struct fw_device *device = target_parent_device(lu->tgt); in sbp2_set_busy_timeout()
789 lu->tgt->node_id, lu->generation, device->max_speed, in sbp2_set_busy_timeout()
797 struct sbp2_logical_unit *lu = in sbp2_login() local
799 struct sbp2_target *tgt = lu->tgt; in sbp2_login()
815 if (lu->has_sdev) in sbp2_login()
816 sbp2_send_management_orb(lu, device->node_id, generation, in sbp2_login()
817 SBP2_LOGOUT_REQUEST, lu->login_id, NULL); in sbp2_login()
819 if (sbp2_send_management_orb(lu, node_id, generation, in sbp2_login()
820 SBP2_LOGIN_REQUEST, lu->lun, &response) < 0) { in sbp2_login()
821 if (lu->retries++ < 5) { in sbp2_login()
822 sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5)); in sbp2_login()
825 lu->lun); in sbp2_login()
827 sbp2_unblock(lu->tgt); in sbp2_login()
835 lu->generation = generation; in sbp2_login()
837 lu->command_block_agent_address = in sbp2_login()
840 lu->login_id = be32_to_cpu(response.misc) & 0xffff; in sbp2_login()
843 lu->lun, lu->retries); in sbp2_login()
846 sbp2_set_busy_timeout(lu); in sbp2_login()
848 lu->workfn = sbp2_reconnect; in sbp2_login()
849 sbp2_agent_reset(lu); in sbp2_login()
852 if (lu->has_sdev) { in sbp2_login()
853 sbp2_cancel_orbs(lu); in sbp2_login()
854 sbp2_conditionally_unblock(lu); in sbp2_login()
859 if (lu->tgt->workarounds & SBP2_WORKAROUND_DELAY_INQUIRY) in sbp2_login()
863 sdev = __scsi_add_device(shost, 0, 0, sbp2_lun2int(lu->lun), lu); in sbp2_login()
885 lu->has_sdev = true; in sbp2_login()
896 sbp2_send_management_orb(lu, device->node_id, generation, in sbp2_login()
897 SBP2_LOGOUT_REQUEST, lu->login_id, NULL); in sbp2_login()
902 lu->workfn = sbp2_login; in sbp2_login()
907 struct sbp2_logical_unit *lu = in sbp2_reconnect() local
909 struct sbp2_target *tgt = lu->tgt; in sbp2_reconnect()
921 if (sbp2_send_management_orb(lu, node_id, generation, in sbp2_reconnect()
923 lu->login_id, NULL) < 0) { in sbp2_reconnect()
933 lu->retries++ >= 5) { in sbp2_reconnect()
935 lu->retries = 0; in sbp2_reconnect()
936 lu->workfn = sbp2_login; in sbp2_reconnect()
938 sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5)); in sbp2_reconnect()
946 lu->generation = generation; in sbp2_reconnect()
949 lu->lun, lu->retries); in sbp2_reconnect()
951 sbp2_agent_reset(lu); in sbp2_reconnect()
952 sbp2_cancel_orbs(lu); in sbp2_reconnect()
953 sbp2_conditionally_unblock(lu); in sbp2_reconnect()
958 struct sbp2_logical_unit *lu = container_of(to_delayed_work(work), in sbp2_lu_workfn() local
960 lu->workfn(work); in sbp2_lu_workfn()
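
Together with the sbp2_queue_work() lines at the top of the listing, sbp2_lu_workfn() shows the work-dispatch scheme: a single delayed-work handler recovers the logical unit via container_of()/to_delayed_work() and forwards to whatever lu->workfn currently points at (sbp2_login and sbp2_reconnect in the lines above). A reduced sketch of that scheme, using system_wq where the driver uses fw_workqueue; my_lu and the function names are illustrative, not the driver's.

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct my_lu {
	struct delayed_work work;
	void (*workfn)(struct work_struct *work);	/* current state handler */
};

/* Single entry point for the delayed work; forwards to whichever handler
 * is currently installed (login vs. reconnect in the real driver). */
static void my_lu_workfn(struct work_struct *work)
{
	struct my_lu *lu = container_of(to_delayed_work(work),
					struct my_lu, work);

	lu->workfn(work);
}

static void my_queue_work(struct my_lu *lu, unsigned long delay)
{
	/* system_wq stands in for the driver's fw_workqueue */
	queue_delayed_work(system_wq, &lu->work, delay);
}
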
965 struct sbp2_logical_unit *lu; in sbp2_add_logical_unit() local
967 lu = kmalloc(sizeof(*lu), GFP_KERNEL); in sbp2_add_logical_unit()
968 if (!lu) in sbp2_add_logical_unit()
971 lu->address_handler.length = 0x100; in sbp2_add_logical_unit()
972 lu->address_handler.address_callback = sbp2_status_write; in sbp2_add_logical_unit()
973 lu->address_handler.callback_data = lu; in sbp2_add_logical_unit()
975 if (fw_core_add_address_handler(&lu->address_handler, in sbp2_add_logical_unit()
977 kfree(lu); in sbp2_add_logical_unit()
981 lu->tgt = tgt; in sbp2_add_logical_unit()
982 lu->lun = lun_entry & 0xffff; in sbp2_add_logical_unit()
983 lu->login_id = INVALID_LOGIN_ID; in sbp2_add_logical_unit()
984 lu->retries = 0; in sbp2_add_logical_unit()
985 lu->has_sdev = false; in sbp2_add_logical_unit()
986 lu->blocked = false; in sbp2_add_logical_unit()
988 INIT_LIST_HEAD(&lu->orb_list); in sbp2_add_logical_unit()
989 lu->workfn = sbp2_login; in sbp2_add_logical_unit()
990 INIT_DELAYED_WORK(&lu->work, sbp2_lu_workfn); in sbp2_add_logical_unit()
992 list_add_tail(&lu->link, &tgt->lu_list); in sbp2_add_logical_unit()
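
sbp2_add_logical_unit() above shows the initialization order for a new logical unit: allocate it, set up and register the status-write address handler, fill in the identity and state fields, initialize the pending-ORB list and the delayed work (with workfn initially pointing at sbp2_login), and finally link the unit onto the target's lu_list. The generic part of that sequence as a compilable sketch; the firewire-specific fw_core_add_address_handler() step is left out, and my_tgt, my_lu, my_login and the error code are illustrative assumptions, not the driver's code.

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/workqueue.h>

struct my_tgt { struct list_head lu_list; };

struct my_lu {
	struct my_tgt *tgt;
	int lun;
	int retries;
	bool has_sdev, blocked;
	struct list_head orb_list;	/* pending ORBs, see sketch above */
	struct list_head link;		/* entry in tgt->lu_list */
	struct delayed_work work;
	void (*workfn)(struct work_struct *work);
};

static void my_login(struct work_struct *work) { /* login state handler */ }
static void my_lu_workfn(struct work_struct *work) { /* dispatch, see above */ }

static int my_add_logical_unit(struct my_tgt *tgt, int lun_entry)
{
	struct my_lu *lu = kmalloc(sizeof(*lu), GFP_KERNEL);

	if (!lu)
		return -ENOMEM;

	lu->tgt = tgt;
	lu->lun = lun_entry & 0xffff;	/* LUN lives in the low 16 bits */
	lu->retries = 0;
	lu->has_sdev = false;
	lu->blocked = false;

	INIT_LIST_HEAD(&lu->orb_list);
	lu->workfn = my_login;		/* first queued work performs the login */
	INIT_DELAYED_WORK(&lu->work, my_lu_workfn);

	list_add_tail(&lu->link, &tgt->lu_list);
	return 0;
}
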
1126 struct sbp2_logical_unit *lu; in sbp2_probe() local
1178 list_for_each_entry(lu, &tgt->lu_list, link) in sbp2_probe()
1179 sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5)); in sbp2_probe()
1195 struct sbp2_logical_unit *lu; in sbp2_update() local
1203 list_for_each_entry(lu, &tgt->lu_list, link) { in sbp2_update()
1204 sbp2_conditionally_block(lu); in sbp2_update()
1205 lu->retries = 0; in sbp2_update()
1206 sbp2_queue_work(lu, 0); in sbp2_update()
1214 struct sbp2_logical_unit *lu, *next; in sbp2_remove() local
1222 list_for_each_entry_safe(lu, next, &tgt->lu_list, link) { in sbp2_remove()
1223 cancel_delayed_work_sync(&lu->work); in sbp2_remove()
1224 sdev = scsi_device_lookup(shost, 0, 0, sbp2_lun2int(lu->lun)); in sbp2_remove()
1229 if (lu->login_id != INVALID_LOGIN_ID) { in sbp2_remove()
1239 sbp2_send_management_orb(lu, node_id, generation, in sbp2_remove()
1241 lu->login_id, NULL); in sbp2_remove()
1243 fw_core_remove_address_handler(&lu->address_handler); in sbp2_remove()
1244 list_del(&lu->link); in sbp2_remove()
1245 kfree(lu); in sbp2_remove()
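
sbp2_remove() above walks tgt->lu_list with list_for_each_entry_safe(), cancels each unit's delayed work synchronously, looks up and drops the SCSI device, sends a logout ORB if lu->login_id is still valid, removes the address handler, and only then unlinks and frees the unit. A short teardown sketch of the generic list/work part, reusing the illustrative my_tgt/my_lu types from the sketch above:

static void my_remove(struct my_tgt *tgt)
{
	struct my_lu *lu, *next;

	list_for_each_entry_safe(lu, next, &tgt->lu_list, link) {
		cancel_delayed_work_sync(&lu->work);
		/* the real driver removes the SCSI device, logs out if a
		 * login is still active, and removes the address handler
		 * before freeing the unit */
		list_del(&lu->link);
		kfree(lu);
	}
}
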
1339 struct fw_device *device = target_parent_device(base_orb->lu->tgt); in complete_command_orb()
1344 sbp2_agent_reset_no_wait(base_orb->lu); in complete_command_orb()
1370 sbp2_conditionally_block(base_orb->lu); in complete_command_orb()
1382 struct fw_device *device, struct sbp2_logical_unit *lu) in sbp2_map_scatterlist() argument
1400 cpu_to_be32(lu->tgt->address_high); in sbp2_map_scatterlist()
1426 orb->request.data_descriptor.high = cpu_to_be32(lu->tgt->address_high); in sbp2_map_scatterlist()
1444 struct sbp2_logical_unit *lu = cmd->device->hostdata; in sbp2_scsi_queuecommand() local
1445 struct fw_device *device = target_parent_device(lu->tgt); in sbp2_scsi_queuecommand()
1459 COMMAND_ORB_MAX_PAYLOAD(lu->tgt->max_payload) | in sbp2_scsi_queuecommand()
1469 if (scsi_sg_count(cmd) && sbp2_map_scatterlist(orb, device, lu) < 0) in sbp2_scsi_queuecommand()
1483 sbp2_send_orb(&orb->base, lu, lu->tgt->node_id, generation, in sbp2_scsi_queuecommand()
1484 lu->command_block_agent_address + SBP2_ORB_POINTER); in sbp2_scsi_queuecommand()
1493 struct sbp2_logical_unit *lu = sdev->hostdata; in sbp2_scsi_slave_alloc() local
1496 if (!lu) in sbp2_scsi_slave_alloc()
1507 if (lu->tgt->workarounds & SBP2_WORKAROUND_INQUIRY_36) in sbp2_scsi_slave_alloc()
1515 struct sbp2_logical_unit *lu = sdev->hostdata; in sbp2_scsi_slave_configure() local
1526 lu->tgt->workarounds & SBP2_WORKAROUND_MODE_SENSE_8) in sbp2_scsi_slave_configure()
1529 if (lu->tgt->workarounds & SBP2_WORKAROUND_FIX_CAPACITY) in sbp2_scsi_slave_configure()
1532 if (lu->tgt->workarounds & SBP2_WORKAROUND_POWER_CONDITION) in sbp2_scsi_slave_configure()
1535 if (lu->tgt->workarounds & SBP2_WORKAROUND_128K_MAX_TRANS) in sbp2_scsi_slave_configure()
1547 struct sbp2_logical_unit *lu = cmd->device->hostdata; in sbp2_scsi_abort() local
1549 dev_notice(lu_dev(lu), "sbp2_scsi_abort\n"); in sbp2_scsi_abort()
1550 sbp2_agent_reset(lu); in sbp2_scsi_abort()
1551 sbp2_cancel_orbs(lu); in sbp2_scsi_abort()
1567 struct sbp2_logical_unit *lu; in sbp2_sysfs_ieee1394_id_show() local
1572 lu = sdev->hostdata; in sbp2_sysfs_ieee1394_id_show()
1575 (unsigned long long)lu->tgt->guid, in sbp2_sysfs_ieee1394_id_show()
1576 lu->tgt->directory_id, lu->lun); in sbp2_sysfs_ieee1394_id_show()