Lines Matching refs:tgt

119 	struct sbp2_target *tgt;  member
169 static struct fw_device *target_parent_device(struct sbp2_target *tgt) in target_parent_device() argument
171 return fw_parent_device(tgt->unit); in target_parent_device()
174 static const struct device *tgt_dev(const struct sbp2_target *tgt) in tgt_dev() argument
176 return &tgt->unit->device; in tgt_dev()
181 return &lu->tgt->unit->device; in lu_dev()
435 spin_lock_irqsave(&lu->tgt->lock, flags); in sbp2_status_write()
445 spin_unlock_irqrestore(&lu->tgt->lock, flags); in sbp2_status_write()
472 spin_lock_irqsave(&orb->lu->tgt->lock, flags); in complete_transaction()
478 spin_unlock_irqrestore(&orb->lu->tgt->lock, flags); in complete_transaction()
483 spin_unlock_irqrestore(&orb->lu->tgt->lock, flags); in complete_transaction()
492 struct fw_device *device = target_parent_device(lu->tgt); in sbp2_send_orb()
500 spin_lock_irqsave(&lu->tgt->lock, flags); in sbp2_send_orb()
502 spin_unlock_irqrestore(&lu->tgt->lock, flags); in sbp2_send_orb()
514 struct fw_device *device = target_parent_device(lu->tgt); in sbp2_cancel_orbs()
520 spin_lock_irq(&lu->tgt->lock); in sbp2_cancel_orbs()
522 spin_unlock_irq(&lu->tgt->lock); in sbp2_cancel_orbs()
552 struct fw_device *device = target_parent_device(lu->tgt); in sbp2_send_management_orb()
591 timeout = lu->tgt->mgt_orb_timeout; in sbp2_send_management_orb()
606 lu->tgt->management_agent_address); in sbp2_send_management_orb()
648 struct fw_device *device = target_parent_device(lu->tgt); in sbp2_agent_reset()
652 lu->tgt->node_id, lu->generation, device->max_speed, in sbp2_agent_reset()
665 struct fw_device *device = target_parent_device(lu->tgt); in sbp2_agent_reset_no_wait()
674 lu->tgt->node_id, lu->generation, device->max_speed, in sbp2_agent_reset_no_wait()
679 static inline void sbp2_allow_block(struct sbp2_target *tgt) in sbp2_allow_block() argument
681 spin_lock_irq(&tgt->lock); in sbp2_allow_block()
682 --tgt->dont_block; in sbp2_allow_block()
683 spin_unlock_irq(&tgt->lock); in sbp2_allow_block()
698 struct sbp2_target *tgt = lu->tgt; in sbp2_conditionally_block() local
699 struct fw_card *card = target_parent_device(tgt)->card; in sbp2_conditionally_block()
701 container_of((void *)tgt, struct Scsi_Host, hostdata[0]); in sbp2_conditionally_block()
704 spin_lock_irqsave(&tgt->lock, flags); in sbp2_conditionally_block()
705 if (!tgt->dont_block && !lu->blocked && in sbp2_conditionally_block()
708 if (++tgt->blocked == 1) in sbp2_conditionally_block()
711 spin_unlock_irqrestore(&tgt->lock, flags); in sbp2_conditionally_block()
722 struct sbp2_target *tgt = lu->tgt; in sbp2_conditionally_unblock() local
723 struct fw_card *card = target_parent_device(tgt)->card; in sbp2_conditionally_unblock()
725 container_of((void *)tgt, struct Scsi_Host, hostdata[0]); in sbp2_conditionally_unblock()
728 spin_lock_irq(&tgt->lock); in sbp2_conditionally_unblock()
731 unblock = --tgt->blocked == 0; in sbp2_conditionally_unblock()
733 spin_unlock_irq(&tgt->lock); in sbp2_conditionally_unblock()
745 static void sbp2_unblock(struct sbp2_target *tgt) in sbp2_unblock() argument
748 container_of((void *)tgt, struct Scsi_Host, hostdata[0]); in sbp2_unblock()
750 spin_lock_irq(&tgt->lock); in sbp2_unblock()
751 ++tgt->dont_block; in sbp2_unblock()
752 spin_unlock_irq(&tgt->lock); in sbp2_unblock()
786 struct fw_device *device = target_parent_device(lu->tgt); in sbp2_set_busy_timeout()
790 lu->tgt->node_id, lu->generation, device->max_speed, in sbp2_set_busy_timeout()
800 struct sbp2_target *tgt = lu->tgt; in sbp2_login() local
801 struct fw_device *device = target_parent_device(tgt); in sbp2_login()
825 dev_err(tgt_dev(tgt), "failed to login to LUN %04x\n", in sbp2_login()
828 sbp2_unblock(lu->tgt); in sbp2_login()
833 tgt->node_id = node_id; in sbp2_login()
834 tgt->address_high = local_node_id << 16; in sbp2_login()
843 dev_notice(tgt_dev(tgt), "logged in to LUN %04x (%d retries)\n", in sbp2_login()
860 if (lu->tgt->workarounds & SBP2_WORKAROUND_DELAY_INQUIRY) in sbp2_login()
863 shost = container_of((void *)tgt, struct Scsi_Host, hostdata[0]); in sbp2_login()
888 sbp2_allow_block(tgt); in sbp2_login()
910 struct sbp2_target *tgt = lu->tgt; in sbp2_reconnect() local
911 struct fw_device *device = target_parent_device(tgt); in sbp2_reconnect()
935 dev_err(tgt_dev(tgt), "failed to reconnect\n"); in sbp2_reconnect()
944 tgt->node_id = node_id; in sbp2_reconnect()
945 tgt->address_high = local_node_id << 16; in sbp2_reconnect()
949 dev_notice(tgt_dev(tgt), "reconnected to LUN %04x (%d retries)\n", in sbp2_reconnect()
964 static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry) in sbp2_add_logical_unit() argument
982 lu->tgt = tgt; in sbp2_add_logical_unit()
988 ++tgt->dont_block; in sbp2_add_logical_unit()
993 list_add_tail(&lu->link, &tgt->lu_list); in sbp2_add_logical_unit()
997 static void sbp2_get_unit_unique_id(struct sbp2_target *tgt, in sbp2_get_unit_unique_id() argument
1001 tgt->guid = (u64)leaf[1] << 32 | leaf[2]; in sbp2_get_unit_unique_id()
1004 static int sbp2_scan_logical_unit_dir(struct sbp2_target *tgt, in sbp2_scan_logical_unit_dir() argument
1013 sbp2_add_logical_unit(tgt, value) < 0) in sbp2_scan_logical_unit_dir()
1018 static int sbp2_scan_unit_dir(struct sbp2_target *tgt, const u32 *directory, in sbp2_scan_unit_dir() argument
1029 tgt->management_agent_address = in sbp2_scan_unit_dir()
1034 tgt->directory_id = value; in sbp2_scan_unit_dir()
1047 tgt->mgt_orb_timeout = (value >> 8 & 0xff) * 500; in sbp2_scan_unit_dir()
1051 if (sbp2_add_logical_unit(tgt, value) < 0) in sbp2_scan_unit_dir()
1056 sbp2_get_unit_unique_id(tgt, ci.p - 1 + value); in sbp2_scan_unit_dir()
1061 if (sbp2_scan_logical_unit_dir(tgt, ci.p - 1 + value) < 0) in sbp2_scan_unit_dir()
1074 static void sbp2_clamp_management_orb_timeout(struct sbp2_target *tgt) in sbp2_clamp_management_orb_timeout() argument
1076 unsigned int timeout = tgt->mgt_orb_timeout; in sbp2_clamp_management_orb_timeout()
1079 dev_notice(tgt_dev(tgt), "%ds mgt_ORB_timeout limited to 40s\n", in sbp2_clamp_management_orb_timeout()
1082 tgt->mgt_orb_timeout = clamp_val(timeout, 5000, 40000); in sbp2_clamp_management_orb_timeout()
1085 static void sbp2_init_workarounds(struct sbp2_target *tgt, u32 model, in sbp2_init_workarounds() argument
1092 dev_notice(tgt_dev(tgt), in sbp2_init_workarounds()
1114 dev_notice(tgt_dev(tgt), "workarounds 0x%x " in sbp2_init_workarounds()
1117 tgt->workarounds = w; in sbp2_init_workarounds()
1126 struct sbp2_target *tgt; in sbp2_probe() local
1135 shost = scsi_host_alloc(&scsi_driver_template, sizeof(*tgt)); in sbp2_probe()
1139 tgt = (struct sbp2_target *)shost->hostdata; in sbp2_probe()
1140 dev_set_drvdata(&unit->device, tgt); in sbp2_probe()
1141 tgt->unit = unit; in sbp2_probe()
1142 INIT_LIST_HEAD(&tgt->lu_list); in sbp2_probe()
1143 spin_lock_init(&tgt->lock); in sbp2_probe()
1144 tgt->guid = (u64)device->config_rom[3] << 32 | device->config_rom[4]; in sbp2_probe()
1156 tgt->directory_id = ((unit->directory - device->config_rom) * 4 in sbp2_probe()
1162 if (sbp2_scan_unit_dir(tgt, unit->directory, &model, in sbp2_probe()
1166 sbp2_clamp_management_orb_timeout(tgt); in sbp2_probe()
1167 sbp2_init_workarounds(tgt, model, firmware_revision); in sbp2_probe()
1175 tgt->max_payload = min3(device->max_speed + 7, 10U, in sbp2_probe()
1179 list_for_each_entry(lu, &tgt->lu_list, link) in sbp2_probe()
1195 struct sbp2_target *tgt = dev_get_drvdata(&unit->device); in sbp2_update() local
1204 list_for_each_entry(lu, &tgt->lu_list, link) { in sbp2_update()
1214 struct sbp2_target *tgt = dev_get_drvdata(&unit->device); in sbp2_remove() local
1217 container_of((void *)tgt, struct Scsi_Host, hostdata[0]); in sbp2_remove()
1221 sbp2_unblock(tgt); in sbp2_remove()
1223 list_for_each_entry_safe(lu, next, &tgt->lu_list, link) { in sbp2_remove()
1340 struct fw_device *device = target_parent_device(base_orb->lu->tgt); in complete_command_orb()
1401 cpu_to_be32(lu->tgt->address_high); in sbp2_map_scatterlist()
1427 orb->request.data_descriptor.high = cpu_to_be32(lu->tgt->address_high); in sbp2_map_scatterlist()
1446 struct fw_device *device = target_parent_device(lu->tgt); in sbp2_scsi_queuecommand()
1460 COMMAND_ORB_MAX_PAYLOAD(lu->tgt->max_payload) | in sbp2_scsi_queuecommand()
1484 sbp2_send_orb(&orb->base, lu, lu->tgt->node_id, generation, in sbp2_scsi_queuecommand()
1508 if (lu->tgt->workarounds & SBP2_WORKAROUND_INQUIRY_36) in sbp2_scsi_slave_alloc()
1527 lu->tgt->workarounds & SBP2_WORKAROUND_MODE_SENSE_8) in sbp2_scsi_slave_configure()
1530 if (lu->tgt->workarounds & SBP2_WORKAROUND_FIX_CAPACITY) in sbp2_scsi_slave_configure()
1533 if (lu->tgt->workarounds & SBP2_WORKAROUND_POWER_CONDITION) in sbp2_scsi_slave_configure()
1536 if (lu->tgt->workarounds & SBP2_WORKAROUND_128K_MAX_TRANS) in sbp2_scsi_slave_configure()
1576 (unsigned long long)lu->tgt->guid, in sbp2_sysfs_ieee1394_id_show()
1577 lu->tgt->directory_id, lu->lun); in sbp2_sysfs_ieee1394_id_show()
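
Taken together, the matches above outline struct sbp2_target and the lu->tgt back-pointer in struct sbp2_logical_unit. The sketch below is reconstructed only from the field names visible in these matches; the member types, their order, and everything omitted are assumptions, not the authoritative layout of drivers/firewire/sbp2.c.

/* Minimal sketch, assuming kernel types; fields are those named in the
 * matches above, everything else is guessed or left out. */
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct fw_unit;				/* from <linux/firewire.h> */

struct sbp2_target {
	struct fw_unit *unit;		/* tgt->unit (171, 1141) */
	struct list_head lu_list;	/* logical units (993, 1142, 1179, 1204, 1223) */
	spinlock_t lock;		/* guards ORB list and block counters */

	u64 guid;			/* (1001, 1144, 1576) */
	unsigned int directory_id;	/* (1034, 1156, 1577) */
	u64 management_agent_address;	/* (1029, 606) */
	unsigned int mgt_orb_timeout;	/* (591, 1047, 1076) */
	unsigned int workarounds;	/* (1117) and the slave_configure checks */
	unsigned int max_payload;	/* (1175, 1460) */

	int node_id;			/* (833, 944) */
	int address_high;		/* (834, 945, 1401, 1427) */

	int dont_block;			/* ++/-- only under tgt->lock */
	int blocked;			/* ++/-- only under tgt->lock */
};

struct sbp2_logical_unit {
	struct sbp2_target *tgt;	/* the member matched at 119 */
	struct list_head link;		/* list_add_tail(&lu->link, &tgt->lu_list) at 993 */
	/* generation, blocked, lun, ... omitted */
};

Two usage patterns recur in the matches. First, the target is embedded in the SCSI host's hostdata (scsi_host_alloc(..., sizeof(*tgt)) at 1135), so the Scsi_Host is recovered with container_of((void *)tgt, struct Scsi_Host, hostdata[0]) at 701, 725, 748, 863 and 1217. Second, tgt->lock is taken with the _irqsave variants in the status-write and transaction-completion paths (435, 472, 500) and with plain spin_lock_irq elsewhere (520, 681, 728, 750).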