Lines Matching refs:subsys

484 struct nvmet_subsys *subsys = ns->subsys;
488 mutex_lock(&subsys->lock);
504 mutex_unlock(&subsys->lock);
508 mutex_unlock(&subsys->lock);
531 mutex_lock(&ns->subsys->lock);
548 mutex_unlock(&ns->subsys->lock);
565 struct nvmet_subsys *subsys = ns->subsys;
568 mutex_lock(&subsys->lock);
578 mutex_unlock(&subsys->lock);
593 struct nvmet_subsys *subsys = ns->subsys;
599 mutex_lock(&subsys->lock);
624 mutex_unlock(&subsys->lock);
658 nvmet_send_ana_event(ns->subsys, NULL);
703 mutex_lock(&ns->subsys->lock);
706 mutex_unlock(&ns->subsys->lock);
711 mutex_unlock(&ns->subsys->lock);
729 mutex_lock(&ns->subsys->lock);
732 mutex_unlock(&ns->subsys->lock);
736 nvmet_ns_changed(ns->subsys, ns->nsid);
737 mutex_unlock(&ns->subsys->lock);
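
The matches between 484 and 737 all come from the namespace enable/disable/revalidate paths: namespace state is only touched under subsys->lock, and the host-visible change is signalled afterwards (nvmet_send_ana_event, nvmet_ns_changed). Below is a minimal sketch of that pattern; it assumes the driver-internal types from drivers/nvme/target/nvmet.h plus <linux/mutex.h>, and the function name and simplified flow are illustrative rather than the exact in-tree code.

static int example_ns_set_enabled(struct nvmet_ns *ns, bool enable)
{
	struct nvmet_subsys *subsys = ns->subsys;

	/* all namespace state changes happen under the subsystem lock */
	mutex_lock(&subsys->lock);
	if (ns->enabled == enable)
		goto out_unlock;		/* nothing to do */

	ns->enabled = enable;

	/* tell attached hosts that the namespace list changed */
	nvmet_ns_changed(subsys, ns->nsid);

out_unlock:
	mutex_unlock(&subsys->lock);
	return 0;
}
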
757 bool nvmet_subsys_nsid_exists(struct nvmet_subsys *subsys, u32 nsid)
763 mutex_lock(&subsys->namespaces_group.cg_subsys->su_mutex);
764 ns_item = config_group_find_item(&subsys->namespaces_group, name);
765 mutex_unlock(&subsys->namespaces_group.cg_subsys->su_mutex);
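
The three matches at 757-765 belong to nvmet_subsys_nsid_exists(), which checks whether an nsid-named directory exists under the subsystem's namespaces group. A sketch of how such a lookup fits together is below, assuming <linux/configfs.h>; note that config_group_find_item() returns the item with a reference held, so the sketch drops it once only the yes/no answer is needed.

static bool example_nsid_exists(struct nvmet_subsys *subsys, u32 nsid)
{
	struct config_item *ns_item;
	char name[12];

	snprintf(name, sizeof(name), "%u", nsid);

	/* the configfs hierarchy is protected by the subsystem's su_mutex */
	mutex_lock(&subsys->namespaces_group.cg_subsys->su_mutex);
	ns_item = config_group_find_item(&subsys->namespaces_group, name);
	mutex_unlock(&subsys->namespaces_group.cg_subsys->su_mutex);

	if (ns_item)
		config_item_put(ns_item);	/* drop the reference taken by the lookup */

	return ns_item != NULL;
}
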
789 struct nvmet_subsys *subsys = namespaces_to_subsys(&group->cg_item);
805 ns = nvmet_ns_alloc(subsys, nsid);
810 pr_info("adding nsid %d to subsystem %s\n", nsid, subsys->subsysnqn);
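
The matches at 789-810 are from the configfs make_group callback that creates a namespace directory: the nsid is parsed from the directory name, a namespace object is allocated against the owning subsystem, and the creation is logged. A condensed sketch follows; the nsid range checks and error reporting of the real callback are simplified, and nvmet_ns_type stands for the namespace item type and is assumed here.

static struct config_group *example_namespaces_make_group(
		struct config_group *group, const char *name)
{
	struct nvmet_subsys *subsys = namespaces_to_subsys(&group->cg_item);
	struct nvmet_ns *ns;
	u32 nsid;

	/* the directory name is the namespace ID */
	if (kstrtou32(name, 0, &nsid) || nsid == 0 || nsid == NVME_NSID_ALL)
		return ERR_PTR(-EINVAL);

	ns = nvmet_ns_alloc(subsys, nsid);
	if (!ns)
		return ERR_PTR(-ENOMEM);
	config_group_init_type_name(&ns->group, name, &nvmet_ns_type);

	pr_info("adding nsid %d to subsystem %s\n", nsid, subsys->subsysnqn);

	return &ns->group;
}
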
831 struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
833 return snprintf(page, PAGE_SIZE, "%s\n", subsys->passthru_ctrl_path);
839 struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
843 mutex_lock(&subsys->lock);
846 if (subsys->passthru_ctrl)
854 kfree(subsys->passthru_ctrl_path);
856 subsys->passthru_ctrl_path = kstrndup(page, len, GFP_KERNEL);
857 if (!subsys->passthru_ctrl_path)
860 mutex_unlock(&subsys->lock);
864 mutex_unlock(&subsys->lock);
872 struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
874 return sprintf(page, "%d\n", subsys->passthru_ctrl ? 1 : 0);
880 struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
888 ret = nvmet_passthru_ctrl_enable(subsys);
890 nvmet_passthru_ctrl_disable(subsys);
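
The matches from 831 to 890 implement the passthru attributes: passthru_ctrl_path is only writable while no passthru controller is attached (846), and the enable attribute turns the passthru controller on or off. A sketch of the enable store is below, assuming kstrtobool() and the nvmet_passthru_ctrl_enable()/nvmet_passthru_ctrl_disable() helpers named in the listing; locking against concurrent enables is omitted.

static ssize_t example_passthru_enable_store(struct config_item *item,
		const char *page, size_t count)
{
	/* the passthru group sits directly under the subsystem directory */
	struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
	bool enable;
	int ret = 0;

	if (kstrtobool(page, &enable))
		return -EINVAL;

	if (enable)
		ret = nvmet_passthru_ctrl_enable(subsys);
	else
		nvmet_passthru_ctrl_disable(subsys);

	return ret ? ret : count;
}
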
905 struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
910 subsys->admin_timeout = timeout;
924 struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
929 subsys->io_timeout = timeout;
943 struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
948 subsys->clear_ids = clear_ids;
967 static void nvmet_add_passthru_group(struct nvmet_subsys *subsys)
969 config_group_init_type_name(&subsys->passthru_group,
971 configfs_add_default_group(&subsys->passthru_group,
972 &subsys->group);
977 static void nvmet_add_passthru_group(struct nvmet_subsys *subsys)
987 struct nvmet_subsys *subsys;
995 subsys = to_subsys(target);
999 link->subsys = subsys;
1004 if (p->subsys == subsys)
1015 nvmet_port_disc_changed(port, subsys);
1030 struct nvmet_subsys *subsys = to_subsys(target);
1035 if (p->subsys == subsys)
1043 nvmet_port_del_ctrls(port, subsys);
1044 nvmet_port_disc_changed(port, subsys);
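
Lines 987-1044 are the configfs allow_link/drop_link callbacks that bind a subsystem into a port's subsystems/ directory by symlink and notify discovery controllers of the change. Below is a condensed sketch of the link side; it reuses the names visible in the listing (nvmet_subsys_link, nvmet_port_disc_changed, nvmet_subsys_type), assumes a to_nvmet_port() helper that maps the parent item back to its port, and leaves out the nvmet_config_sem locking and port enable handling of the real driver.

static int example_port_subsys_allow_link(struct config_item *parent,
		struct config_item *target)
{
	struct nvmet_port *port = to_nvmet_port(parent->ci_parent);
	struct nvmet_subsys_link *link, *p;
	struct nvmet_subsys *subsys;

	/* only subsystem directories may be linked under subsystems/ */
	if (target->ci_type != &nvmet_subsys_type)
		return -EINVAL;
	subsys = to_subsys(target);

	link = kmalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;
	link->subsys = subsys;

	/* reject duplicate links to the same subsystem */
	list_for_each_entry(p, &port->subsystems, entry) {
		if (p->subsys == subsys) {
			kfree(link);
			return -EEXIST;
		}
	}

	list_add_tail(&link->entry, &port->subsystems);
	nvmet_port_disc_changed(port, subsys);
	return 0;
}
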
1065 struct nvmet_subsys *subsys = to_subsys(parent->ci_parent);
1083 if (subsys->allow_any_host) {
1089 list_for_each_entry(p, &subsys->hosts, entry) {
1093 list_add_tail(&link->entry, &subsys->hosts);
1094 nvmet_subsys_disc_changed(subsys, host);
1107 struct nvmet_subsys *subsys = to_subsys(parent->ci_parent);
1112 list_for_each_entry(p, &subsys->hosts, entry) {
1121 nvmet_subsys_disc_changed(subsys, host);
1147 struct nvmet_subsys *subsys = to_subsys(item);
1155 if (allow_any_host && !list_empty(&subsys->hosts)) {
1161 if (subsys->allow_any_host != allow_any_host) {
1162 subsys->allow_any_host = allow_any_host;
1163 nvmet_subsys_disc_changed(subsys, NULL);
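
The allowed_hosts matches (1065-1163) enforce that explicit host entries and allow_any_host are mutually exclusive, and they raise a discovery change event whenever the policy changes. A small sketch of the attr_allow_any_host store follows, assuming kstrtobool() and the global nvmet_config_sem used by the target code for configuration changes.

static ssize_t example_allow_any_host_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_subsys *subsys = to_subsys(item);
	bool allow_any_host;
	int ret = 0;

	if (kstrtobool(page, &allow_any_host))
		return -EINVAL;

	down_write(&nvmet_config_sem);
	/* explicit host entries and "allow any host" are mutually exclusive */
	if (allow_any_host && !list_empty(&subsys->hosts)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	if (subsys->allow_any_host != allow_any_host) {
		subsys->allow_any_host = allow_any_host;
		nvmet_subsys_disc_changed(subsys, NULL);
	}

out_unlock:
	up_write(&nvmet_config_sem);
	return ret ? ret : count;
}
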
1176 struct nvmet_subsys *subsys = to_subsys(item);
1178 if (NVME_TERTIARY(subsys->ver))
1180 NVME_MAJOR(subsys->ver),
1181 NVME_MINOR(subsys->ver),
1182 NVME_TERTIARY(subsys->ver));
1185 NVME_MAJOR(subsys->ver),
1186 NVME_MINOR(subsys->ver));
1190 nvmet_subsys_attr_version_store_locked(struct nvmet_subsys *subsys,
1196 if (subsys->subsys_discovered) {
1197 if (NVME_TERTIARY(subsys->ver))
1199 NVME_MAJOR(subsys->ver),
1200 NVME_MINOR(subsys->ver),
1201 NVME_TERTIARY(subsys->ver));
1204 NVME_MAJOR(subsys->ver),
1205 NVME_MINOR(subsys->ver));
1210 if (nvmet_is_passthru_subsys(subsys))
1217 subsys->ver = NVME_VS(major, minor, tertiary);
1225 struct nvmet_subsys *subsys = to_subsys(item);
1229 mutex_lock(&subsys->lock);
1230 ret = nvmet_subsys_attr_version_store_locked(subsys, page, count);
1231 mutex_unlock(&subsys->lock);
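
From 1176 onwards the version, serial, model, ieee_oui and firmware attributes all share one shape: a *_store_locked() helper that refuses changes once subsys_discovered is set (the value has already been reported to a host), wrapped by a store that only takes and releases subsys->lock. The sketch below shows that shape with the parsing step left as a comment; the names are illustrative.

static ssize_t example_attr_store_locked(struct nvmet_subsys *subsys,
		const char *page, size_t count)
{
	/* once a host has seen the value it must not change anymore */
	if (subsys->subsys_discovered)
		return -EINVAL;

	/* parse 'page' and update the corresponding subsystem field here */
	return count;
}

static ssize_t example_attr_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_subsys *subsys = to_subsys(item);
	ssize_t ret;

	mutex_lock(&subsys->lock);
	ret = example_attr_store_locked(subsys, page, count);
	mutex_unlock(&subsys->lock);

	return ret;
}
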
1247 struct nvmet_subsys *subsys = to_subsys(item);
1250 NVMET_SN_MAX_SIZE, subsys->serial);
1254 nvmet_subsys_attr_serial_store_locked(struct nvmet_subsys *subsys,
1259 if (subsys->subsys_discovered) {
1261 subsys->serial);
1278 memcpy_and_pad(subsys->serial, NVMET_SN_MAX_SIZE, page, len, ' ');
1286 struct nvmet_subsys *subsys = to_subsys(item);
1290 mutex_lock(&subsys->lock);
1291 ret = nvmet_subsys_attr_serial_store_locked(subsys, page, count);
1292 mutex_unlock(&subsys->lock);
1362 struct nvmet_subsys *subsys = to_subsys(item);
1364 return snprintf(page, PAGE_SIZE, "%s\n", subsys->model_number);
1367 static ssize_t nvmet_subsys_attr_model_store_locked(struct nvmet_subsys *subsys,
1373 if (subsys->subsys_discovered) {
1375 subsys->model_number);
1397 kfree(subsys->model_number);
1398 subsys->model_number = val;
1405 struct nvmet_subsys *subsys = to_subsys(item);
1409 mutex_lock(&subsys->lock);
1410 ret = nvmet_subsys_attr_model_store_locked(subsys, page, count);
1411 mutex_unlock(&subsys->lock);
1421 struct nvmet_subsys *subsys = to_subsys(item);
1423 return sysfs_emit(page, "0x%06x\n", subsys->ieee_oui);
1426 static ssize_t nvmet_subsys_attr_ieee_oui_store_locked(struct nvmet_subsys *subsys,
1432 if (subsys->subsys_discovered) {
1434 subsys->ieee_oui);
1445 subsys->ieee_oui = val;
1453 struct nvmet_subsys *subsys = to_subsys(item);
1457 mutex_lock(&subsys->lock);
1458 ret = nvmet_subsys_attr_ieee_oui_store_locked(subsys, page, count);
1459 mutex_unlock(&subsys->lock);
1469 struct nvmet_subsys *subsys = to_subsys(item);
1471 return sysfs_emit(page, "%s\n", subsys->firmware_rev);
1474 static ssize_t nvmet_subsys_attr_firmware_store_locked(struct nvmet_subsys *subsys,
1480 if (subsys->subsys_discovered) {
1482 subsys->firmware_rev);
1505 kfree(subsys->firmware_rev);
1507 subsys->firmware_rev = val;
1515 struct nvmet_subsys *subsys = to_subsys(item);
1519 mutex_lock(&subsys->lock);
1520 ret = nvmet_subsys_attr_firmware_store_locked(subsys, page, count);
1521 mutex_unlock(&subsys->lock);
1538 struct nvmet_subsys *subsys = to_subsys(item);
1544 subsys->pi_support = pi_enable;
1559 struct nvmet_subsys *subsys = to_subsys(item);
1570 subsys->max_qid = qid_max;
1573 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
1602 struct nvmet_subsys *subsys = to_subsys(item);
1604 nvmet_subsys_del_ctrls(subsys);
1605 nvmet_subsys_put(subsys);
1621 struct nvmet_subsys *subsys;
1633 subsys = nvmet_subsys_alloc(name, NVME_NQN_NVME);
1634 if (IS_ERR(subsys))
1635 return ERR_CAST(subsys);
1637 config_group_init_type_name(&subsys->group, name, &nvmet_subsys_type);
1639 config_group_init_type_name(&subsys->namespaces_group,
1641 configfs_add_default_group(&subsys->namespaces_group, &subsys->group);
1643 config_group_init_type_name(&subsys->allowed_hosts_group,
1645 configfs_add_default_group(&subsys->allowed_hosts_group,
1646 &subsys->group);
1648 nvmet_add_passthru_group(subsys);
1650 return &subsys->group;
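
Finally, lines 1602-1650 cover subsystem teardown and creation. When a subsystem directory is made, the new group gets "namespaces" and "allowed_hosts" (and, when passthru support is built in, "passthru") as default child groups before configfs exposes the directory. A sketch of that composition is below; the NQN validation performed by the real make callback is omitted, and nvmet_namespaces_type/nvmet_allowed_hosts_type stand in for the corresponding item types.

static struct config_group *example_subsys_make(struct config_group *group,
		const char *name)
{
	struct nvmet_subsys *subsys;

	subsys = nvmet_subsys_alloc(name, NVME_NQN_NVME);
	if (IS_ERR(subsys))
		return ERR_CAST(subsys);

	config_group_init_type_name(&subsys->group, name, &nvmet_subsys_type);

	/* default child groups appear automatically inside the new directory */
	config_group_init_type_name(&subsys->namespaces_group,
			"namespaces", &nvmet_namespaces_type);
	configfs_add_default_group(&subsys->namespaces_group, &subsys->group);

	config_group_init_type_name(&subsys->allowed_hosts_group,
			"allowed_hosts", &nvmet_allowed_hosts_type);
	configfs_add_default_group(&subsys->allowed_hosts_group,
			&subsys->group);

	/* no-op when CONFIG_NVME_TARGET_PASSTHRU is not set */
	nvmet_add_passthru_group(subsys);

	return &subsys->group;
}
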