Lines Matching defs:nvdimm_bus

55 struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
57 return nvdimm_bus->nd_desc->module;
62 static void nvdimm_bus_probe_start(struct nvdimm_bus *nvdimm_bus)
64 nvdimm_bus_lock(&nvdimm_bus->dev);
65 nvdimm_bus->probe_active++;
66 nvdimm_bus_unlock(&nvdimm_bus->dev);
69 static void nvdimm_bus_probe_end(struct nvdimm_bus *nvdimm_bus)
71 nvdimm_bus_lock(&nvdimm_bus->dev);
72 if (--nvdimm_bus->probe_active == 0)
73 wake_up(&nvdimm_bus->wait);
74 nvdimm_bus_unlock(&nvdimm_bus->dev);
81 struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
87 dev_dbg(&nvdimm_bus->dev, "START: %s.probe(%s)\n",
90 nvdimm_bus_probe_start(nvdimm_bus);
95 nvdimm_bus_probe_end(nvdimm_bus);
97 dev_dbg(&nvdimm_bus->dev, "END: %s.probe(%s) = %d\n", dev->driver->name,
109 struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
114 dev_dbg(&nvdimm_bus->dev, "%s.remove(%s)\n", dev->driver->name,
121 struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
129 dev_dbg(&nvdimm_bus->dev, "%s.shutdown(%s)\n",
150 struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
152 if (!nvdimm_bus)
192 static void nvdimm_clear_badblocks_regions(struct nvdimm_bus *nvdimm_bus,
200 device_for_each_child(&nvdimm_bus->dev, &ctx,
204 static void nvdimm_account_cleared_poison(struct nvdimm_bus *nvdimm_bus,
208 badrange_forget(&nvdimm_bus->badrange, phys, cleared);
211 nvdimm_clear_badblocks_regions(nvdimm_bus, phys, cleared);
217 struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
225 if (!nvdimm_bus)
228 nd_desc = nvdimm_bus->nd_desc;
266 nvdimm_account_cleared_poison(nvdimm_bus, phys, clear_err.cleared);
285 struct nvdimm_bus *nvdimm_bus;
287 nvdimm_bus = container_of(dev, struct nvdimm_bus, dev);
288 ida_free(&nd_ida, nvdimm_bus->id);
289 kfree(nvdimm_bus);
302 struct nvdimm_bus *walk_to_nvdimm_bus(struct device *nd_dev)
315 struct nvdimm_bus *to_nvdimm_bus(struct device *dev)
317 struct nvdimm_bus *nvdimm_bus;
319 nvdimm_bus = container_of(dev, struct nvdimm_bus, dev);
321 return nvdimm_bus;
325 struct nvdimm_bus *nvdimm_to_bus(struct nvdimm *nvdimm)
333 struct nvdimm_bus *nvdimm_bus_register(struct device *parent,
336 struct nvdimm_bus *nvdimm_bus;
339 nvdimm_bus = kzalloc(sizeof(*nvdimm_bus), GFP_KERNEL);
340 if (!nvdimm_bus)
342 INIT_LIST_HEAD(&nvdimm_bus->list);
343 INIT_LIST_HEAD(&nvdimm_bus->mapping_list);
344 init_waitqueue_head(&nvdimm_bus->wait);
345 nvdimm_bus->id = ida_alloc(&nd_ida, GFP_KERNEL);
346 if (nvdimm_bus->id < 0) {
347 kfree(nvdimm_bus);
350 mutex_init(&nvdimm_bus->reconfig_mutex);
351 badrange_init(&nvdimm_bus->badrange);
352 nvdimm_bus->nd_desc = nd_desc;
353 nvdimm_bus->dev.parent = parent;
354 nvdimm_bus->dev.type = &nvdimm_bus_dev_type;
355 nvdimm_bus->dev.groups = nd_desc->attr_groups;
356 nvdimm_bus->dev.bus = &nvdimm_bus_type;
357 nvdimm_bus->dev.of_node = nd_desc->of_node;
358 device_initialize(&nvdimm_bus->dev);
359 lockdep_set_class(&nvdimm_bus->dev.mutex, &nvdimm_bus_key);
360 device_set_pm_not_required(&nvdimm_bus->dev);
361 rc = dev_set_name(&nvdimm_bus->dev, "ndbus%d", nvdimm_bus->id);
365 rc = device_add(&nvdimm_bus->dev);
367 dev_dbg(&nvdimm_bus->dev, "registration failed: %d\n", rc);
371 return nvdimm_bus;
373 put_device(&nvdimm_bus->dev);
378 void nvdimm_bus_unregister(struct nvdimm_bus *nvdimm_bus)
380 if (!nvdimm_bus)
382 device_unregister(&nvdimm_bus->dev);
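
The matches above (source lines 333-373 and 378-382) are nvdimm_bus_register() and nvdimm_bus_unregister(), the public entry points a bus provider pairs around its own lifetime; the listing appears to come from the Linux kernel's drivers/nvdimm/bus.c. The following is a minimal sketch of how a provider typically uses them, assuming only the declarations in <linux/libnvdimm.h>; the platform-driver wrapper and the my_pmem/my_priv names are illustrative, not taken from the listing.

/*
 * Hypothetical provider sketch: fill a descriptor, register the bus on
 * probe, unregister it on remove.  Error handling is kept minimal.
 */
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/libnvdimm.h>

struct my_priv {
	struct nvdimm_bus_descriptor nd_desc;
	struct nvdimm_bus *bus;
};

static int my_pmem_probe(struct platform_device *pdev)
{
	struct my_priv *priv;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->nd_desc.provider_name = "my_pmem";
	priv->nd_desc.module = THIS_MODULE;

	/* the core allocates the bus, names it ndbus%d, and device_add()s it */
	priv->bus = nvdimm_bus_register(&pdev->dev, &priv->nd_desc);
	if (!priv->bus)
		return -ENOMEM;

	platform_set_drvdata(pdev, priv);
	return 0;
}

static void my_pmem_remove(struct platform_device *pdev)
{
	struct my_priv *priv = platform_get_drvdata(pdev);

	/* unregisters child devices and drops the bus device reference */
	nvdimm_bus_unregister(priv->bus);
}

As lines 358-373 show, the core owns the device lifecycle: it initializes, names, and adds the ndbus%d device, returns NULL to the provider on failure, and cleans up with put_device(), so the provider only keeps the returned pointer for the eventual nvdimm_bus_unregister() call.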
418 struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
421 list_del_init(&nvdimm_bus->list);
424 wait_event(nvdimm_bus->wait,
425 atomic_read(&nvdimm_bus->ioctl_active) == 0);
428 device_for_each_child(&nvdimm_bus->dev, NULL, child_unregister);
430 spin_lock(&nvdimm_bus->badrange.lock);
431 free_badrange_list(&nvdimm_bus->badrange.list);
432 spin_unlock(&nvdimm_bus->badrange.lock);
434 nvdimm_bus_destroy_ndctl(nvdimm_bus);
439 struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
442 rc = nvdimm_bus_create_ndctl(nvdimm_bus);
447 list_add_tail(&nvdimm_bus->list, &nvdimm_bus_list);
451 dev_set_drvdata(dev, nvdimm_bus->nd_desc);
733 int nvdimm_bus_create_ndctl(struct nvdimm_bus *nvdimm_bus)
735 dev_t devt = MKDEV(nvdimm_bus_major, nvdimm_bus->id);
746 dev->parent = &nvdimm_bus->dev;
749 rc = dev_set_name(dev, "ndctl%d", nvdimm_bus->id);
755 dev_dbg(&nvdimm_bus->dev, "failed to register ndctl%d: %d\n",
756 nvdimm_bus->id, rc);
766 void nvdimm_bus_destroy_ndctl(struct nvdimm_bus *nvdimm_bus)
768 device_destroy(nd_class, MKDEV(nvdimm_bus_major, nvdimm_bus->id));
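
Source lines 733-768 are nvdimm_bus_create_ndctl() and nvdimm_bus_destroy_ndctl(), which give each bus a per-id "ndctl%d" control device parented to the bus device. The matched lines show an open-coded device_initialize()/dev_set_name() sequence; the sketch below expresses the same create/destroy pairing with the device_create() helper purely to keep it short, and nd_class, nvdimm_bus_major, and the example_* names stand in for details that are not visible in the listing.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/kdev_t.h>

/* one control node per bus: minor number == bus id, parent == bus device */
static int example_create_ndctl(struct class *nd_class, int nvdimm_bus_major,
				int bus_id, struct device *bus_dev)
{
	struct device *dev;

	dev = device_create(nd_class, bus_dev, MKDEV(nvdimm_bus_major, bus_id),
			    NULL, "ndctl%d", bus_id);
	return PTR_ERR_OR_ZERO(dev);
}

static void example_destroy_ndctl(struct class *nd_class, int nvdimm_bus_major,
				  int bus_id)
{
	device_destroy(nd_class, MKDEV(nvdimm_bus_major, bus_id));
}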
934 struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
937 if (nvdimm_bus->probe_active == 0)
941 wait_event(nvdimm_bus->wait,
942 nvdimm_bus->probe_active == 0);
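
Source lines 62-74 (nvdimm_bus_probe_start()/nvdimm_bus_probe_end()) and 934-942 (the wait_event() in wait_nvdimm_bus_probe_idle()) form a quiesce pattern: each probe bumps probe_active under the bus lock, the last probe to finish wakes the waitqueue, and configuration paths sleep until the count drains to zero. Those fields are private to bus.c, so the sketch below restates the idiom with a hypothetical quiesce type and a plain mutex instead of the real nvdimm_bus_lock()/nd_device_lock() dance around the wait.

#include <linux/mutex.h>
#include <linux/wait.h>

struct quiesce {
	struct mutex lock;
	int active;
	wait_queue_head_t wait;
};

static void quiesce_init(struct quiesce *q)
{
	mutex_init(&q->lock);
	q->active = 0;
	init_waitqueue_head(&q->wait);
}

/* mirrors nvdimm_bus_probe_start(): count a probe in under the lock */
static void quiesce_enter(struct quiesce *q)
{
	mutex_lock(&q->lock);
	q->active++;
	mutex_unlock(&q->lock);
}

/* mirrors nvdimm_bus_probe_end(): last one out wakes any waiter */
static void quiesce_exit(struct quiesce *q)
{
	mutex_lock(&q->lock);
	if (--q->active == 0)
		wake_up(&q->wait);
	mutex_unlock(&q->lock);
}

/* mirrors the wait_event() at lines 941-942: block until probes drain */
static void quiesce_wait_idle(struct quiesce *q)
{
	wait_event(q->wait, q->active == 0);
}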
995 static int nd_cmd_clear_to_send(struct nvdimm_bus *nvdimm_bus,
998 struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
1010 return device_for_each_child(&nvdimm_bus->dev, data,
1017 wait_nvdimm_bus_probe_idle(&nvdimm_bus->dev);
1023 static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
1026 struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
1029 struct device *dev = &nvdimm_bus->dev;
1179 rc = nd_cmd_clear_to_send(nvdimm_bus, nvdimm, func, buf);
1190 nvdimm_account_cleared_poison(nvdimm_bus, clear_err->address,
1229 struct nvdimm_bus *nvdimm_bus, *found = NULL;
1236 list_for_each_entry(nvdimm_bus, &nvdimm_bus_list, list) {
1240 dev = device_find_child(&nvdimm_bus->dev,
1245 found = nvdimm_bus;
1246 } else if (nvdimm_bus->id == id) {
1247 found = nvdimm_bus;
1251 atomic_inc(&nvdimm_bus->ioctl_active);
1260 nvdimm_bus = found;
1261 rc = __nd_ioctl(nvdimm_bus, nvdimm, ro, cmd, arg);
1265 if (atomic_dec_and_test(&nvdimm_bus->ioctl_active))
1266 wake_up(&nvdimm_bus->wait);
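
The final matches (source lines 418-425 in the bus remove path and 1229-1266 in the ioctl path) show a second drain pattern: an ioctl looks the bus up under the bus-list lock and pins it with atomic_inc(&ioctl_active), the unpin side uses atomic_dec_and_test() plus wake_up(), and removal first unlinks the bus from the global list and then wait_event()s for the counter to reach zero. A self-contained sketch of that lookup-and-pin idiom, with placeholder pinned_bus/bus_* names:

#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/wait.h>

struct pinned_bus {
	struct list_head list;
	atomic_t ioctl_active;
	wait_queue_head_t wait;
	int id;
};

static LIST_HEAD(bus_list);
static DEFINE_MUTEX(bus_list_mutex);

static void bus_init(struct pinned_bus *bus, int id)
{
	INIT_LIST_HEAD(&bus->list);
	atomic_set(&bus->ioctl_active, 0);
	init_waitqueue_head(&bus->wait);
	bus->id = id;

	mutex_lock(&bus_list_mutex);
	list_add_tail(&bus->list, &bus_list);
	mutex_unlock(&bus_list_mutex);
}

/* like the lookup at lines 1236-1251: find by id and pin under the list lock */
static struct pinned_bus *bus_find_and_pin(int id)
{
	struct pinned_bus *bus, *found = NULL;

	mutex_lock(&bus_list_mutex);
	list_for_each_entry(bus, &bus_list, list) {
		if (bus->id == id) {
			found = bus;
			atomic_inc(&bus->ioctl_active);
			break;
		}
	}
	mutex_unlock(&bus_list_mutex);
	return found;
}

/* like lines 1265-1266: the final unpin wakes anyone draining the bus */
static void bus_unpin(struct pinned_bus *bus)
{
	if (atomic_dec_and_test(&bus->ioctl_active))
		wake_up(&bus->wait);
}

/* like lines 421-425: unlink so no new ioctl can pin, then wait for zero */
static void bus_teardown(struct pinned_bus *bus)
{
	mutex_lock(&bus_list_mutex);
	list_del_init(&bus->list);
	mutex_unlock(&bus_list_mutex);

	wait_event(bus->wait, atomic_read(&bus->ioctl_active) == 0);
}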