// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#include <linux/device.h>
#include <linux/sizes.h>
#include <linux/badblocks.h>
#include "nd-core.h"
#include "pmem.h"
#include "pfn.h"
#include "btt.h"
#include "nd.h"

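/*
 * __nd_detach_ndns() - release @dev's claim on a namespace
 *
 * Clears the namespace's ->claim back-pointer, zeroes the caller's
 * reference slot, and drops the device reference taken at attach time.
 * The caller must hold the nvdimm_bus reconfig_mutex (asserted below).
 */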
void __nd_detach_ndns(struct device *dev, struct nd_namespace_common **_ndns)
{
	struct nd_namespace_common *ndns = *_ndns;
	struct nvdimm_bus *nvdimm_bus;

	if (!ndns)
		return;

	nvdimm_bus = walk_to_nvdimm_bus(&ndns->dev);
	lockdep_assert_held(&nvdimm_bus->reconfig_mutex);
	dev_WARN_ONCE(dev, ndns->claim != dev, "%s: invalid claim\n", __func__);
	ndns->claim = NULL;
	*_ndns = NULL;
	put_device(&ndns->dev);
}

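/*
 * nd_detach_ndns() - locked wrapper around __nd_detach_ndns()
 *
 * Pins the namespace device so it cannot go away while the nvdimm_bus
 * lock is taken to update the claim, then drops the temporary reference.
 */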
void nd_detach_ndns(struct device *dev,
		struct nd_namespace_common **_ndns)
{
	struct nd_namespace_common *ndns = *_ndns;

	if (!ndns)
		return;
	get_device(&ndns->dev);
	nvdimm_bus_lock(&ndns->dev);
	__nd_detach_ndns(dev, _ndns);
	nvdimm_bus_unlock(&ndns->dev);
	put_device(&ndns->dev);
}

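/*
 * __nd_attach_ndns() - record @dev as the claimant of @attach
 *
 * Fails if the namespace is already claimed by another device.  On
 * success a reference is taken on the namespace device; it is dropped
 * again by __nd_detach_ndns().  Caller must hold the reconfig_mutex.
 */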
bool __nd_attach_ndns(struct device *dev, struct nd_namespace_common *attach,
		struct nd_namespace_common **_ndns)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&attach->dev);

	if (attach->claim)
		return false;
	lockdep_assert_held(&nvdimm_bus->reconfig_mutex);
	dev_WARN_ONCE(dev, *_ndns, "%s: invalid claim\n", __func__);
	attach->claim = dev;
	*_ndns = attach;
	get_device(&attach->dev);
	return true;
}

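/* nd_attach_ndns() - locked wrapper around __nd_attach_ndns() */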
bool nd_attach_ndns(struct device *dev, struct nd_namespace_common *attach,
		struct nd_namespace_common **_ndns)
{
	bool claimed;

	nvdimm_bus_lock(&attach->dev);
	claimed = __nd_attach_ndns(dev, attach, _ndns);
	nvdimm_bus_unlock(&attach->dev);
	return claimed;
}

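/* device_find_child() callback: match a region child by its device name */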
static int namespace_match(struct device *dev, void *data)
{
	char *name = data;

	return strcmp(name, dev_name(dev)) == 0;
}

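/*
 * A claiming device (btt, pfn, or dax) is "idle", i.e. safe to unregister,
 * when it is not the region's current seed device, has no namespace
 * attached, and is not bound to a driver.
 */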
static bool is_idle(struct device *dev, struct nd_namespace_common *ndns)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct device *seed = NULL;

	if (is_nd_btt(dev))
		seed = nd_region->btt_seed;
	else if (is_nd_pfn(dev))
		seed = nd_region->pfn_seed;
	else if (is_nd_dax(dev))
		seed = nd_region->dax_seed;

	if (seed == dev || ndns || dev->driver)
		return false;
	return true;
}

struct nd_pfn *to_nd_pfn_safe(struct device *dev)
{
	/*
	 * pfn device attributes are re-used by dax device instances, so we
	 * need to be careful to do the correct device-to-nd_pfn conversion.
	 */
	if (is_nd_pfn(dev))
		return to_nd_pfn(dev);

	if (is_nd_dax(dev)) {
		struct nd_dax *nd_dax = to_nd_dax(dev);

		return &nd_dax->nd_pfn;
	}

	WARN_ON(1);
	return NULL;
}

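/*
 * Detach the namespace from @dev.  If the device is now idle it is
 * unregistered asynchronously; otherwise its configuration (uuid, and
 * lbasize or mode) is reset so that it can be reconfigured and reused.
 */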
static void nd_detach_and_reset(struct device *dev,
		struct nd_namespace_common **_ndns)
{
	/* detach the namespace and destroy / reset the device */
	__nd_detach_ndns(dev, _ndns);
	if (is_idle(dev, *_ndns)) {
		nd_device_unregister(dev, ND_ASYNC);
	} else if (is_nd_btt(dev)) {
		struct nd_btt *nd_btt = to_nd_btt(dev);

		nd_btt->lbasize = 0;
		kfree(nd_btt->uuid);
		nd_btt->uuid = NULL;
	} else if (is_nd_pfn(dev) || is_nd_dax(dev)) {
		struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);

		kfree(nd_pfn->uuid);
		nd_pfn->uuid = NULL;
		nd_pfn->mode = PFN_MODE_NONE;
	}
}

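/*
 * Shared sysfs store handler for the btt/pfn/dax "namespace" attribute.
 * Writing a namespace device name claims that namespace for @dev; writing
 * the empty string detaches and resets the device.  The attribute's
 * ->store() method is expected to hold the nvdimm_bus lock, which is
 * checked before the claim is recorded.  Returns @len on success or a
 * negative errno.
 */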
ssize_t nd_namespace_store(struct device *dev,
		struct nd_namespace_common **_ndns, const char *buf,
		size_t len)
{
	struct nd_namespace_common *ndns;
	struct device *found;
	char *name;

	if (dev->driver) {
		dev_dbg(dev, "namespace already active\n");
		return -EBUSY;
	}

	name = kstrndup(buf, len, GFP_KERNEL);
	if (!name)
		return -ENOMEM;
	strim(name);

	if (strncmp(name, "namespace", 9) == 0 || strcmp(name, "") == 0)
		/* pass */;
	else {
		len = -EINVAL;
		goto out;
	}

	ndns = *_ndns;
	if (strcmp(name, "") == 0) {
		nd_detach_and_reset(dev, _ndns);
		goto out;
	} else if (ndns) {
		dev_dbg(dev, "namespace already set to: %s\n",
				dev_name(&ndns->dev));
		len = -EBUSY;
		goto out;
	}

	found = device_find_child(dev->parent, name, namespace_match);
	if (!found) {
		dev_dbg(dev, "'%s' not found under %s\n", name,
				dev_name(dev->parent));
		len = -ENODEV;
		goto out;
	}

	ndns = to_ndns(found);

	switch (ndns->claim_class) {
	case NVDIMM_CCLASS_NONE:
		break;
	case NVDIMM_CCLASS_BTT:
	case NVDIMM_CCLASS_BTT2:
		if (!is_nd_btt(dev)) {
			len = -EBUSY;
			goto out_attach;
		}
		break;
	case NVDIMM_CCLASS_PFN:
		if (!is_nd_pfn(dev)) {
			len = -EBUSY;
			goto out_attach;
		}
		break;
	case NVDIMM_CCLASS_DAX:
		if (!is_nd_dax(dev)) {
			len = -EBUSY;
			goto out_attach;
		}
		break;
	default:
		len = -EBUSY;
		goto out_attach;
		break;
	}

	if (__nvdimm_namespace_capacity(ndns) < SZ_16M) {
		dev_dbg(dev, "%s too small to host\n", name);
		len = -ENXIO;
		goto out_attach;
	}

	WARN_ON_ONCE(!is_nvdimm_bus_locked(dev));
	if (!__nd_attach_ndns(dev, ndns, _ndns)) {
		dev_dbg(dev, "%s already claimed\n",
				dev_name(&ndns->dev));
		len = -EBUSY;
	}

 out_attach:
	put_device(&ndns->dev); /* from device_find_child */
 out:
	kfree(name);
	return len;
}

/*
 * nd_sb_checksum: compute checksum for a generic info block
 *
 * Returns a fletcher64 checksum of everything in the given info block
 * except the last field (since that's where the checksum lives).
 */
u64 nd_sb_checksum(struct nd_gen_sb *nd_gen_sb)
{
	u64 sum;
	__le64 sum_save;

	BUILD_BUG_ON(sizeof(struct btt_sb) != SZ_4K);
	BUILD_BUG_ON(sizeof(struct nd_pfn_sb) != SZ_4K);
	BUILD_BUG_ON(sizeof(struct nd_gen_sb) != SZ_4K);

	sum_save = nd_gen_sb->checksum;
	nd_gen_sb->checksum = 0;
	sum = nd_fletcher64(nd_gen_sb, sizeof(*nd_gen_sb), 1);
	nd_gen_sb->checksum = sum_save;
	return sum;
}
EXPORT_SYMBOL(nd_sb_checksum);
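
/*
 * Illustrative use only (callers live elsewhere, e.g. the btt/pfn info
 * block validation paths): because the checksum field is temporarily
 * zeroed while summing, a verifier can compare the recomputed value
 * directly against the stored little-endian field, roughly:
 *
 *	if (nd_sb_checksum((struct nd_gen_sb *) pfn_sb)
 *			!= le64_to_cpu(pfn_sb->checksum))
 *		return -ENODEV;	// info block absent or corrupt
 */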
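
/*
 * Synchronous rw_bytes() implementation for a raw "io" namespace.  Reads
 * fail with -EIO if they intersect known bad blocks or if
 * copy_mc_to_kernel() hits a new media error.  Writes that target a
 * poisoned range first attempt to clear the poison (only for 512-byte
 * aligned, non-atomic requests), then land via memcpy_flushcache() and
 * are made durable with nvdimm_flush().
 */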
static int nsio_rw_bytes(struct nd_namespace_common *ndns,
		resource_size_t offset, void *buf, size_t size, int rw,
		unsigned long flags)
{
	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
	unsigned int sz_align = ALIGN(size + (offset & (512 - 1)), 512);
	sector_t sector = offset >> 9;
	int rc = 0, ret = 0;

	if (unlikely(!size))
		return 0;

	if (unlikely(offset + size > nsio->size)) {
		dev_WARN_ONCE(&ndns->dev, 1, "request out of range\n");
		return -EFAULT;
	}

	if (rw == READ) {
		if (unlikely(is_bad_pmem(&nsio->bb, sector, sz_align)))
			return -EIO;
		if (copy_mc_to_kernel(buf, nsio->addr + offset, size) != 0)
			return -EIO;
		return 0;
	}

	if (unlikely(is_bad_pmem(&nsio->bb, sector, sz_align))) {
		if (IS_ALIGNED(offset, 512) && IS_ALIGNED(size, 512)
				&& !(flags & NVDIMM_IO_ATOMIC)) {
			long cleared;

			might_sleep();
			cleared = nvdimm_clear_poison(&ndns->dev,
					nsio->res.start + offset, size);
			if (cleared < size)
				rc = -EIO;
			if (cleared > 0 && cleared / 512) {
				cleared /= 512;
				badblocks_clear(&nsio->bb, sector, cleared);
			}
			arch_invalidate_pmem(nsio->addr + offset, size);
		} else
			rc = -EIO;
	}

	memcpy_flushcache(nsio->addr + offset, buf, size);
	ret = nvdimm_flush(to_nd_region(ndns->dev.parent), NULL);
	if (ret)
		rc = ret;

	return rc;
}

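/*
 * Reserve and map the namespace's physical address range, populate its
 * bad-block list from the region, and install nsio_rw_bytes() as the
 * namespace's rw_bytes() operation.  All resources are device-managed,
 * so they are released automatically when @dev is unbound.
 */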
int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio,
		resource_size_t size)
{
	struct nd_namespace_common *ndns = &nsio->common;
	struct range range = {
		.start = nsio->res.start,
		.end = nsio->res.end,
	};

	nsio->size = size;
	if (!devm_request_mem_region(dev, range.start, size,
				dev_name(&ndns->dev))) {
		dev_warn(dev, "could not reserve region %pR\n", &nsio->res);
		return -EBUSY;
	}

	ndns->rw_bytes = nsio_rw_bytes;
	if (devm_init_badblocks(dev, &nsio->bb))
		return -ENOMEM;
	nvdimm_badblocks_populate(to_nd_region(ndns->dev.parent), &nsio->bb,
			&range);

	nsio->addr = devm_memremap(dev, range.start, size, ARCH_MEMREMAP_PMEM);

	return PTR_ERR_OR_ZERO(nsio->addr);
}

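/*
 * Explicit unwind of devm_nsio_enable() for callers that need to drop the
 * mapping, bad-block tracking, and region reservation before the managing
 * device is torn down.
 */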
void devm_nsio_disable(struct device *dev, struct nd_namespace_io *nsio)
{
	struct resource *res = &nsio->res;

	devm_memunmap(dev, nsio->addr);
	devm_exit_badblocks(dev, &nsio->bb);
	devm_release_mem_region(dev, res->start, nsio->size);
}