// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/delay.h>

#include "cxlmem.h"
#include "core.h"

/**
 * DOC: cxl core hdm
 *
 * Compute Express Link Host Managed Device Memory, starting with the
 * CXL 2.0 specification, is managed by an array of HDM Decoder register
 * instances per CXL port and per CXL endpoint. Define common helpers
 * for enumerating these registers and capabilities.
 */
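
/*
 * Typical usage (sketch, not a normative sequence): a port driver maps the
 * decoder registers with devm_cxl_setup_hdm() and then registers one decoder
 * object per hardware instance with devm_cxl_enumerate_decoders(). Ports
 * without an HDM decoder capability may fall back to
 * devm_cxl_add_passthrough_decoder().
 *
 * cxl_dpa_rwsem serializes device-physical-address (DPA) allocation state,
 * i.e. the endpoint decoder reservations carved out of cxl_dev_state::dpa_res.
 */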

DECLARE_RWSEM(cxl_dpa_rwsem);

static int add_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
			   int *target_map)
{
	int rc;

	rc = cxl_decoder_add_locked(cxld, target_map);
	if (rc) {
		put_device(&cxld->dev);
		dev_err(&port->dev, "Failed to add decoder\n");
		return rc;
	}

	rc = cxl_decoder_autoremove(&port->dev, cxld);
	if (rc)
		return rc;

	dev_dbg(&cxld->dev, "Added to port %s\n", dev_name(&port->dev));

	return 0;
}

/*
 * Per the CXL specification (8.2.5.12 CXL HDM Decoder Capability Structure),
 * single-ported host bridges need not publish a decoder capability when a
 * passthrough decode can be assumed, i.e. all transactions that the uport sees
 * are claimed and passed to the single dport. Disable the range until the
 * first CXL region is enumerated / activated.
 */
int devm_cxl_add_passthrough_decoder(struct cxl_port *port)
{
	struct cxl_switch_decoder *cxlsd;
	struct cxl_dport *dport = NULL;
	int single_port_map[1];
	unsigned long index;

	cxlsd = cxl_switch_decoder_alloc(port, 1);
	if (IS_ERR(cxlsd))
		return PTR_ERR(cxlsd);

	device_lock_assert(&port->dev);

	xa_for_each(&port->dports, index, dport)
		break;
	single_port_map[0] = dport->port_id;

	return add_hdm_decoder(port, &cxlsd->cxld, single_port_map);
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_passthrough_decoder, CXL);

static void parse_hdm_decoder_caps(struct cxl_hdm *cxlhdm)
{
	u32 hdm_cap;

	hdm_cap = readl(cxlhdm->regs.hdm_decoder + CXL_HDM_DECODER_CAP_OFFSET);
	cxlhdm->decoder_count = cxl_hdm_decoder_count(hdm_cap);
	cxlhdm->target_count =
		FIELD_GET(CXL_HDM_DECODER_TARGET_COUNT_MASK, hdm_cap);
	if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_11_8, hdm_cap))
		cxlhdm->interleave_mask |= GENMASK(11, 8);
	if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_14_12, hdm_cap))
		cxlhdm->interleave_mask |= GENMASK(14, 12);
}

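/*
 * Decide whether DVSEC range registers must be emulated as decoders: only
 * endpoints that have no mapped HDM decoder capability, or that were enabled
 * by platform firmware via Mem_Enable without any committed decoders, take
 * the emulation path.
 */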
static bool should_emulate_decoders(struct cxl_endpoint_dvsec_info *info)
{
	struct cxl_hdm *cxlhdm;
	void __iomem *hdm;
	u32 ctrl;
	int i;

	if (!info)
		return false;

	cxlhdm = dev_get_drvdata(&info->port->dev);
	hdm = cxlhdm->regs.hdm_decoder;

	if (!hdm)
		return true;

	/*
	 * If HDM decoders are present and the driver is in control of
	 * Mem_Enable, skip DVSEC based emulation.
	 */
	if (!info->mem_enabled)
		return false;

	/*
	 * If any decoders are committed already, there should not be any
	 * emulated DVSEC decoders.
	 */
	for (i = 0; i < cxlhdm->decoder_count; i++) {
		ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(i));
		dev_dbg(&info->port->dev,
			"decoder%d.%d: committed: %ld base: %#x_%.8x size: %#x_%.8x\n",
			info->port->id, i,
			FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMITTED, ctrl),
			readl(hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(i)),
			readl(hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(i)),
			readl(hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(i)),
			readl(hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(i)));
		if (FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMITTED, ctrl))
			return false;
	}

	return true;
}

/**
 * devm_cxl_setup_hdm - map HDM decoder component registers
 * @port: cxl_port to map
 * @info: cached DVSEC range register info
 */
struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port,
				   struct cxl_endpoint_dvsec_info *info)
{
	struct cxl_register_map *reg_map = &port->reg_map;
	struct device *dev = &port->dev;
	struct cxl_hdm *cxlhdm;
	int rc;

	cxlhdm = devm_kzalloc(dev, sizeof(*cxlhdm), GFP_KERNEL);
	if (!cxlhdm)
		return ERR_PTR(-ENOMEM);
	cxlhdm->port = port;
	dev_set_drvdata(dev, cxlhdm);

	/* Memory devices can configure device HDM using DVSEC range regs. */
	if (reg_map->resource == CXL_RESOURCE_NONE) {
		if (!info || !info->mem_enabled) {
			dev_err(dev, "No component registers mapped\n");
			return ERR_PTR(-ENXIO);
		}

		cxlhdm->decoder_count = info->ranges;
		return cxlhdm;
	}

	if (!reg_map->component_map.hdm_decoder.valid) {
		dev_dbg(&port->dev, "HDM decoder registers not implemented\n");
		/* unique error code to indicate no HDM decoder capability */
		return ERR_PTR(-ENODEV);
	}

	rc = cxl_map_component_regs(reg_map, &cxlhdm->regs,
				    BIT(CXL_CM_CAP_CAP_ID_HDM));
	if (rc) {
		dev_err(dev, "Failed to map HDM capability.\n");
		return ERR_PTR(rc);
	}

	parse_hdm_decoder_caps(cxlhdm);
	if (cxlhdm->decoder_count == 0) {
		dev_err(dev, "Spec violation. Caps invalid\n");
		return ERR_PTR(-ENXIO);
	}

	/*
	 * Now that the hdm capability is parsed, decide if range
	 * register emulation is needed and fixup cxlhdm accordingly.
	 */
	if (should_emulate_decoders(info)) {
		dev_dbg(dev, "Fallback map %d range register%s\n", info->ranges,
			info->ranges > 1 ? "s" : "");
		cxlhdm->decoder_count = info->ranges;
	}

	return cxlhdm;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_setup_hdm, CXL);

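/*
 * Debugfs helpers: dump the DPA resource tree, i.e. the ram/pmem partitions
 * that are children of cxl_dev_state::dpa_res and the per-decoder
 * allocations nested within them, one line per resource.
 */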
static void __cxl_dpa_debug(struct seq_file *file, struct resource *r, int depth)
{
	unsigned long long start = r->start, end = r->end;

	seq_printf(file, "%*s%08llx-%08llx : %s\n", depth * 2, "", start, end,
		   r->name);
}

void cxl_dpa_debug(struct seq_file *file, struct cxl_dev_state *cxlds)
{
	struct resource *p1, *p2;

	down_read(&cxl_dpa_rwsem);
	for (p1 = cxlds->dpa_res.child; p1; p1 = p1->sibling) {
		__cxl_dpa_debug(file, p1, 0);
		for (p2 = p1->child; p2; p2 = p2->sibling)
			__cxl_dpa_debug(file, p2, 1);
	}
	up_read(&cxl_dpa_rwsem);
}
EXPORT_SYMBOL_NS_GPL(cxl_dpa_debug, CXL);

/*
 * Must be called in a context that synchronizes against this decoder's
 * port ->remove() callback (like an endpoint decoder sysfs attribute)
 */
static void __cxl_dpa_release(struct cxl_endpoint_decoder *cxled)
{
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct cxl_port *port = cxled_to_port(cxled);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct resource *res = cxled->dpa_res;
	resource_size_t skip_start;

	lockdep_assert_held_write(&cxl_dpa_rwsem);

	/* save @skip_start, before @res is released */
	skip_start = res->start - cxled->skip;
	__release_region(&cxlds->dpa_res, res->start, resource_size(res));
	if (cxled->skip)
		__release_region(&cxlds->dpa_res, skip_start, cxled->skip);
	cxled->skip = 0;
	cxled->dpa_res = NULL;
	put_device(&cxled->cxld.dev);
	port->hdm_end--;
}

static void cxl_dpa_release(void *cxled)
{
	down_write(&cxl_dpa_rwsem);
	__cxl_dpa_release(cxled);
	up_write(&cxl_dpa_rwsem);
}

/*
 * Must be called from a context that will not race port device
 * unregistration, like decoder sysfs attribute methods
 */
static void devm_cxl_dpa_release(struct cxl_endpoint_decoder *cxled)
{
	struct cxl_port *port = cxled_to_port(cxled);

	lockdep_assert_held_write(&cxl_dpa_rwsem);
	devm_remove_action(&port->dev, cxl_dpa_release, cxled);
	__cxl_dpa_release(cxled);
}

static int __cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
			     resource_size_t base, resource_size_t len,
			     resource_size_t skipped)
{
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct cxl_port *port = cxled_to_port(cxled);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct device *dev = &port->dev;
	struct resource *res;

	lockdep_assert_held_write(&cxl_dpa_rwsem);

	if (!len) {
		dev_warn(dev, "decoder%d.%d: empty reservation attempted\n",
			 port->id, cxled->cxld.id);
		return -EINVAL;
	}

	if (cxled->dpa_res) {
		dev_dbg(dev, "decoder%d.%d: existing allocation %pr assigned\n",
			port->id, cxled->cxld.id, cxled->dpa_res);
		return -EBUSY;
	}

	if (port->hdm_end + 1 != cxled->cxld.id) {
		/*
		 * Assumes alloc and commit order is always in hardware instance
		 * order per expectations from 8.2.5.12.20 Committing Decoder
		 * Programming that enforce decoder[m] committed before
		 * decoder[m+1] commit start.
		 */
		dev_dbg(dev, "decoder%d.%d: expected decoder%d.%d\n", port->id,
			cxled->cxld.id, port->id, port->hdm_end + 1);
		return -EBUSY;
	}

	if (skipped) {
		res = __request_region(&cxlds->dpa_res, base - skipped, skipped,
				       dev_name(&cxled->cxld.dev), 0);
		if (!res) {
			dev_dbg(dev,
				"decoder%d.%d: failed to reserve skipped space\n",
				port->id, cxled->cxld.id);
			return -EBUSY;
		}
	}
	res = __request_region(&cxlds->dpa_res, base, len,
			       dev_name(&cxled->cxld.dev), 0);
	if (!res) {
		dev_dbg(dev, "decoder%d.%d: failed to reserve allocation\n",
			port->id, cxled->cxld.id);
		if (skipped)
			__release_region(&cxlds->dpa_res, base - skipped,
					 skipped);
		return -EBUSY;
	}
	cxled->dpa_res = res;
	cxled->skip = skipped;

	if (resource_contains(&cxlds->pmem_res, res))
		cxled->mode = CXL_DECODER_PMEM;
	else if (resource_contains(&cxlds->ram_res, res))
		cxled->mode = CXL_DECODER_RAM;
	else {
		dev_dbg(dev, "decoder%d.%d: %pr mixed\n", port->id,
			cxled->cxld.id, cxled->dpa_res);
		cxled->mode = CXL_DECODER_MIXED;
	}

	port->hdm_end++;
	get_device(&cxled->cxld.dev);
	return 0;
}

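/**
 * devm_cxl_dpa_reserve - reserve a DPA span on behalf of a decoder
 * @cxled: endpoint decoder that will own the reservation
 * @base: DPA start address of the allocation
 * @len: length of the allocation
 * @skipped: DPA span to reserve, but not decode, ahead of @base
 *
 * The reservation is released automatically when @cxled's port is
 * unregistered, or earlier via cxl_dpa_free().
 */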
int devm_cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
				resource_size_t base, resource_size_t len,
				resource_size_t skipped)
{
	struct cxl_port *port = cxled_to_port(cxled);
	int rc;

	down_write(&cxl_dpa_rwsem);
	rc = __cxl_dpa_reserve(cxled, base, len, skipped);
	up_write(&cxl_dpa_rwsem);

	if (rc)
		return rc;

	return devm_add_action_or_reset(&port->dev, cxl_dpa_release, cxled);
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_dpa_reserve, CXL);

resource_size_t cxl_dpa_size(struct cxl_endpoint_decoder *cxled)
{
	resource_size_t size = 0;

	down_read(&cxl_dpa_rwsem);
	if (cxled->dpa_res)
		size = resource_size(cxled->dpa_res);
	up_read(&cxl_dpa_rwsem);

	return size;
}

resource_size_t cxl_dpa_resource_start(struct cxl_endpoint_decoder *cxled)
{
	resource_size_t base = -1;

	lockdep_assert_held(&cxl_dpa_rwsem);
	if (cxled->dpa_res)
		base = cxled->dpa_res->start;

	return base;
}

int cxl_dpa_free(struct cxl_endpoint_decoder *cxled)
{
	struct cxl_port *port = cxled_to_port(cxled);
	struct device *dev = &cxled->cxld.dev;
	int rc;

	down_write(&cxl_dpa_rwsem);
	if (!cxled->dpa_res) {
		rc = 0;
		goto out;
	}
	if (cxled->cxld.region) {
		dev_dbg(dev, "decoder assigned to: %s\n",
			dev_name(&cxled->cxld.region->dev));
		rc = -EBUSY;
		goto out;
	}
	if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) {
		dev_dbg(dev, "decoder enabled\n");
		rc = -EBUSY;
		goto out;
	}
	if (cxled->cxld.id != port->hdm_end) {
		dev_dbg(dev, "expected decoder%d.%d\n", port->id,
			port->hdm_end);
		rc = -EBUSY;
		goto out;
	}
	devm_cxl_dpa_release(cxled);
	rc = 0;
out:
	up_write(&cxl_dpa_rwsem);
	return rc;
}

int cxl_dpa_set_mode(struct cxl_endpoint_decoder *cxled,
		     enum cxl_decoder_mode mode)
{
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct device *dev = &cxled->cxld.dev;
	int rc;

	switch (mode) {
	case CXL_DECODER_RAM:
	case CXL_DECODER_PMEM:
		break;
	default:
		dev_dbg(dev, "unsupported mode: %d\n", mode);
		return -EINVAL;
	}

	down_write(&cxl_dpa_rwsem);
	if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) {
		rc = -EBUSY;
		goto out;
	}

	/*
	 * Only allow modes that are supported by the current partition
	 * configuration
	 */
	if (mode == CXL_DECODER_PMEM && !resource_size(&cxlds->pmem_res)) {
		dev_dbg(dev, "no available pmem capacity\n");
		rc = -ENXIO;
		goto out;
	}
	if (mode == CXL_DECODER_RAM && !resource_size(&cxlds->ram_res)) {
		dev_dbg(dev, "no available ram capacity\n");
		rc = -ENXIO;
		goto out;
	}

	cxled->mode = mode;
	rc = 0;
out:
	up_write(&cxl_dpa_rwsem);

	return rc;
}

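/*
 * Allocate @size bytes at the next free DPA offset of the partition selected
 * by cxled->mode (ram or pmem), recording any ram capacity that a pmem
 * allocation needs to skip over.
 */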
int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, unsigned long long size)
{
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	resource_size_t free_ram_start, free_pmem_start;
	struct cxl_port *port = cxled_to_port(cxled);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct device *dev = &cxled->cxld.dev;
	resource_size_t start, avail, skip;
	struct resource *p, *last;
	int rc;

	down_write(&cxl_dpa_rwsem);
	if (cxled->cxld.region) {
		dev_dbg(dev, "decoder attached to %s\n",
			dev_name(&cxled->cxld.region->dev));
		rc = -EBUSY;
		goto out;
	}

	if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) {
		dev_dbg(dev, "decoder enabled\n");
		rc = -EBUSY;
		goto out;
	}

	for (p = cxlds->ram_res.child, last = NULL; p; p = p->sibling)
		last = p;
	if (last)
		free_ram_start = last->end + 1;
	else
		free_ram_start = cxlds->ram_res.start;

	for (p = cxlds->pmem_res.child, last = NULL; p; p = p->sibling)
		last = p;
	if (last)
		free_pmem_start = last->end + 1;
	else
		free_pmem_start = cxlds->pmem_res.start;

	if (cxled->mode == CXL_DECODER_RAM) {
		start = free_ram_start;
		avail = cxlds->ram_res.end - start + 1;
		skip = 0;
	} else if (cxled->mode == CXL_DECODER_PMEM) {
		resource_size_t skip_start, skip_end;

		start = free_pmem_start;
		avail = cxlds->pmem_res.end - start + 1;
		skip_start = free_ram_start;

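		/*
		 * Example (hypothetical numbers): with ram spanning DPA
		 * 0x0-0xfffffff and pmem 0x10000000-0x1fffffff, a pmem
		 * allocation made while no ram is allocated must skip the
		 * entire unallocated ram span, so skip_start == free_ram_start
		 * == 0x0 and the skip runs up to the start of this pmem
		 * allocation.
		 */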
		/*
		 * If some pmem is already allocated, then that allocation
		 * already handled the skip.
		 */
		if (cxlds->pmem_res.child &&
		    skip_start == cxlds->pmem_res.child->start)
			skip_end = skip_start - 1;
		else
			skip_end = start - 1;
		skip = skip_end - skip_start + 1;
	} else {
		dev_dbg(dev, "mode not set\n");
		rc = -EINVAL;
		goto out;
	}

	if (size > avail) {
		dev_dbg(dev, "%pa exceeds available %s capacity: %pa\n", &size,
			cxled->mode == CXL_DECODER_RAM ? "ram" : "pmem",
			&avail);
		rc = -ENOSPC;
		goto out;
	}

	rc = __cxl_dpa_reserve(cxled, start, size, skip);
out:
	up_write(&cxl_dpa_rwsem);

	if (rc)
		return rc;

	return devm_add_action_or_reset(&port->dev, cxl_dpa_release, cxled);
}

static void cxld_set_interleave(struct cxl_decoder *cxld, u32 *ctrl)
{
	u16 eig;
	u8 eiw;

	/*
	 * Input validation ensures these warns never fire, but otherwise
	 * suppress uninitialized variable usage warnings.
	 */
	if (WARN_ONCE(ways_to_eiw(cxld->interleave_ways, &eiw),
		      "invalid interleave_ways: %d\n", cxld->interleave_ways))
		return;
	if (WARN_ONCE(granularity_to_eig(cxld->interleave_granularity, &eig),
		      "invalid interleave_granularity: %d\n",
		      cxld->interleave_granularity))
		return;

	u32p_replace_bits(ctrl, eig, CXL_HDM_DECODER0_CTRL_IG_MASK);
	u32p_replace_bits(ctrl, eiw, CXL_HDM_DECODER0_CTRL_IW_MASK);
	*ctrl |= CXL_HDM_DECODER0_CTRL_COMMIT;
}

static void cxld_set_type(struct cxl_decoder *cxld, u32 *ctrl)
{
	u32p_replace_bits(ctrl,
			  !!(cxld->target_type == CXL_DECODER_HOSTONLYMEM),
			  CXL_HDM_DECODER0_CTRL_HOSTONLY);
}

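/*
 * Pack the switch decoder's dport ids into the 64-bit target list register
 * image, one byte per interleave way (way 0 in bits 7:0, way 1 in bits 15:8,
 * and so on).
 */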
static void cxlsd_set_targets(struct cxl_switch_decoder *cxlsd, u64 *tgt)
{
	struct cxl_dport **t = &cxlsd->target[0];
	int ways = cxlsd->cxld.interleave_ways;

	*tgt = FIELD_PREP(GENMASK(7, 0), t[0]->port_id);
	if (ways > 1)
		*tgt |= FIELD_PREP(GENMASK(15, 8), t[1]->port_id);
	if (ways > 2)
		*tgt |= FIELD_PREP(GENMASK(23, 16), t[2]->port_id);
	if (ways > 3)
		*tgt |= FIELD_PREP(GENMASK(31, 24), t[3]->port_id);
	if (ways > 4)
		*tgt |= FIELD_PREP(GENMASK_ULL(39, 32), t[4]->port_id);
	if (ways > 5)
		*tgt |= FIELD_PREP(GENMASK_ULL(47, 40), t[5]->port_id);
	if (ways > 6)
		*tgt |= FIELD_PREP(GENMASK_ULL(55, 48), t[6]->port_id);
	if (ways > 7)
		*tgt |= FIELD_PREP(GENMASK_ULL(63, 56), t[7]->port_id);
}

/*
 * Per CXL 2.0 8.2.5.12.20 Committing Decoder Programming, hardware must set
 * the committed or error bit within 10ms, but be generous and allow 20ms to
 * account for clock skew and other marginal behavior.
 */
#define COMMIT_TIMEOUT_MS 20
static int cxld_await_commit(void __iomem *hdm, int id)
{
	u32 ctrl;
	int i;

	for (i = 0; i < COMMIT_TIMEOUT_MS; i++) {
		ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
		if (FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMIT_ERROR, ctrl)) {
			ctrl &= ~CXL_HDM_DECODER0_CTRL_COMMIT;
			writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
			return -EIO;
		}
		if (FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMITTED, ctrl))
			return 0;
		fsleep(1000);
	}

	return -ETIMEDOUT;
}

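/*
 * Program and commit a decoder: write the base/size (and target list or DPA
 * skip), set the commit bit, and poll for the hardware to report committed
 * or error. Commits must occur in decoder instance order.
 */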
static int cxl_decoder_commit(struct cxl_decoder *cxld)
{
	struct cxl_port *port = to_cxl_port(cxld->dev.parent);
	struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
	void __iomem *hdm = cxlhdm->regs.hdm_decoder;
	int id = cxld->id, rc;
	u64 base, size;
	u32 ctrl;

	if (cxld->flags & CXL_DECODER_F_ENABLE)
		return 0;

	if (cxl_num_decoders_committed(port) != id) {
		dev_dbg(&port->dev,
			"%s: out of order commit, expected decoder%d.%d\n",
			dev_name(&cxld->dev), port->id,
			cxl_num_decoders_committed(port));
		return -EBUSY;
	}

	/*
	 * For endpoint decoders hosted on CXL memory devices that
	 * support the sanitize operation, make sure sanitize is not in-flight.
	 */
	if (is_endpoint_decoder(&cxld->dev)) {
		struct cxl_endpoint_decoder *cxled =
			to_cxl_endpoint_decoder(&cxld->dev);
		struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
		struct cxl_memdev_state *mds =
			to_cxl_memdev_state(cxlmd->cxlds);

		if (mds && mds->security.sanitize_active) {
			dev_dbg(&cxlmd->dev,
				"attempted to commit %s during sanitize\n",
				dev_name(&cxld->dev));
			return -EBUSY;
		}
	}

	down_read(&cxl_dpa_rwsem);
	/* common decoder settings */
	ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(cxld->id));
	cxld_set_interleave(cxld, &ctrl);
	cxld_set_type(cxld, &ctrl);
	base = cxld->hpa_range.start;
	size = range_len(&cxld->hpa_range);

	writel(upper_32_bits(base), hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(id));
	writel(lower_32_bits(base), hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(id));
	writel(upper_32_bits(size), hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(id));
	writel(lower_32_bits(size), hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(id));

	if (is_switch_decoder(&cxld->dev)) {
		struct cxl_switch_decoder *cxlsd =
			to_cxl_switch_decoder(&cxld->dev);
		void __iomem *tl_hi = hdm + CXL_HDM_DECODER0_TL_HIGH(id);
		void __iomem *tl_lo = hdm + CXL_HDM_DECODER0_TL_LOW(id);
		u64 targets;

		cxlsd_set_targets(cxlsd, &targets);
		writel(upper_32_bits(targets), tl_hi);
		writel(lower_32_bits(targets), tl_lo);
	} else {
		struct cxl_endpoint_decoder *cxled =
			to_cxl_endpoint_decoder(&cxld->dev);
		void __iomem *sk_hi = hdm + CXL_HDM_DECODER0_SKIP_HIGH(id);
		void __iomem *sk_lo = hdm + CXL_HDM_DECODER0_SKIP_LOW(id);

		writel(upper_32_bits(cxled->skip), sk_hi);
		writel(lower_32_bits(cxled->skip), sk_lo);
	}

	writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
	up_read(&cxl_dpa_rwsem);

	port->commit_end++;
	rc = cxld_await_commit(hdm, cxld->id);
	if (rc) {
		dev_dbg(&port->dev, "%s: error %d committing decoder\n",
			dev_name(&cxld->dev), rc);
		cxld->reset(cxld);
		return rc;
	}
	cxld->flags |= CXL_DECODER_F_ENABLE;

	return 0;
}

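/*
 * Tear down a committed decoder: clear the commit bit and zero the base/size
 * registers. Like commit, resets are only honored in (reverse) instance
 * order, enforced via port->commit_end.
 */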
static int cxl_decoder_reset(struct cxl_decoder *cxld)
{
	struct cxl_port *port = to_cxl_port(cxld->dev.parent);
	struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
	void __iomem *hdm = cxlhdm->regs.hdm_decoder;
	int id = cxld->id;
	u32 ctrl;

	if ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)
		return 0;

	if (port->commit_end != id) {
		dev_dbg(&port->dev,
			"%s: out of order reset, expected decoder%d.%d\n",
			dev_name(&cxld->dev), port->id, port->commit_end);
		return -EBUSY;
	}

	down_read(&cxl_dpa_rwsem);
	ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
	ctrl &= ~CXL_HDM_DECODER0_CTRL_COMMIT;
	writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));

	writel(0, hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(id));
	writel(0, hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(id));
	writel(0, hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(id));
	writel(0, hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(id));
	up_read(&cxl_dpa_rwsem);

	port->commit_end--;
	cxld->flags &= ~CXL_DECODER_F_ENABLE;

	/* Userspace is now responsible for reconfiguring this decoder */
	if (is_endpoint_decoder(&cxld->dev)) {
		struct cxl_endpoint_decoder *cxled;

		cxled = to_cxl_endpoint_decoder(&cxld->dev);
		cxled->state = CXL_DECODER_STATE_MANUAL;
	}

	return 0;
}

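/*
 * Emulate an endpoint decoder from a DVSEC range register: mark it enabled
 * and locked (range registers cannot be reprogrammed at run time) and
 * reserve the corresponding DPA span.
 */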
static int cxl_setup_hdm_decoder_from_dvsec(
	struct cxl_port *port, struct cxl_decoder *cxld, u64 *dpa_base,
	int which, struct cxl_endpoint_dvsec_info *info)
{
	struct cxl_endpoint_decoder *cxled;
	u64 len;
	int rc;

	if (!is_cxl_endpoint(port))
		return -EOPNOTSUPP;

	cxled = to_cxl_endpoint_decoder(&cxld->dev);
	len = range_len(&info->dvsec_range[which]);
	if (!len)
		return -ENOENT;

	cxld->target_type = CXL_DECODER_HOSTONLYMEM;
	cxld->commit = NULL;
	cxld->reset = NULL;
	cxld->hpa_range = info->dvsec_range[which];

	/*
	 * Set the emulated decoder as locked pending additional support to
	 * change the range registers at run time.
	 */
	cxld->flags |= CXL_DECODER_F_ENABLE | CXL_DECODER_F_LOCK;
	port->commit_end = cxld->id;

	rc = devm_cxl_dpa_reserve(cxled, *dpa_base, len, 0);
	if (rc) {
		dev_err(&port->dev,
			"decoder%d.%d: Failed to reserve DPA range %#llx - %#llx (%d)\n",
			port->id, cxld->id, *dpa_base, *dpa_base + len - 1, rc);
		return rc;
	}
	*dpa_base += len;
	cxled->state = CXL_DECODER_STATE_AUTO;

	return 0;
}

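/*
 * Initialize one decoder from its HDM register instance (or from DVSEC range
 * registers when emulation is in effect): read back base/size/control,
 * validate any pre-committed configuration, and record target or DPA
 * settings for registration.
 */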
static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
			    int *target_map, void __iomem *hdm, int which,
			    u64 *dpa_base, struct cxl_endpoint_dvsec_info *info)
{
	struct cxl_endpoint_decoder *cxled = NULL;
	u64 size, base, skip, dpa_size, lo, hi;
	bool committed;
	u32 remainder;
	int i, rc;
	u32 ctrl;
	union {
		u64 value;
		unsigned char target_id[8];
	} target_list;

	if (should_emulate_decoders(info))
		return cxl_setup_hdm_decoder_from_dvsec(port, cxld, dpa_base,
							which, info);

	ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(which));
	lo = readl(hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(which));
	hi = readl(hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(which));
	base = (hi << 32) + lo;
	lo = readl(hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(which));
	hi = readl(hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(which));
	size = (hi << 32) + lo;
	committed = !!(ctrl & CXL_HDM_DECODER0_CTRL_COMMITTED);
	cxld->commit = cxl_decoder_commit;
	cxld->reset = cxl_decoder_reset;

	if (!committed)
		size = 0;
	if (base == U64_MAX || size == U64_MAX) {
		dev_warn(&port->dev, "decoder%d.%d: Invalid resource range\n",
			 port->id, cxld->id);
		return -ENXIO;
	}

	if (info)
		cxled = to_cxl_endpoint_decoder(&cxld->dev);
	cxld->hpa_range = (struct range) {
		.start = base,
		.end = base + size - 1,
	};

	/* decoders are enabled if committed */
	if (committed) {
		cxld->flags |= CXL_DECODER_F_ENABLE;
		if (ctrl & CXL_HDM_DECODER0_CTRL_LOCK)
			cxld->flags |= CXL_DECODER_F_LOCK;
		if (FIELD_GET(CXL_HDM_DECODER0_CTRL_HOSTONLY, ctrl))
			cxld->target_type = CXL_DECODER_HOSTONLYMEM;
		else
			cxld->target_type = CXL_DECODER_DEVMEM;

		guard(rwsem_write)(&cxl_region_rwsem);
		if (cxld->id != cxl_num_decoders_committed(port)) {
			dev_warn(&port->dev,
				 "decoder%d.%d: Committed out of order\n",
				 port->id, cxld->id);
			return -ENXIO;
		}

		if (size == 0) {
			dev_warn(&port->dev,
				 "decoder%d.%d: Committed with zero size\n",
				 port->id, cxld->id);
			return -ENXIO;
		}
		port->commit_end = cxld->id;
	} else {
		if (cxled) {
			struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
			struct cxl_dev_state *cxlds = cxlmd->cxlds;

			/*
			 * Default by devtype until a device arrives that needs
			 * more precision.
			 */
			if (cxlds->type == CXL_DEVTYPE_CLASSMEM)
				cxld->target_type = CXL_DECODER_HOSTONLYMEM;
			else
				cxld->target_type = CXL_DECODER_DEVMEM;
		} else {
			/* To be overridden by region type at commit time */
			cxld->target_type = CXL_DECODER_HOSTONLYMEM;
		}

		if (!FIELD_GET(CXL_HDM_DECODER0_CTRL_HOSTONLY, ctrl) &&
		    cxld->target_type == CXL_DECODER_HOSTONLYMEM) {
			ctrl |= CXL_HDM_DECODER0_CTRL_HOSTONLY;
			writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(which));
		}
	}
	rc = eiw_to_ways(FIELD_GET(CXL_HDM_DECODER0_CTRL_IW_MASK, ctrl),
			  &cxld->interleave_ways);
	if (rc) {
		dev_warn(&port->dev,
			 "decoder%d.%d: Invalid interleave ways (ctrl: %#x)\n",
			 port->id, cxld->id, ctrl);
		return rc;
	}
	rc = eig_to_granularity(FIELD_GET(CXL_HDM_DECODER0_CTRL_IG_MASK, ctrl),
				 &cxld->interleave_granularity);
	if (rc)
		return rc;

	dev_dbg(&port->dev, "decoder%d.%d: range: %#llx-%#llx iw: %d ig: %d\n",
		port->id, cxld->id, cxld->hpa_range.start, cxld->hpa_range.end,
		cxld->interleave_ways, cxld->interleave_granularity);

	if (!cxled) {
		lo = readl(hdm + CXL_HDM_DECODER0_TL_LOW(which));
		hi = readl(hdm + CXL_HDM_DECODER0_TL_HIGH(which));
		target_list.value = (hi << 32) + lo;
		for (i = 0; i < cxld->interleave_ways; i++)
			target_map[i] = target_list.target_id[i];

		return 0;
	}

	if (!committed)
		return 0;

	dpa_size = div_u64_rem(size, cxld->interleave_ways, &remainder);
	if (remainder) {
		dev_err(&port->dev,
			"decoder%d.%d: invalid committed configuration size: %#llx ways: %d\n",
			port->id, cxld->id, size, cxld->interleave_ways);
		return -ENXIO;
	}
	lo = readl(hdm + CXL_HDM_DECODER0_SKIP_LOW(which));
	hi = readl(hdm + CXL_HDM_DECODER0_SKIP_HIGH(which));
	skip = (hi << 32) + lo;
	rc = devm_cxl_dpa_reserve(cxled, *dpa_base + skip, dpa_size, skip);
	if (rc) {
		dev_err(&port->dev,
			"decoder%d.%d: Failed to reserve DPA range %#llx - %#llx (%d)\n",
			port->id, cxld->id, *dpa_base,
			*dpa_base + dpa_size + skip - 1, rc);
		return rc;
	}
	*dpa_base += dpa_size + skip;

	cxled->state = CXL_DECODER_STATE_AUTO;

	return 0;
}

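/*
 * Give any in-flight decoder commits started before the driver claimed the
 * registers a chance to settle before trusting the committed bits.
 */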
static void cxl_settle_decoders(struct cxl_hdm *cxlhdm)
{
	void __iomem *hdm = cxlhdm->regs.hdm_decoder;
	int committed, i;
	u32 ctrl;

	if (!hdm)
		return;

	/*
	 * Since the register resource was recently claimed via request_region(),
	 * be careful about trusting the "not-committed" status until the commit
	 * timeout has elapsed.  The commit timeout is 10ms (CXL 2.0
	 * 8.2.5.12.20), but double it to be tolerant of any clock skew between
	 * host and target.
	 */
	for (i = 0, committed = 0; i < cxlhdm->decoder_count; i++) {
		ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(i));
		if (ctrl & CXL_HDM_DECODER0_CTRL_COMMITTED)
			committed++;
	}

	/* ensure that future checks of committed can be trusted */
	if (committed != cxlhdm->decoder_count)
		msleep(20);
}

/**
 * devm_cxl_enumerate_decoders - add decoder objects per HDM register set
 * @cxlhdm: Structure to populate with HDM capabilities
 * @info: cached DVSEC range register info
 */
int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
				struct cxl_endpoint_dvsec_info *info)
{
	void __iomem *hdm = cxlhdm->regs.hdm_decoder;
	struct cxl_port *port = cxlhdm->port;
	int i;
	u64 dpa_base = 0;

	cxl_settle_decoders(cxlhdm);

	for (i = 0; i < cxlhdm->decoder_count; i++) {
		int target_map[CXL_DECODER_MAX_INTERLEAVE] = { 0 };
		int rc, target_count = cxlhdm->target_count;
		struct cxl_decoder *cxld;

		if (is_cxl_endpoint(port)) {
			struct cxl_endpoint_decoder *cxled;

			cxled = cxl_endpoint_decoder_alloc(port);
			if (IS_ERR(cxled)) {
				dev_warn(&port->dev,
					 "Failed to allocate decoder%d.%d\n",
					 port->id, i);
				return PTR_ERR(cxled);
			}
			cxld = &cxled->cxld;
		} else {
			struct cxl_switch_decoder *cxlsd;

			cxlsd = cxl_switch_decoder_alloc(port, target_count);
			if (IS_ERR(cxlsd)) {
				dev_warn(&port->dev,
					 "Failed to allocate decoder%d.%d\n",
					 port->id, i);
				return PTR_ERR(cxlsd);
			}
			cxld = &cxlsd->cxld;
		}

		rc = init_hdm_decoder(port, cxld, target_map, hdm, i,
				      &dpa_base, info);
		if (rc) {
			dev_warn(&port->dev,
				 "Failed to initialize decoder%d.%d\n",
				 port->id, i);
			put_device(&cxld->dev);
			return rc;
		}
		rc = add_hdm_decoder(port, cxld, target_map);
		if (rc) {
			dev_warn(&port->dev,
				 "Failed to add decoder%d.%d\n", port->id, i);
			return rc;
		}
	}

	return 0;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_enumerate_decoders, CXL);