// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for Pondicherry2 memory controller.
 *
 * Copyright (c) 2016, Intel Corporation.
 *
 * [Derived from sb_edac.c]
 *
 * Translation of system physical addresses to DIMM addresses
 * is a two-stage process:
 *
 * First the Pondicherry 2 memory controller handles slice and channel interleaving
 * in "sys2pmi()". This is (almost) completely common between platforms.
 *
 * Then a platform-specific dunit (DIMM unit) completes the process to provide DIMM,
 * rank, bank, row and column using the appropriate "dunit_ops" functions/parameters.
 */

#include <linux/bitmap.h>
#include <linux/delay.h>
#include <linux/edac.h>
#include <linux/init.h>
#include <linux/math64.h>
#include <linux/mmzone.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/smp.h>

#include <linux/platform_data/x86/p2sb.h>

#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include <asm/processor.h>
#include <asm/mce.h>

#include "edac_mc.h"
#include "edac_module.h"
#include "pnd2_edac.h"

#define EDAC_MOD_STR		"pnd2_edac"

#define APL_NUM_CHANNELS	4
#define DNV_NUM_CHANNELS	2
#define DNV_MAX_DIMMS		2 /* Max DIMMs per channel */

enum type {
	APL,
	DNV, /* All requests go to PMI CH0 on each slice (CH1 disabled) */
};

struct dram_addr {
	int chan;
	int dimm;
	int rank;
	int bank;
	int row;
	int col;
};

struct pnd2_pvt {
	int dimm_geom[APL_NUM_CHANNELS];
	u64 tolm, tohm;
};

/*
 * System address space is divided into multiple regions with
 * different interleave rules in each. The as0/as1 regions
 * have no interleaving at all. The as2 region is interleaved
 * between two channels. The mot region is magic and may overlap
 * other regions, with its interleave rules taking precedence.
 * Addresses not in any of these regions are interleaved across
 * all four channels.
 */
static struct region {
	u64	base;
	u64	limit;
	u8	enabled;
} mot, as0, as1, as2;

static struct dunit_ops {
	char *name;
	enum type type;
	int pmiaddr_shift;
	int pmiidx_shift;
	int channels;
	int dimms_per_channel;
	int (*rd_reg)(int port, int off, int op, void *data, size_t sz, char *name);
	int (*get_registers)(void);
	int (*check_ecc)(void);
	void (*mk_region)(char *name, struct region *rp, void *asym);
	void (*get_dimm_config)(struct mem_ctl_info *mci);
	int (*pmi2mem)(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx,
				   struct dram_addr *daddr, char *msg);
} *ops;

static struct mem_ctl_info *pnd2_mci;

#define PND2_MSG_SIZE	256

/* Debug macros */
#define pnd2_printk(level, fmt, arg...)			\
	edac_printk(level, "pnd2", fmt, ##arg)

#define pnd2_mc_printk(mci, level, fmt, arg...)	\
	edac_mc_chipset_printk(mci, level, "pnd2", fmt, ##arg)

#define MOT_CHAN_INTLV_BIT_1SLC_2CH 12
#define MOT_CHAN_INTLV_BIT_2SLC_2CH 13
#define SELECTOR_DISABLED (-1)

#define PMI_ADDRESS_WIDTH	31
#define PND_MAX_PHYS_BIT	39

#define APL_ASYMSHIFT		28
#define DNV_ASYMSHIFT		31
#define CH_HASH_MASK_LSB	6
#define SLICE_HASH_MASK_LSB	6
#define MOT_SLC_INTLV_BIT	12
#define LOG2_PMI_ADDR_GRANULARITY	5
#define MOT_SHIFT	24

#define GET_BITFIELD(v, lo, hi)	(((v) & GENMASK_ULL(hi, lo)) >> (lo))
#define U64_LSHIFT(val, s)	((u64)(val) << (s))

/*
 * On Apollo Lake we access memory controller registers via a
 * side-band mailbox style interface in a hidden PCI device
 * configuration space.
 */
static struct pci_bus	*p2sb_bus;
#define P2SB_DEVFN	PCI_DEVFN(0xd, 0)
#define P2SB_ADDR_OFF	0xd0
#define P2SB_DATA_OFF	0xd4
#define P2SB_STAT_OFF	0xd8
#define P2SB_ROUT_OFF	0xda
#define P2SB_EADD_OFF	0xdc
#define P2SB_HIDE_OFF	0xe1

#define P2SB_BUSY	1

#define P2SB_READ(size, off, ptr) \
	pci_bus_read_config_##size(p2sb_bus, P2SB_DEVFN, off, ptr)
#define P2SB_WRITE(size, off, val) \
	pci_bus_write_config_##size(p2sb_bus, P2SB_DEVFN, off, val)

static bool p2sb_is_busy(u16 *status)
{
	P2SB_READ(word, P2SB_STAT_OFF, status);

	return !!(*status & P2SB_BUSY);
}

static int _apl_rd_reg(int port, int off, int op, u32 *data)
{
	int retries = 0xff, ret;
	u16 status;
	u8 hidden;

	/* Unhide the P2SB device, if it's hidden */
	P2SB_READ(byte, P2SB_HIDE_OFF, &hidden);
	if (hidden)
		P2SB_WRITE(byte, P2SB_HIDE_OFF, 0);

	if (p2sb_is_busy(&status)) {
		ret = -EAGAIN;
		goto out;
	}

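	/*
	 * Post the read request: target (port << 24 | offset), zeroed
	 * data/extended-address/routing fields, then the opcode plus
	 * the busy flag to start the transaction.
	 */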
	P2SB_WRITE(dword, P2SB_ADDR_OFF, (port << 24) | off);
	P2SB_WRITE(dword, P2SB_DATA_OFF, 0);
	P2SB_WRITE(dword, P2SB_EADD_OFF, 0);
	P2SB_WRITE(word, P2SB_ROUT_OFF, 0);
	P2SB_WRITE(word, P2SB_STAT_OFF, (op << 8) | P2SB_BUSY);

	while (p2sb_is_busy(&status)) {
		if (retries-- == 0) {
			ret = -EBUSY;
			goto out;
		}
	}

	P2SB_READ(dword, P2SB_DATA_OFF, data);
	ret = (status >> 1) & GENMASK(1, 0);
out:
	/* Hide the P2SB device, if it was hidden before */
	if (hidden)
		P2SB_WRITE(byte, P2SB_HIDE_OFF, hidden);

	return ret;
}

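/*
 * Read a 4- or 8-byte register through the P2SB mailbox. An 8-byte
 * register is fetched as two 32-bit sideband reads, high half first.
 */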
static int apl_rd_reg(int port, int off, int op, void *data, size_t sz, char *name)
{
	int ret = 0;

	edac_dbg(2, "Read %s port=%x off=%x op=%x\n", name, port, off, op);
	switch (sz) {
	case 8:
		ret = _apl_rd_reg(port, off + 4, op, (u32 *)(data + 4));
		fallthrough;
	case 4:
		ret |= _apl_rd_reg(port, off, op, (u32 *)data);
		pnd2_printk(KERN_DEBUG, "%s=%x%08x ret=%d\n", name,
					sz == 8 ? *((u32 *)(data + 4)) : 0, *((u32 *)data), ret);
		break;
	}

	return ret;
}

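/*
 * Read MCHBAR (the memory controller hub base address) from the host
 * bridge (device ID 0x1980): register 0x48 holds the enable bit and the
 * low part of the base, 0x4c the high part.
 */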
static u64 get_mem_ctrl_hub_base_addr(void)
{
	struct b_cr_mchbar_lo_pci lo;
	struct b_cr_mchbar_hi_pci hi;
	struct pci_dev *pdev;

	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x1980, NULL);
	if (pdev) {
		pci_read_config_dword(pdev, 0x48, (u32 *)&lo);
		pci_read_config_dword(pdev, 0x4c, (u32 *)&hi);
		pci_dev_put(pdev);
	} else {
		return 0;
	}

	if (!lo.enable) {
		edac_dbg(2, "MMIO via memory controller hub base address is disabled!\n");
		return 0;
	}

	return U64_LSHIFT(hi.base, 32) | U64_LSHIFT(lo.base, 15);
}

#define DNV_MCHBAR_SIZE  0x8000
#define DNV_SB_PORT_SIZE 0x10000
static int dnv_rd_reg(int port, int off, int op, void *data, size_t sz, char *name)
{
	struct pci_dev *pdev;
	void __iomem *base;
	struct resource r;
	int ret;

	if (op == 4) {
		pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x1980, NULL);
		if (!pdev)
			return -ENODEV;

		pci_read_config_dword(pdev, off, data);
		pci_dev_put(pdev);
	} else {
		/* MMIO via memory controller hub base address */
		if (op == 0 && port == 0x4c) {
			memset(&r, 0, sizeof(r));

			r.start = get_mem_ctrl_hub_base_addr();
			if (!r.start)
				return -ENODEV;
			r.end = r.start + DNV_MCHBAR_SIZE - 1;
		} else {
			/* MMIO via sideband register base address */
			ret = p2sb_bar(NULL, 0, &r);
			if (ret)
				return ret;

			r.start += (port << 16);
			r.end = r.start + DNV_SB_PORT_SIZE - 1;
		}

		base = ioremap(r.start, resource_size(&r));
		if (!base)
			return -ENODEV;

		if (sz == 8)
			*(u64 *)data = readq(base + off);
		else
			*(u32 *)data = readl(base + off);

		iounmap(base);
	}

	edac_dbg(2, "Read %s=%.8x_%.8x\n", name,
			(sz == 8) ? *(u32 *)(data + 4) : 0, *(u32 *)data);

	return 0;
}

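/*
 * Read the register named "regname". The <regname>_offset,
 * <regname>_r_opcode and <regname>_port constants come from the
 * register definitions in pnd2_edac.h, so callers only name the
 * register (plus a sideband port in the RD_REGP() case).
 */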
#define RD_REGP(regp, regname, port)	\
	ops->rd_reg(port,					\
		regname##_offset,				\
		regname##_r_opcode,				\
		regp, sizeof(struct regname),	\
		#regname)

#define RD_REG(regp, regname)			\
	ops->rd_reg(regname ## _port,		\
		regname##_offset,				\
		regname##_r_opcode,				\
		regp, sizeof(struct regname),	\
		#regname)

static u64 top_lm, top_hm;
static bool two_slices;
static bool two_channels; /* Both PMI channels in one slice enabled */

static u8 sym_chan_mask;
static u8 asym_chan_mask;
static unsigned long chan_mask;

static int slice_selector = -1;
static int chan_selector = -1;
static u64 slice_hash_mask;
static u64 chan_hash_mask;

static void mk_region(char *name, struct region *rp, u64 base, u64 limit)
{
	rp->enabled = 1;
	rp->base = base;
	rp->limit = limit;
	edac_dbg(2, "Region:%s [%llx, %llx]\n", name, base, limit);
}

static void mk_region_mask(char *name, struct region *rp, u64 base, u64 mask)
{
	if (mask == 0) {
		pr_info(FW_BUG "MOT mask cannot be zero\n");
		return;
	}
	if (mask != GENMASK_ULL(PND_MAX_PHYS_BIT, __ffs(mask))) {
		pr_info(FW_BUG "MOT mask is invalid\n");
		return;
	}
	if (base & ~mask) {
		pr_info(FW_BUG "MOT region base/mask alignment error\n");
		return;
	}
	rp->base = base;
	rp->limit = (base | ~mask) & GENMASK_ULL(PND_MAX_PHYS_BIT, 0);
	rp->enabled = 1;
	edac_dbg(2, "Region:%s [%llx, %llx]\n", name, base, rp->limit);
}

static bool in_region(struct region *rp, u64 addr)
{
	if (!rp->enabled)
		return false;

	return rp->base <= addr && addr <= rp->limit;
}

static int gen_sym_mask(struct b_cr_slice_channel_hash *p)
{
	int mask = 0;

	if (!p->slice_0_mem_disabled)
		mask |= p->sym_slice0_channel_enabled;

	if (!p->slice_1_disabled)
		mask |= p->sym_slice1_channel_enabled << 2;

	if (p->ch_1_disabled || p->enable_pmi_dual_data_mode)
		mask &= 0x5;

	return mask;
}

static int gen_asym_mask(struct b_cr_slice_channel_hash *p,
			 struct b_cr_asym_mem_region0_mchbar *as0,
			 struct b_cr_asym_mem_region1_mchbar *as1,
			 struct b_cr_asym_2way_mem_region_mchbar *as2way)
{
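	/* Channel bitmap selected by each asym_2way_intlv_mode value */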
	const int intlv[] = { 0x5, 0xA, 0x3, 0xC };
	int mask = 0;

	if (as2way->asym_2way_interleave_enable)
		mask = intlv[as2way->asym_2way_intlv_mode];
	if (as0->slice0_asym_enable)
		mask |= (1 << as0->slice0_asym_channel_select);
	if (as1->slice1_asym_enable)
		mask |= (4 << as1->slice1_asym_channel_select);
	if (p->slice_0_mem_disabled)
		mask &= 0xc;
	if (p->slice_1_disabled)
		mask &= 0x3;
	if (p->ch_1_disabled || p->enable_pmi_dual_data_mode)
		mask &= 0x5;

	return mask;
}

static struct b_cr_tolud_pci tolud;
static struct b_cr_touud_lo_pci touud_lo;
static struct b_cr_touud_hi_pci touud_hi;
static struct b_cr_asym_mem_region0_mchbar asym0;
static struct b_cr_asym_mem_region1_mchbar asym1;
static struct b_cr_asym_2way_mem_region_mchbar asym_2way;
static struct b_cr_mot_out_base_mchbar mot_base;
static struct b_cr_mot_out_mask_mchbar mot_mask;
static struct b_cr_slice_channel_hash chash;

/* Apollo Lake dunit */
/*
 * Validated on a board with just two DIMMs in the [0] and [2] positions
 * in this array. The other port numbers match the documentation, but
 * caution is advised.
 */
static const int apl_dports[APL_NUM_CHANNELS] = { 0x18, 0x10, 0x11, 0x19 };
static struct d_cr_drp0 drp0[APL_NUM_CHANNELS];

/* Denverton dunit */
static const int dnv_dports[DNV_NUM_CHANNELS] = { 0x10, 0x12 };
static struct d_cr_dsch dsch;
static struct d_cr_ecc_ctrl ecc_ctrl[DNV_NUM_CHANNELS];
static struct d_cr_drp drp[DNV_NUM_CHANNELS];
static struct d_cr_dmap dmap[DNV_NUM_CHANNELS];
static struct d_cr_dmap1 dmap1[DNV_NUM_CHANNELS];
static struct d_cr_dmap2 dmap2[DNV_NUM_CHANNELS];
static struct d_cr_dmap3 dmap3[DNV_NUM_CHANNELS];
static struct d_cr_dmap4 dmap4[DNV_NUM_CHANNELS];
static struct d_cr_dmap5 dmap5[DNV_NUM_CHANNELS];

static void apl_mk_region(char *name, struct region *rp, void *asym)
{
	struct b_cr_asym_mem_region0_mchbar *a = asym;

	mk_region(name, rp,
			  U64_LSHIFT(a->slice0_asym_base, APL_ASYMSHIFT),
			  U64_LSHIFT(a->slice0_asym_limit, APL_ASYMSHIFT) +
			  GENMASK_ULL(APL_ASYMSHIFT - 1, 0));
}

static void dnv_mk_region(char *name, struct region *rp, void *asym)
{
	struct b_cr_asym_mem_region_denverton *a = asym;

	mk_region(name, rp,
			  U64_LSHIFT(a->slice_asym_base, DNV_ASYMSHIFT),
			  U64_LSHIFT(a->slice_asym_limit, DNV_ASYMSHIFT) +
			  GENMASK_ULL(DNV_ASYMSHIFT - 1, 0));
}

static int apl_get_registers(void)
{
	int ret = -ENODEV;
	int i;

	if (RD_REG(&asym_2way, b_cr_asym_2way_mem_region_mchbar))
		return -ENODEV;

	/*
	 * RD_REGP() will fail for unpopulated or non-existent
	 * DIMM slots. Return success if we find at least one DIMM.
	 */
	for (i = 0; i < APL_NUM_CHANNELS; i++)
		if (!RD_REGP(&drp0[i], d_cr_drp0, apl_dports[i]))
			ret = 0;

	return ret;
}

static int dnv_get_registers(void)
{
	int i;

	if (RD_REG(&dsch, d_cr_dsch))
		return -ENODEV;

	for (i = 0; i < DNV_NUM_CHANNELS; i++)
		if (RD_REGP(&ecc_ctrl[i], d_cr_ecc_ctrl, dnv_dports[i]) ||
			RD_REGP(&drp[i], d_cr_drp, dnv_dports[i]) ||
			RD_REGP(&dmap[i], d_cr_dmap, dnv_dports[i]) ||
			RD_REGP(&dmap1[i], d_cr_dmap1, dnv_dports[i]) ||
			RD_REGP(&dmap2[i], d_cr_dmap2, dnv_dports[i]) ||
			RD_REGP(&dmap3[i], d_cr_dmap3, dnv_dports[i]) ||
			RD_REGP(&dmap4[i], d_cr_dmap4, dnv_dports[i]) ||
			RD_REGP(&dmap5[i], d_cr_dmap5, dnv_dports[i]))
			return -ENODEV;

	return 0;
}

/*
 * Read all the h/w config registers once here (they don't
 * change at run time). Figure out which address ranges have
 * which interleave characteristics.
 */
static int get_registers(void)
{
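	/* System address bit used as slice/channel selector for each chash.interleave_mode */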
	const int intlv[] = { 10, 11, 12, 12 };

	if (RD_REG(&tolud, b_cr_tolud_pci) ||
		RD_REG(&touud_lo, b_cr_touud_lo_pci) ||
		RD_REG(&touud_hi, b_cr_touud_hi_pci) ||
		RD_REG(&asym0, b_cr_asym_mem_region0_mchbar) ||
		RD_REG(&asym1, b_cr_asym_mem_region1_mchbar) ||
		RD_REG(&mot_base, b_cr_mot_out_base_mchbar) ||
		RD_REG(&mot_mask, b_cr_mot_out_mask_mchbar) ||
		RD_REG(&chash, b_cr_slice_channel_hash))
		return -ENODEV;

	if (ops->get_registers())
		return -ENODEV;

	if (ops->type == DNV) {
		/* PMI channel idx (always 0) for asymmetric region */
		asym0.slice0_asym_channel_select = 0;
		asym1.slice1_asym_channel_select = 0;
		/* PMI channel bitmap (always 1) for symmetric region */
		chash.sym_slice0_channel_enabled = 0x1;
		chash.sym_slice1_channel_enabled = 0x1;
	}

	if (asym0.slice0_asym_enable)
		ops->mk_region("as0", &as0, &asym0);

	if (asym1.slice1_asym_enable)
		ops->mk_region("as1", &as1, &asym1);

	if (asym_2way.asym_2way_interleave_enable) {
		mk_region("as2way", &as2,
				  U64_LSHIFT(asym_2way.asym_2way_base, APL_ASYMSHIFT),
				  U64_LSHIFT(asym_2way.asym_2way_limit, APL_ASYMSHIFT) +
				  GENMASK_ULL(APL_ASYMSHIFT - 1, 0));
	}

	if (mot_base.imr_en) {
		mk_region_mask("mot", &mot,
					   U64_LSHIFT(mot_base.mot_out_base, MOT_SHIFT),
					   U64_LSHIFT(mot_mask.mot_out_mask, MOT_SHIFT));
	}

	top_lm = U64_LSHIFT(tolud.tolud, 20);
	top_hm = U64_LSHIFT(touud_hi.touud, 32) | U64_LSHIFT(touud_lo.touud, 20);

	two_slices = !chash.slice_1_disabled &&
				 !chash.slice_0_mem_disabled &&
				 (chash.sym_slice0_channel_enabled != 0) &&
				 (chash.sym_slice1_channel_enabled != 0);
	two_channels = !chash.ch_1_disabled &&
				 !chash.enable_pmi_dual_data_mode &&
				 ((chash.sym_slice0_channel_enabled == 3) ||
				 (chash.sym_slice1_channel_enabled == 3));

	sym_chan_mask = gen_sym_mask(&chash);
	asym_chan_mask = gen_asym_mask(&chash, &asym0, &asym1, &asym_2way);
	chan_mask = sym_chan_mask | asym_chan_mask;

	if (two_slices && !two_channels) {
		if (chash.hvm_mode)
			slice_selector = 29;
		else
			slice_selector = intlv[chash.interleave_mode];
	} else if (!two_slices && two_channels) {
		if (chash.hvm_mode)
			chan_selector = 29;
		else
			chan_selector = intlv[chash.interleave_mode];
	} else if (two_slices && two_channels) {
		if (chash.hvm_mode) {
			slice_selector = 29;
			chan_selector = 30;
		} else {
			slice_selector = intlv[chash.interleave_mode];
			chan_selector = intlv[chash.interleave_mode] + 1;
		}
	}

	if (two_slices) {
		if (!chash.hvm_mode)
			slice_hash_mask = chash.slice_hash_mask << SLICE_HASH_MASK_LSB;
		if (!two_channels)
			slice_hash_mask |= BIT_ULL(slice_selector);
	}

	if (two_channels) {
		if (!chash.hvm_mode)
			chan_hash_mask = chash.ch_hash_mask << CH_HASH_MASK_LSB;
		if (!two_slices)
			chan_hash_mask |= BIT_ULL(chan_selector);
	}

	return 0;
}

/* Get a contiguous memory address (remove the MMIO gap) */
static u64 remove_mmio_gap(u64 sys)
{
	return (sys < SZ_4G) ? sys : sys - (SZ_4G - top_lm);
}

/* Squeeze out one address bit, shift upper part down to fill gap */
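/* e.g. bitidx = 1: 0b1101 becomes 0b111 (bit 1 dropped, higher bits shift down) */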
static void remove_addr_bit(u64 *addr, int bitidx)
{
	u64	mask;

	if (bitidx == -1)
		return;

	mask = BIT_ULL(bitidx) - 1;
	*addr = ((*addr >> 1) & ~mask) | (*addr & mask);
}

/* XOR all the bits from addr specified in mask */
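/* e.g. hash_by_mask(0x5040, 0x5040) folds bits 6, 12 and 14: 1 ^ 1 ^ 1 = 1 */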
static int hash_by_mask(u64 addr, u64 mask)
{
	u64 result = addr & mask;

	result = (result >> 32) ^ result;
	result = (result >> 16) ^ result;
	result = (result >> 8) ^ result;
	result = (result >> 4) ^ result;
	result = (result >> 2) ^ result;
	result = (result >> 1) ^ result;

	return (int)result & 1;
}

/*
 * First stage decode. Take the system address and figure out which
 * second stage will deal with it based on interleave modes.
 */
static int sys2pmi(const u64 addr, u32 *pmiidx, u64 *pmiaddr, char *msg)
{
	u64 contig_addr, contig_base, contig_offset, contig_base_adj;
	int mot_intlv_bit = two_slices ? MOT_CHAN_INTLV_BIT_2SLC_2CH :
						MOT_CHAN_INTLV_BIT_1SLC_2CH;
	int slice_intlv_bit_rm = SELECTOR_DISABLED;
	int chan_intlv_bit_rm = SELECTOR_DISABLED;
	/* Determine if address is in the MOT region. */
	bool mot_hit = in_region(&mot, addr);
	/* Calculate the number of symmetric regions enabled. */
	int sym_channels = hweight8(sym_chan_mask);

	/*
	 * The amount we need to shift the asym base can be determined by the
	 * number of enabled symmetric channels.
	 * NOTE: This can only work because symmetric memory is not supposed
	 * to do a 3-way interleave.
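	 * For example: four symmetric channels -> shift by 2; two -> shift by 1.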
	 */
	int sym_chan_shift = sym_channels >> 1;

	/* Give up if address is out of range, or in MMIO gap */
	if (addr >= BIT(PND_MAX_PHYS_BIT) ||
	   (addr >= top_lm && addr < SZ_4G) || addr >= top_hm) {
		snprintf(msg, PND2_MSG_SIZE, "Error address 0x%llx is not DRAM", addr);
		return -EINVAL;
	}

	/* Get a contiguous memory address (remove the MMIO gap) */
	contig_addr = remove_mmio_gap(addr);

	if (in_region(&as0, addr)) {
		*pmiidx = asym0.slice0_asym_channel_select;

		contig_base = remove_mmio_gap(as0.base);
		contig_offset = contig_addr - contig_base;
		contig_base_adj = (contig_base >> sym_chan_shift) *
						  ((chash.sym_slice0_channel_enabled >> (*pmiidx & 1)) & 1);
		contig_addr = contig_offset + ((sym_channels > 0) ? contig_base_adj : 0ull);
	} else if (in_region(&as1, addr)) {
		*pmiidx = 2u + asym1.slice1_asym_channel_select;

		contig_base = remove_mmio_gap(as1.base);
		contig_offset = contig_addr - contig_base;
		contig_base_adj = (contig_base >> sym_chan_shift) *
						  ((chash.sym_slice1_channel_enabled >> (*pmiidx & 1)) & 1);
		contig_addr = contig_offset + ((sym_channels > 0) ? contig_base_adj : 0ull);
	} else if (in_region(&as2, addr) && (asym_2way.asym_2way_intlv_mode == 0x3ul)) {
		bool channel1;

		mot_intlv_bit = MOT_CHAN_INTLV_BIT_1SLC_2CH;
		*pmiidx = (asym_2way.asym_2way_intlv_mode & 1) << 1;
		channel1 = mot_hit ? ((bool)((addr >> mot_intlv_bit) & 1)) :
			hash_by_mask(contig_addr, chan_hash_mask);
		*pmiidx |= (u32)channel1;

		contig_base = remove_mmio_gap(as2.base);
		chan_intlv_bit_rm = mot_hit ? mot_intlv_bit : chan_selector;
		contig_offset = contig_addr - contig_base;
		remove_addr_bit(&contig_offset, chan_intlv_bit_rm);
		contig_addr = (contig_base >> sym_chan_shift) + contig_offset;
	} else {
		/* Otherwise we're in normal, boring symmetric mode. */
		*pmiidx = 0u;

		if (two_slices) {
			bool slice1;

			if (mot_hit) {
				slice_intlv_bit_rm = MOT_SLC_INTLV_BIT;
				slice1 = (addr >> MOT_SLC_INTLV_BIT) & 1;
			} else {
				slice_intlv_bit_rm = slice_selector;
				slice1 = hash_by_mask(addr, slice_hash_mask);
			}

			*pmiidx = (u32)slice1 << 1;
		}

		if (two_channels) {
			bool channel1;

			mot_intlv_bit = two_slices ? MOT_CHAN_INTLV_BIT_2SLC_2CH :
							MOT_CHAN_INTLV_BIT_1SLC_2CH;

			if (mot_hit) {
				chan_intlv_bit_rm = mot_intlv_bit;
				channel1 = (addr >> mot_intlv_bit) & 1;
			} else {
				chan_intlv_bit_rm = chan_selector;
				channel1 = hash_by_mask(contig_addr, chan_hash_mask);
			}

			*pmiidx |= (u32)channel1;
		}
	}

	/* Remove the chan_selector bit first */
	remove_addr_bit(&contig_addr, chan_intlv_bit_rm);
	/* Remove the slice bit second (it must be the lower of the two) */
	remove_addr_bit(&contig_addr, slice_intlv_bit_rm);
	*pmiaddr = contig_addr;

	return 0;
}

/* Translate PMI address to memory (rank, row, bank, column) */
#define C(n) (BIT(4) | (n))	/* column */
#define B(n) (BIT(5) | (n))	/* bank */
#define R(n) (BIT(6) | (n))	/* row */
#define RS   (BIT(7))		/* rank */
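
/*
 * Each entry in dimm_geometry.bits[] tags one PMI address bit: the high
 * bits select the field (column/bank/row/rank-select) and the low four
 * bits give the bit position within that field. Zero entries are unused.
 */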

/* addrdec values */
#define AMAP_1KB	0
#define AMAP_2KB	1
#define AMAP_4KB	2
#define AMAP_RSVD	3

/* dden values */
#define DEN_4Gb		0
#define DEN_8Gb		2

/* dwid values */
#define X8		0
#define X16		1

static struct dimm_geometry {
	u8	addrdec;
	u8	dden;
	u8	dwid;
	u8	rowbits, colbits;
	u16	bits[PMI_ADDRESS_WIDTH];
} dimms[] = {
	{
		.addrdec = AMAP_1KB, .dden = DEN_4Gb, .dwid = X16,
		.rowbits = 15, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  B(0),  B(1),  B(2),  R(0),
			R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),  R(9),
			R(10), C(7),  C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
			0,     0,     0,     0
		}
	},
	{
		.addrdec = AMAP_1KB, .dden = DEN_4Gb, .dwid = X8,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  B(0),  B(1),  B(2),  R(0),
			R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),  R(9),
			R(10), C(7),  C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
			R(15), 0,     0,     0
		}
	},
	{
		.addrdec = AMAP_1KB, .dden = DEN_8Gb, .dwid = X16,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  B(0),  B(1),  B(2),  R(0),
			R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),  R(9),
			R(10), C(7),  C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
			R(15), 0,     0,     0
		}
	},
	{
		.addrdec = AMAP_1KB, .dden = DEN_8Gb, .dwid = X8,
		.rowbits = 16, .colbits = 11,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  B(0),  B(1),  B(2),  R(0),
			R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),  R(9),
			R(10), C(7),  C(8),  C(9),  R(11), RS,    C(11), R(12), R(13),
			R(14), R(15), 0,     0
		}
	},
	{
		.addrdec = AMAP_2KB, .dden = DEN_4Gb, .dwid = X16,
		.rowbits = 15, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  B(0),  B(1),  B(2),
			R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),
			R(9),  R(10), C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
			0,     0,     0,     0
		}
	},
	{
		.addrdec = AMAP_2KB, .dden = DEN_4Gb, .dwid = X8,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  B(0),  B(1),  B(2),
			R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),
			R(9),  R(10), C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
			R(15), 0,     0,     0
		}
	},
	{
		.addrdec = AMAP_2KB, .dden = DEN_8Gb, .dwid = X16,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  B(0),  B(1),  B(2),
			R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),
			R(9),  R(10), C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
			R(15), 0,     0,     0
		}
	},
	{
		.addrdec = AMAP_2KB, .dden = DEN_8Gb, .dwid = X8,
		.rowbits = 16, .colbits = 11,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  B(0),  B(1),  B(2),
			R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),
			R(9),  R(10), C(8),  C(9),  R(11), RS,    C(11), R(12), R(13),
			R(14), R(15), 0,     0
		}
	},
	{
		.addrdec = AMAP_4KB, .dden = DEN_4Gb, .dwid = X16,
		.rowbits = 15, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  C(8),  B(0),  B(1),
			B(2),  R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),
			R(8),  R(9),  R(10), C(9),  R(11), RS,    R(12), R(13), R(14),
			0,     0,     0,     0
		}
	},
	{
		.addrdec = AMAP_4KB, .dden = DEN_4Gb, .dwid = X8,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  C(8),  B(0),  B(1),
			B(2),  R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),
			R(8),  R(9),  R(10), C(9),  R(11), RS,    R(12), R(13), R(14),
			R(15), 0,     0,     0
		}
	},
	{
		.addrdec = AMAP_4KB, .dden = DEN_8Gb, .dwid = X16,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  C(8),  B(0),  B(1),
			B(2),  R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),
			R(8),  R(9),  R(10), C(9),  R(11), RS,    R(12), R(13), R(14),
			R(15), 0,     0,     0
		}
	},
	{
		.addrdec = AMAP_4KB, .dden = DEN_8Gb, .dwid = X8,
		.rowbits = 16, .colbits = 11,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  C(8),  B(0),  B(1),
			B(2),  R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),
			R(8),  R(9),  R(10), C(9),  R(11), RS,    C(11), R(12), R(13),
			R(14), R(15), 0,     0
		}
	}
};

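/*
 * Bank address hashing: XOR selected upper address bits into bank bit
 * "idx". "shft" (the addrdec field) compensates for the 1KB/2KB/4KB
 * address-map variants.
 */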
static int bank_hash(u64 pmiaddr, int idx, int shft)
{
	int bhash = 0;

	switch (idx) {
	case 0:
		bhash ^= ((pmiaddr >> (12 + shft)) ^ (pmiaddr >> (9 + shft))) & 1;
		break;
	case 1:
		bhash ^= (((pmiaddr >> (10 + shft)) ^ (pmiaddr >> (8 + shft))) & 1) << 1;
		bhash ^= ((pmiaddr >> 22) & 1) << 1;
		break;
	case 2:
		bhash ^= (((pmiaddr >> (13 + shft)) ^ (pmiaddr >> (11 + shft))) & 1) << 2;
		break;
	}

	return bhash;
}

static int rank_hash(u64 pmiaddr)
{
	return ((pmiaddr >> 16) ^ (pmiaddr >> 10)) & 1;
}

/* Second stage decode. Compute rank, bank, row & column. */
static int apl_pmi2mem(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx,
		       struct dram_addr *daddr, char *msg)
{
	struct d_cr_drp0 *cr_drp0 = &drp0[pmiidx];
	struct pnd2_pvt *pvt = mci->pvt_info;
	int g = pvt->dimm_geom[pmiidx];
	struct dimm_geometry *d = &dimms[g];
	int column = 0, bank = 0, row = 0, rank = 0;
	int i, idx, type, skiprs = 0;

	for (i = 0; i < PMI_ADDRESS_WIDTH; i++) {
		int	bit = (pmiaddr >> i) & 1;

		if (i + skiprs >= PMI_ADDRESS_WIDTH) {
			snprintf(msg, PND2_MSG_SIZE, "Bad dimm_geometry[] table\n");
			return -EINVAL;
		}

		type = d->bits[i + skiprs] & ~0xf;
		idx = d->bits[i + skiprs] & 0xf;

		/*
		 * On single rank DIMMs ignore the rank select bit
		 * and shift remainder of "bits[]" down one place.
		 */
		if (type == RS && (cr_drp0->rken0 + cr_drp0->rken1) == 1) {
			skiprs = 1;
			type = d->bits[i + skiprs] & ~0xf;
			idx = d->bits[i + skiprs] & 0xf;
		}

		switch (type) {
		case C(0):
			column |= (bit << idx);
			break;
		case B(0):
			bank |= (bit << idx);
			if (cr_drp0->bahen)
				bank ^= bank_hash(pmiaddr, idx, d->addrdec);
			break;
		case R(0):
			row |= (bit << idx);
			break;
		case RS:
			rank = bit;
			if (cr_drp0->rsien)
				rank ^= rank_hash(pmiaddr);
			break;
		default:
			if (bit) {
				snprintf(msg, PND2_MSG_SIZE, "Bad translation\n");
				return -EINVAL;
			}
			goto done;
		}
	}

done:
	daddr->col = column;
	daddr->bank = bank;
	daddr->row = row;
	daddr->rank = rank;
	daddr->dimm = 0;

	return 0;
}

/* Pluck bit "in" from pmiaddr and return value shifted to bit "out" */
#define dnv_get_bit(pmi, in, out) ((int)(((pmi) >> (in)) & 1u) << (out))
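/* e.g. dnv_get_bit(0x40, 6, 2): bit 6 of 0x40 is set, returned at bit 2 -> 0x4 */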

static int dnv_pmi2mem(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx,
					   struct dram_addr *daddr, char *msg)
{
	/* Rank 0 or 1 */
	daddr->rank = dnv_get_bit(pmiaddr, dmap[pmiidx].rs0 + 13, 0);
	/* Rank 2 or 3 */
	daddr->rank |= dnv_get_bit(pmiaddr, dmap[pmiidx].rs1 + 13, 1);

	/*
	 * Normally ranks 0,1 are DIMM0, and 2,3 are DIMM1, but we
	 * flip them if DIMM1 is larger than DIMM0.
	 */
	daddr->dimm = (daddr->rank >= 2) ^ drp[pmiidx].dimmflip;

	daddr->bank = dnv_get_bit(pmiaddr, dmap[pmiidx].ba0 + 6, 0);
	daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].ba1 + 6, 1);
	daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].bg0 + 6, 2);
	if (dsch.ddr4en)
		daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].bg1 + 6, 3);
	if (dmap1[pmiidx].bxor) {
		if (dsch.ddr4en) {
			daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 0);
			daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row7 + 6, 1);
			if (dsch.chan_width == 0)
				/* 64/72 bit dram channel width */
				daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 2);
			else
				/* 32/40 bit dram channel width */
				daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 2);
			daddr->bank ^= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 3);
		} else {
			daddr->bank ^= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 0);
			daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 1);
			if (dsch.chan_width == 0)
				daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 2);
			else
				daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 2);
		}
	}

	daddr->row = dnv_get_bit(pmiaddr, dmap2[pmiidx].row0 + 6, 0);
	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row1 + 6, 1);
	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 2);
	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row3 + 6, 3);
	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row4 + 6, 4);
	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row5 + 6, 5);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 6);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row7 + 6, 7);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row8 + 6, 8);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row9 + 6, 9);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row10 + 6, 10);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row11 + 6, 11);
	daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row12 + 6, 12);
	daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row13 + 6, 13);
	if (dmap4[pmiidx].row14 != 31)
		daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row14 + 6, 14);
	if (dmap4[pmiidx].row15 != 31)
		daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row15 + 6, 15);
	if (dmap4[pmiidx].row16 != 31)
		daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row16 + 6, 16);
	if (dmap4[pmiidx].row17 != 31)
		daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row17 + 6, 17);

	daddr->col = dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 3);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 4);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca5 + 6, 5);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca6 + 6, 6);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca7 + 6, 7);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca8 + 6, 8);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca9 + 6, 9);
	if (!dsch.ddr4en && dmap1[pmiidx].ca11 != 0x3f)
		daddr->col |= dnv_get_bit(pmiaddr, dmap1[pmiidx].ca11 + 13, 11);

	return 0;
}

static int check_channel(int ch)
{
	if (drp0[ch].dramtype != 0) {
		pnd2_printk(KERN_INFO, "Unsupported DIMM in channel %d\n", ch);
		return 1;
	} else if (drp0[ch].eccen == 0) {
		pnd2_printk(KERN_INFO, "ECC disabled on channel %d\n", ch);
		return 1;
	}
	return 0;
}

static int apl_check_ecc_active(void)
{
	int	i, ret = 0;

	/* Check dramtype and ECC mode for each present DIMM */
	for_each_set_bit(i, &chan_mask, APL_NUM_CHANNELS)
		ret += check_channel(i);

	return ret ? -EINVAL : 0;
}

#define DIMMS_PRESENT(d) ((d)->rken0 + (d)->rken1 + (d)->rken2 + (d)->rken3)

static int check_unit(int ch)
{
	struct d_cr_drp *d = &drp[ch];

	if (DIMMS_PRESENT(d) && !ecc_ctrl[ch].eccen) {
		pnd2_printk(KERN_INFO, "ECC disabled on channel %d\n", ch);
		return 1;
	}
	return 0;
}

static int dnv_check_ecc_active(void)
{
	int	i, ret = 0;

	for (i = 0; i < DNV_NUM_CHANNELS; i++)
		ret += check_unit(i);
	return ret ? -EINVAL : 0;
}

static int get_memory_error_data(struct mem_ctl_info *mci, u64 addr,
								 struct dram_addr *daddr, char *msg)
{
	u64	pmiaddr;
	u32	pmiidx;
	int	ret;

	ret = sys2pmi(addr, &pmiidx, &pmiaddr, msg);
	if (ret)
		return ret;

	pmiaddr >>= ops->pmiaddr_shift;
	/* pmi channel idx to dimm channel idx */
	pmiidx >>= ops->pmiidx_shift;
	daddr->chan = pmiidx;

	ret = ops->pmi2mem(mci, pmiaddr, pmiidx, daddr, msg);
	if (ret)
		return ret;

	edac_dbg(0, "SysAddr=%llx PmiAddr=%llx Channel=%d DIMM=%d Rank=%d Bank=%d Row=%d Column=%d\n",
			 addr, pmiaddr, daddr->chan, daddr->dimm, daddr->rank, daddr->bank, daddr->row, daddr->col);

	return 0;
}

static void pnd2_mce_output_error(struct mem_ctl_info *mci, const struct mce *m,
				  struct dram_addr *daddr)
{
	enum hw_event_mc_err_type tp_event;
	char *optype, msg[PND2_MSG_SIZE];
	bool ripv = m->mcgstatus & MCG_STATUS_RIPV;
	bool overflow = m->status & MCI_STATUS_OVER;
	bool uc_err = m->status & MCI_STATUS_UC;
	bool recov = m->status & MCI_STATUS_S;
	u32 core_err_cnt = GET_BITFIELD(m->status, 38, 52);
	u32 mscod = GET_BITFIELD(m->status, 16, 31);
	u32 errcode = GET_BITFIELD(m->status, 0, 15);
	u32 optypenum = GET_BITFIELD(m->status, 4, 6);
	int rc;

	tp_event = uc_err ? (ripv ? HW_EVENT_ERR_UNCORRECTED : HW_EVENT_ERR_FATAL) :
						 HW_EVENT_ERR_CORRECTED;

	/*
	 * According to Table 15-9 of the Intel Architecture spec vol 3A,
	 * memory errors should fit in this mask:
	 *	000f 0000 1mmm cccc (binary)
	 * where:
	 *	f = Correction Report Filtering Bit. If 1, subsequent errors
	 *	    won't be shown
	 *	mmm = error type
	 *	cccc = channel
	 * If the mask doesn't match, report an error to the parsing logic
	 */
	if (!((errcode & 0xef80) == 0x80)) {
		optype = "Can't parse: it is not a mem";
	} else {
		switch (optypenum) {
		case 0:
			optype = "generic undef request error";
			break;
		case 1:
			optype = "memory read error";
			break;
		case 2:
			optype = "memory write error";
			break;
		case 3:
			optype = "addr/cmd error";
			break;
		case 4:
			optype = "memory scrubbing error";
			break;
		default:
			optype = "reserved";
			break;
		}
	}

	/* Only decode errors with a valid address (ADDRV) */
	if (!(m->status & MCI_STATUS_ADDRV))
		return;

	rc = get_memory_error_data(mci, m->addr, daddr, msg);
	if (rc)
		goto address_error;

	snprintf(msg, sizeof(msg),
		 "%s%s err_code:%04x:%04x channel:%d DIMM:%d rank:%d row:%d bank:%d col:%d",
		 overflow ? " OVERFLOW" : "", (uc_err && recov) ? " recoverable" : "", mscod,
		 errcode, daddr->chan, daddr->dimm, daddr->rank, daddr->row, daddr->bank, daddr->col);

	edac_dbg(0, "%s\n", msg);

	/* Call the helper to output message */
	edac_mc_handle_error(tp_event, mci, core_err_cnt, m->addr >> PAGE_SHIFT,
						 m->addr & ~PAGE_MASK, 0, daddr->chan, daddr->dimm, -1, optype, msg);

	return;

address_error:
	edac_mc_handle_error(tp_event, mci, core_err_cnt, 0, 0, 0, -1, -1, -1, msg, "");
}

static void apl_get_dimm_config(struct mem_ctl_info *mci)
{
	struct pnd2_pvt	*pvt = mci->pvt_info;
	struct dimm_info *dimm;
	struct d_cr_drp0 *d;
	u64	capacity;
	int	i, g;

	for_each_set_bit(i, &chan_mask, APL_NUM_CHANNELS) {
		dimm = edac_get_dimm(mci, i, 0, 0);
		if (!dimm) {
			edac_dbg(0, "No allocated DIMM for channel %d\n", i);
			continue;
		}

		d = &drp0[i];
		for (g = 0; g < ARRAY_SIZE(dimms); g++)
			if (dimms[g].addrdec == d->addrdec &&
			    dimms[g].dden == d->dden &&
			    dimms[g].dwid == d->dwid)
				break;

		if (g == ARRAY_SIZE(dimms)) {
			edac_dbg(0, "Channel %d: unrecognized DIMM\n", i);
			continue;
		}

		pvt->dimm_geom[i] = g;
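		/* Capacity counts 8-byte words: ranks * 8 banks * 2^(rowbits + colbits); ">> (20 - 3)" converts to MiB */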
		capacity = (d->rken0 + d->rken1) * 8 * BIT(dimms[g].rowbits + dimms[g].colbits);
		edac_dbg(0, "Channel %d: %lld MByte DIMM\n", i, capacity >> (20 - 3));
		dimm->nr_pages = MiB_TO_PAGES(capacity >> (20 - 3));
		dimm->grain = 32;
		dimm->dtype = (d->dwid == 0) ? DEV_X8 : DEV_X16;
		dimm->mtype = MEM_DDR3;
		dimm->edac_mode = EDAC_SECDED;
		snprintf(dimm->label, sizeof(dimm->label), "Slice#%d_Chan#%d", i / 2, i % 2);
	}
}

static const int dnv_dtypes[] = {
	DEV_X8, DEV_X4, DEV_X16, DEV_UNKNOWN
};

static void dnv_get_dimm_config(struct mem_ctl_info *mci)
{
	int	i, j, ranks_of_dimm[DNV_MAX_DIMMS], banks, rowbits, colbits, memtype;
	struct dimm_info *dimm;
	struct d_cr_drp *d;
	u64	capacity;

	if (dsch.ddr4en) {
		memtype = MEM_DDR4;
		banks = 16;
		colbits = 10;
	} else {
		memtype = MEM_DDR3;
		banks = 8;
	}

	for (i = 0; i < DNV_NUM_CHANNELS; i++) {
		if (dmap4[i].row14 == 31)
			rowbits = 14;
		else if (dmap4[i].row15 == 31)
			rowbits = 15;
		else if (dmap4[i].row16 == 31)
			rowbits = 16;
		else if (dmap4[i].row17 == 31)
			rowbits = 17;
		else
			rowbits = 18;

		if (memtype == MEM_DDR3) {
			if (dmap1[i].ca11 != 0x3f)
				colbits = 12;
			else
				colbits = 10;
		}

		d = &drp[i];
		/* DIMM0 is present if rank0 and/or rank1 is enabled */
		ranks_of_dimm[0] = d->rken0 + d->rken1;
		/* DIMM1 is present if rank2 and/or rank3 is enabled */
		ranks_of_dimm[1] = d->rken2 + d->rken3;

		for (j = 0; j < DNV_MAX_DIMMS; j++) {
			if (!ranks_of_dimm[j])
				continue;

			dimm = edac_get_dimm(mci, i, j, 0);
			if (!dimm) {
				edac_dbg(0, "No allocated DIMM for channel %d DIMM %d\n", i, j);
				continue;
			}
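			/* As above: capacity counts 8-byte words; ">> (20 - 3)" converts to MiB */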
			capacity = ranks_of_dimm[j] * banks * BIT(rowbits + colbits);
			edac_dbg(0, "Channel %d DIMM %d: %lld MByte DIMM\n", i, j, capacity >> (20 - 3));
			dimm->nr_pages = MiB_TO_PAGES(capacity >> (20 - 3));
			dimm->grain = 32;
			dimm->dtype = dnv_dtypes[j ? d->dimmdwid0 : d->dimmdwid1];
			dimm->mtype = memtype;
			dimm->edac_mode = EDAC_SECDED;
			snprintf(dimm->label, sizeof(dimm->label), "Chan#%d_DIMM#%d", i, j);
		}
	}
}

static int pnd2_register_mci(struct mem_ctl_info **ppmci)
{
	struct edac_mc_layer layers[2];
	struct mem_ctl_info *mci;
	struct pnd2_pvt *pvt;
	int rc;

	rc = ops->check_ecc();
	if (rc < 0)
		return rc;

	/* Allocate a new MC control structure */
	layers[0].type = EDAC_MC_LAYER_CHANNEL;
	layers[0].size = ops->channels;
	layers[0].is_virt_csrow = false;
	layers[1].type = EDAC_MC_LAYER_SLOT;
	layers[1].size = ops->dimms_per_channel;
	layers[1].is_virt_csrow = true;
	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
	if (!mci)
		return -ENOMEM;

	pvt = mci->pvt_info;
	memset(pvt, 0, sizeof(*pvt));

	mci->mod_name = EDAC_MOD_STR;
	mci->dev_name = ops->name;
	mci->ctl_name = "Pondicherry2";

	/* Get dimm basic config and the memory layout */
	ops->get_dimm_config(mci);

	if (edac_mc_add_mc(mci)) {
		edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
		edac_mc_free(mci);
		return -EINVAL;
	}

	*ppmci = mci;

	return 0;
}

static void pnd2_unregister_mci(struct mem_ctl_info *mci)
{
	if (unlikely(!mci || !mci->pvt_info)) {
		pnd2_printk(KERN_ERR, "Couldn't find mci handler\n");
		return;
	}

	/* Remove MC sysfs nodes */
	edac_mc_del_mc(NULL);
	edac_dbg(1, "%s: free mci struct\n", mci->ctl_name);
	edac_mc_free(mci);
}

/*
 * Callback function registered with core kernel mce code.
 * Called once for each logged error.
 */
static int pnd2_mce_check_error(struct notifier_block *nb, unsigned long val, void *data)
{
	struct mce *mce = (struct mce *)data;
	struct mem_ctl_info *mci;
	struct dram_addr daddr;
	char *type;

	mci = pnd2_mci;
	if (!mci || (mce->kflags & MCE_HANDLED_CEC))
		return NOTIFY_DONE;

	/*
	 * Just let mcelog handle it if the error is
	 * outside the memory controller. A memory error
	 * is indicated by bit 7 = 1 and bits 8-11, 13-15 = 0;
	 * bit 12 has a special meaning.
	 */
	if ((mce->status & 0xefff) >> 7 != 1)
		return NOTIFY_DONE;

	if (mce->mcgstatus & MCG_STATUS_MCIP)
		type = "Exception";
	else
		type = "Event";

	pnd2_mc_printk(mci, KERN_INFO, "HANDLING MCE MEMORY ERROR\n");
	pnd2_mc_printk(mci, KERN_INFO, "CPU %u: Machine Check %s: %llx Bank %u: %llx\n",
				   mce->extcpu, type, mce->mcgstatus, mce->bank, mce->status);
	pnd2_mc_printk(mci, KERN_INFO, "TSC %llx ", mce->tsc);
	pnd2_mc_printk(mci, KERN_INFO, "ADDR %llx ", mce->addr);
	pnd2_mc_printk(mci, KERN_INFO, "MISC %llx ", mce->misc);
	pnd2_mc_printk(mci, KERN_INFO, "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x\n",
				   mce->cpuvendor, mce->cpuid, mce->time, mce->socketid, mce->apicid);

	pnd2_mce_output_error(mci, mce, &daddr);

	/* Advise mcelog that the error was handled */
	mce->kflags |= MCE_HANDLED_EDAC;
	return NOTIFY_OK;
}

static struct notifier_block pnd2_mce_dec = {
	.notifier_call	= pnd2_mce_check_error,
	.priority	= MCE_PRIO_EDAC,
};

#ifdef CONFIG_EDAC_DEBUG
/*
 * Write an address to this file to exercise the address decode
 * logic in this driver.
 */
static u64 pnd2_fake_addr;
#define PND2_BLOB_SIZE 1024
static char pnd2_result[PND2_BLOB_SIZE];
static struct dentry *pnd2_test;
static struct debugfs_blob_wrapper pnd2_blob = {
	.data = pnd2_result,
	.size = 0
};

static int debugfs_u64_set(void *data, u64 val)
{
	struct dram_addr daddr;
	struct mce m;

	*(u64 *)data = val;
	m.mcgstatus = 0;
	/* ADDRV + MemRd + Unknown channel */
	m.status = MCI_STATUS_ADDRV + 0x9f;
	m.addr = val;
	pnd2_mce_output_error(pnd2_mci, &m, &daddr);
	snprintf(pnd2_blob.data, PND2_BLOB_SIZE,
			 "SysAddr=%llx Channel=%d DIMM=%d Rank=%d Bank=%d Row=%d Column=%d\n",
			 m.addr, daddr.chan, daddr.dimm, daddr.rank, daddr.bank, daddr.row, daddr.col);
	pnd2_blob.size = strlen(pnd2_blob.data);

	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_u64_wo, NULL, debugfs_u64_set, "%llu\n");

static void setup_pnd2_debug(void)
{
	pnd2_test = edac_debugfs_create_dir("pnd2_test");
	edac_debugfs_create_file("pnd2_debug_addr", 0200, pnd2_test,
							 &pnd2_fake_addr, &fops_u64_wo);
	debugfs_create_blob("pnd2_debug_results", 0400, pnd2_test, &pnd2_blob);
}

static void teardown_pnd2_debug(void)
{
	debugfs_remove_recursive(pnd2_test);
}
#else
static void setup_pnd2_debug(void)	{}
static void teardown_pnd2_debug(void)	{}
#endif /* CONFIG_EDAC_DEBUG */


static int pnd2_probe(void)
{
	int rc;

	edac_dbg(2, "\n");
	rc = get_registers();
	if (rc)
		return rc;

	return pnd2_register_mci(&pnd2_mci);
}

static void pnd2_remove(void)
{
	edac_dbg(0, "\n");
	pnd2_unregister_mci(pnd2_mci);
}

static struct dunit_ops apl_ops = {
		.name			= "pnd2/apl",
		.type			= APL,
		.pmiaddr_shift		= LOG2_PMI_ADDR_GRANULARITY,
		.pmiidx_shift		= 0,
		.channels		= APL_NUM_CHANNELS,
		.dimms_per_channel	= 1,
		.rd_reg			= apl_rd_reg,
		.get_registers		= apl_get_registers,
		.check_ecc		= apl_check_ecc_active,
		.mk_region		= apl_mk_region,
		.get_dimm_config	= apl_get_dimm_config,
		.pmi2mem		= apl_pmi2mem,
};

static struct dunit_ops dnv_ops = {
		.name			= "pnd2/dnv",
		.type			= DNV,
		.pmiaddr_shift		= 0,
		.pmiidx_shift		= 1,
		.channels		= DNV_NUM_CHANNELS,
		.dimms_per_channel	= 2,
		.rd_reg			= dnv_rd_reg,
		.get_registers		= dnv_get_registers,
		.check_ecc		= dnv_check_ecc_active,
		.mk_region		= dnv_mk_region,
		.get_dimm_config	= dnv_get_dimm_config,
		.pmi2mem		= dnv_pmi2mem,
};

static const struct x86_cpu_id pnd2_cpuids[] = {
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT,	&apl_ops),
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_D,	&dnv_ops),
	{ }
};
MODULE_DEVICE_TABLE(x86cpu, pnd2_cpuids);

static int __init pnd2_init(void)
{
	const struct x86_cpu_id *id;
	const char *owner;
	int rc;

	edac_dbg(2, "\n");

	if (ghes_get_devices())
		return -EBUSY;

	owner = edac_get_owner();
	if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
		return -EBUSY;

	if (cpu_feature_enabled(X86_FEATURE_HYPERVISOR))
		return -ENODEV;

	id = x86_match_cpu(pnd2_cpuids);
	if (!id)
		return -ENODEV;

	ops = (struct dunit_ops *)id->driver_data;

	if (ops->type == APL) {
		p2sb_bus = pci_find_bus(0, 0);
		if (!p2sb_bus)
			return -ENODEV;
	}

	/* Ensure that the OPSTATE is set correctly for POLL or NMI */
	opstate_init();

	rc = pnd2_probe();
	if (rc < 0) {
		pnd2_printk(KERN_ERR, "Failed to register device with error %d.\n", rc);
		return rc;
	}

	if (!pnd2_mci)
		return -ENODEV;

	mce_register_decode_chain(&pnd2_mce_dec);
	setup_pnd2_debug();

	return 0;
}

static void __exit pnd2_exit(void)
{
	edac_dbg(2, "\n");
	teardown_pnd2_debug();
	mce_unregister_decode_chain(&pnd2_mce_dec);
	pnd2_remove();
}

module_init(pnd2_init);
module_exit(pnd2_exit);

module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Tony Luck");
MODULE_DESCRIPTION("MC Driver for Intel SoC using Pondicherry memory controller");