/*
 * edac_mc kernel module
 * (C) 2005, 2006 Linux Networx (http://lnxi.com)
 * This file may be distributed under the terms of the
 * GNU General Public License.
 *
 * Written by Thayne Harbaugh
 * Based on work by Dan Hollis <goemon at anime dot net> and others.
 *	http://www.anime.net/~goemon/linux-ecc/
 *
 * Modified by Dave Peterson and Doug Thompson
 *
 */

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/sysdev.h>
#include <linux/ctype.h>
#include <linux/edac.h>
#include <asm/uaccess.h>
#include <asm/page.h>
#include <asm/edac.h>
#include "edac_core.h"
#include "edac_module.h"

/* lock to memory controller's control array */
static DEFINE_MUTEX(mem_ctls_mutex);
static LIST_HEAD(mc_devices);

#ifdef CONFIG_EDAC_DEBUG

static void edac_mc_dump_channel(struct channel_info *chan)
{
	debugf4("\tchannel = %p\n", chan);
	debugf4("\tchannel->chan_idx = %d\n", chan->chan_idx);
	debugf4("\tchannel->ce_count = %d\n", chan->ce_count);
	debugf4("\tchannel->label = '%s'\n", chan->label);
	debugf4("\tchannel->csrow = %p\n\n", chan->csrow);
}

static void edac_mc_dump_csrow(struct csrow_info *csrow)
{
	debugf4("\tcsrow = %p\n", csrow);
	debugf4("\tcsrow->csrow_idx = %d\n", csrow->csrow_idx);
	debugf4("\tcsrow->first_page = 0x%lx\n", csrow->first_page);
	debugf4("\tcsrow->last_page = 0x%lx\n", csrow->last_page);
	debugf4("\tcsrow->page_mask = 0x%lx\n", csrow->page_mask);
	debugf4("\tcsrow->nr_pages = 0x%x\n", csrow->nr_pages);
	debugf4("\tcsrow->nr_channels = %d\n", csrow->nr_channels);
	debugf4("\tcsrow->channels = %p\n", csrow->channels);
	debugf4("\tcsrow->mci = %p\n\n", csrow->mci);
}

static void edac_mc_dump_mci(struct mem_ctl_info *mci)
{
	debugf3("\tmci = %p\n", mci);
	debugf3("\tmci->mtype_cap = %lx\n", mci->mtype_cap);
	debugf3("\tmci->edac_ctl_cap = %lx\n", mci->edac_ctl_cap);
	debugf3("\tmci->edac_cap = %lx\n", mci->edac_cap);
	debugf4("\tmci->edac_check = %p\n", mci->edac_check);
	debugf3("\tmci->nr_csrows = %d, csrows = %p\n",
		mci->nr_csrows, mci->csrows);
	debugf3("\tdev = %p\n", mci->dev);
	debugf3("\tmod_name:ctl_name = %s:%s\n", mci->mod_name, mci->ctl_name);
	debugf3("\tpvt_info = %p\n\n", mci->pvt_info);
}

/*
 * Keep these in sync with enum mem_type.
 */
const char *edac_mem_types[] = {
	"Empty csrow",
	"Reserved csrow type",
	"Unknown csrow type",
	"Fast page mode RAM",
	"Extended data out RAM",
	"Burst Extended data out RAM",
	"Single data rate SDRAM",
	"Registered single data rate SDRAM",
	"Double data rate SDRAM",
	"Registered Double data rate SDRAM",
	"Rambus DRAM",
	"Unbuffered DDR2 RAM",
	"Fully buffered DDR2",
	"Registered DDR2 RAM",
	"Rambus XDR",
	"Unbuffered DDR3 RAM",
	"Registered DDR3 RAM",
};
EXPORT_SYMBOL_GPL(edac_mem_types);

#endif				/* CONFIG_EDAC_DEBUG */

/* 'ptr' points to a possibly unaligned item X such that sizeof(X) is 'size'.
 * Adjust 'ptr' so that its alignment is at least as stringent as what the
 * compiler would provide for X and return the aligned result.
 *
 * If 'size' is a constant, the compiler will optimize this whole function
 * down to either a no-op or the addition of a constant to the value of 'ptr'.
 */
void *edac_align_ptr(void *ptr, unsigned size)
{
	unsigned align, r;

	/* Here we assume that the alignment of a "long long" is the most
	 * stringent alignment that the compiler will ever provide by default.
	 * As far as I know, this is a reasonable assumption.
	 */
	if (size > sizeof(long))
		align = sizeof(long long);
	else if (size > sizeof(int))
		align = sizeof(long);
	else if (size > sizeof(short))
		align = sizeof(int);
	else if (size > sizeof(char))
		align = sizeof(short);
	else
		return (char *)ptr;

	r = size % align;

	if (r == 0)
		return (char *)ptr;

	return (void *)(((unsigned long)ptr) + align - r);
}
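
/*
 * Worked trace (added for illustration, assuming a typical LP64 build
 * where sizeof(short)/sizeof(int)/sizeof(long)/sizeof(long long) are
 * 2/4/8/8).  For size == 12:
 *
 *	12 > sizeof(long)	-> align = sizeof(long long) = 8
 *	r = 12 % 8 = 4		-> return ptr + (8 - 4) = ptr + 4
 *
 * For size == 4, 4 > sizeof(short) selects align = sizeof(int) = 4,
 * r = 4 % 4 = 0, and ptr is returned unchanged.  edac_mc_alloc() below
 * relies on this when packing several arrays into one allocation.
 */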

/**
 * edac_mc_alloc: Allocate a struct mem_ctl_info structure
 * @sz_pvt:	size of private storage needed
 * @nr_csrows:	number of CSROWS needed for this MC
 * @nr_chans:	number of channels for the MC
 * @edac_index:	unique index used to identify this mc structure
 *
 * Everything is kmalloc'ed as one big chunk - more efficient.
 * Can only be used if all structures have the same lifetime - otherwise
 * you have to allocate and initialize your own structures.
 *
 * Use edac_mc_free() to free mc structures allocated by this function.
 *
 * Returns:
 *	NULL	allocation failed
 *	struct mem_ctl_info pointer
 */
struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, unsigned nr_csrows,
				unsigned nr_chans, int edac_index)
{
	struct mem_ctl_info *mci;
	struct csrow_info *csi, *csrow;
	struct channel_info *chi, *chp, *chan;
	void *pvt;
	unsigned size;
	int row, chn;
	int err;

	/* Figure out the offsets of the various items from the start of an mc
	 * structure.  We want the alignment of each item to be at least as
	 * stringent as what the compiler would provide if we could simply
	 * hardcode everything into a single struct.
	 */
	mci = (struct mem_ctl_info *)0;
	csi = edac_align_ptr(&mci[1], sizeof(*csi));
	chi = edac_align_ptr(&csi[nr_csrows], sizeof(*chi));
	pvt = edac_align_ptr(&chi[nr_chans * nr_csrows], sz_pvt);
	size = ((unsigned long)pvt) + sz_pvt;

	mci = kzalloc(size, GFP_KERNEL);
	if (mci == NULL)
		return NULL;

	/* Adjust pointers so they point within the memory we just allocated
	 * rather than an imaginary chunk of memory located at address 0.
	 */
	csi = (struct csrow_info *)(((char *)mci) + ((unsigned long)csi));
	chi = (struct channel_info *)(((char *)mci) + ((unsigned long)chi));
	pvt = sz_pvt ? (((char *)mci) + ((unsigned long)pvt)) : NULL;

	/* setup index and various internal pointers */
	mci->mc_idx = edac_index;
	mci->csrows = csi;
	mci->pvt_info = pvt;
	mci->nr_csrows = nr_csrows;

	for (row = 0; row < nr_csrows; row++) {
		csrow = &csi[row];
		csrow->csrow_idx = row;
		csrow->mci = mci;
		csrow->nr_channels = nr_chans;
		chp = &chi[row * nr_chans];
		csrow->channels = chp;

		for (chn = 0; chn < nr_chans; chn++) {
			chan = &chp[chn];
			chan->chan_idx = chn;
			chan->csrow = csrow;
		}
	}

	mci->op_state = OP_ALLOC;

	/*
	 * Initialize the 'root' kobj for the edac_mc controller
	 */
	err = edac_mc_register_sysfs_main_kobj(mci);
	if (err) {
		kfree(mci);
		return NULL;
	}

	/* at this point, the root kobj is valid, and in order to
	 * 'free' the object, then the function:
	 *      edac_mc_unregister_sysfs_main_kobj() must be called
	 * which will perform kobj unregistration and the actual free
	 * will occur during the kobject callback operation
	 */
	return mci;
}
EXPORT_SYMBOL_GPL(edac_mc_alloc);
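
/*
 * Illustration (added; not in the original source): the single kzalloc()
 * above yields one contiguous block laid out roughly as
 *
 *	+--------------+---------------+----------------------+----------+
 *	| mem_ctl_info | csrow_info    | channel_info         | private  |
 *	| (one)        | [nr_csrows]   | [nr_csrows*nr_chans] | (sz_pvt) |
 *	+--------------+---------------+----------------------+----------+
 *
 * with each section placed by edac_align_ptr().  Because everything
 * shares one allocation with one lifetime, the whole structure is later
 * released by a single kfree() from the kobject release path - see
 * edac_mc_free() below.
 */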

/**
 * edac_mc_free
 *	'Free' a previously allocated 'mci' structure
 * @mci: pointer to a struct mem_ctl_info structure
 */
void edac_mc_free(struct mem_ctl_info *mci)
{
	edac_mc_unregister_sysfs_main_kobj(mci);
}
EXPORT_SYMBOL_GPL(edac_mc_free);


/*
 * find_mci_by_dev
 *
 *	scan list of controllers looking for the one that manages
 *	the 'dev' device
 */
static struct mem_ctl_info *find_mci_by_dev(struct device *dev)
{
	struct mem_ctl_info *mci;
	struct list_head *item;

	debugf3("%s()\n", __func__);

	list_for_each(item, &mc_devices) {
		mci = list_entry(item, struct mem_ctl_info, link);

		if (mci->dev == dev)
			return mci;
	}

	return NULL;
}

/*
 * handler for EDAC to check if an NMI-type handler has asserted an error
 */
static int edac_mc_assert_error_check_and_clear(void)
{
	int old_state;

	if (edac_op_state == EDAC_OPSTATE_POLL)
		return 1;

	old_state = edac_err_assert;
	edac_err_assert = 0;

	return old_state;
}

/*
 * edac_mc_workq_function
 *	performs the operation scheduled by a workq request
 */
static void edac_mc_workq_function(struct work_struct *work_req)
{
	struct delayed_work *d_work = to_delayed_work(work_req);
	struct mem_ctl_info *mci = to_edac_mem_ctl_work(d_work);

	mutex_lock(&mem_ctls_mutex);

	/* if this control struct has moved to the offline state, we are done */
	if (mci->op_state == OP_OFFLINE) {
		mutex_unlock(&mem_ctls_mutex);
		return;
	}

	/* Only poll controllers that are running polled and have a check */
	if (edac_mc_assert_error_check_and_clear() && (mci->edac_check != NULL))
		mci->edac_check(mci);

	mutex_unlock(&mem_ctls_mutex);

	/* Reschedule */
	queue_delayed_work(edac_workqueue, &mci->work,
			msecs_to_jiffies(edac_mc_get_poll_msec()));
}
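
/*
 * Note (added for clarity): when a driver supplies an edac_check()
 * routine, edac_mc_add_mc() puts the instance into OP_RUNNING_POLL and
 * queues this work item.  From then on the function above runs every
 * edac_mc_get_poll_msec() milliseconds: it invokes the driver's check
 * routine under mem_ctls_mutex and requeues itself, until the instance
 * is moved to OP_OFFLINE by edac_mc_del_mc().
 */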

/*
 * edac_mc_workq_setup
 *	initialize a workq item for this mci
 *	passing in the new delay period in msec
 *
 *	locking model:
 *
 *		called with the mem_ctls_mutex held
 */
static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec)
{
	debugf0("%s()\n", __func__);

	/* if this instance is not in the POLL state, then simply return */
	if (mci->op_state != OP_RUNNING_POLL)
		return;

	INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function);
	queue_delayed_work(edac_workqueue, &mci->work, msecs_to_jiffies(msec));
}

/*
 * edac_mc_workq_teardown
 *	stop the workq processing on this mci
 *
 *	locking model:
 *
 *		called WITHOUT lock held
 */
static void edac_mc_workq_teardown(struct mem_ctl_info *mci)
{
	int status;

	if (mci->op_state != OP_RUNNING_POLL)
		return;

	status = cancel_delayed_work(&mci->work);
	if (status == 0) {
		debugf0("%s() not canceled, flush the queue\n",
			__func__);

		/* workq instance might be running, wait for it */
		flush_workqueue(edac_workqueue);
	}
}

/*
 * edac_mc_reset_delay_period(int value)
 *
 *	user space has updated our poll period value; we need to
 *	reset our workq delays
 */
void edac_mc_reset_delay_period(int value)
{
	struct mem_ctl_info *mci;
	struct list_head *item;

	mutex_lock(&mem_ctls_mutex);

	/* scan the list and turn off all workq timers, doing so under lock
	 */
	list_for_each(item, &mc_devices) {
		mci = list_entry(item, struct mem_ctl_info, link);

		if (mci->op_state == OP_RUNNING_POLL)
			cancel_delayed_work(&mci->work);
	}

	mutex_unlock(&mem_ctls_mutex);

	/* re-walk the list, and reset the poll delay */
	mutex_lock(&mem_ctls_mutex);

	list_for_each(item, &mc_devices) {
		mci = list_entry(item, struct mem_ctl_info, link);

		edac_mc_workq_setup(mci, (unsigned long) value);
	}

	mutex_unlock(&mem_ctls_mutex);
}
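
/*
 * Note (added for clarity): the function above deliberately makes two
 * passes.  The first pass cancels every pending work item under the
 * mutex so that no stale timer fires with the old period; the second
 * pass requeues each controller with the new delay via
 * edac_mc_workq_setup(), which itself skips any instance that is not
 * in the OP_RUNNING_POLL state.
 */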

/* Return 0 on success, 1 on failure.
 * Before calling this function, caller must
 * assign a unique value to mci->mc_idx.
 *
 *	locking model:
 *
 *		called with the mem_ctls_mutex lock held
 */
static int add_mc_to_global_list(struct mem_ctl_info *mci)
{
	struct list_head *item, *insert_before;
	struct mem_ctl_info *p;

	insert_before = &mc_devices;

	p = find_mci_by_dev(mci->dev);
	if (unlikely(p != NULL))
		goto fail0;

	list_for_each(item, &mc_devices) {
		p = list_entry(item, struct mem_ctl_info, link);

		if (p->mc_idx >= mci->mc_idx) {
			if (unlikely(p->mc_idx == mci->mc_idx))
				goto fail1;

			insert_before = item;
			break;
		}
	}

	list_add_tail_rcu(&mci->link, insert_before);
	atomic_inc(&edac_handlers);
	return 0;

fail0:
	edac_printk(KERN_WARNING, EDAC_MC,
		"%s (%s) %s %s already assigned %d\n", dev_name(p->dev),
		edac_dev_name(mci), p->mod_name, p->ctl_name, p->mc_idx);
	return 1;

fail1:
	edac_printk(KERN_WARNING, EDAC_MC,
		"bug in low-level driver: attempt to assign\n"
		"    duplicate mc_idx %d in %s()\n", p->mc_idx, __func__);
	return 1;
}

static void complete_mc_list_del(struct rcu_head *head)
{
	struct mem_ctl_info *mci;

	mci = container_of(head, struct mem_ctl_info, rcu);
	INIT_LIST_HEAD(&mci->link);
}

static void del_mc_from_global_list(struct mem_ctl_info *mci)
{
	atomic_dec(&edac_handlers);
	list_del_rcu(&mci->link);
	call_rcu(&mci->rcu, complete_mc_list_del);
	rcu_barrier();
}
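
/*
 * Note (added for clarity): removal uses the RCU list pattern because
 * readers may traverse mc_devices without taking mem_ctls_mutex.
 * list_del_rcu() unlinks the entry, call_rcu() defers
 * complete_mc_list_del() until all current readers have finished, and
 * rcu_barrier() makes del_mc_from_global_list() wait for that callback,
 * so the caller may safely free the mci once this function returns.
 */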

/**
 * edac_mc_find: Search for a mem_ctl_info structure whose index is 'idx'.
 *
 * If found, return a pointer to the structure.
 * Else return NULL.
 *
 * Caller must hold mem_ctls_mutex.
 */
struct mem_ctl_info *edac_mc_find(int idx)
{
	struct list_head *item;
	struct mem_ctl_info *mci;

	list_for_each(item, &mc_devices) {
		mci = list_entry(item, struct mem_ctl_info, link);

		if (mci->mc_idx >= idx) {
			if (mci->mc_idx == idx)
				return mci;

			break;
		}
	}

	return NULL;
}
EXPORT_SYMBOL(edac_mc_find);

/**
 * edac_mc_add_mc: Insert the 'mci' structure into the mci global list and
 *                 create sysfs entries associated with mci structure
 * @mci: pointer to the mci structure to be added to the list; the caller
 *	must already have stored a unique index in mci->mc_idx (set by
 *	edac_mc_alloc())
 *
 * Return:
 *	0	Success
 *	!0	Failure
 */
int edac_mc_add_mc(struct mem_ctl_info *mci)
{
	debugf0("%s()\n", __func__);

#ifdef CONFIG_EDAC_DEBUG
	if (edac_debug_level >= 3)
		edac_mc_dump_mci(mci);

	if (edac_debug_level >= 4) {
		int i;

		for (i = 0; i < mci->nr_csrows; i++) {
			int j;

			edac_mc_dump_csrow(&mci->csrows[i]);
			for (j = 0; j < mci->csrows[i].nr_channels; j++)
				edac_mc_dump_channel(&mci->csrows[i].
						channels[j]);
		}
	}
#endif
	mutex_lock(&mem_ctls_mutex);

	if (add_mc_to_global_list(mci))
		goto fail0;

	/* set load time so that error rate can be tracked */
	mci->start_time = jiffies;

	if (edac_create_sysfs_mci_device(mci)) {
		edac_mc_printk(mci, KERN_WARNING,
			"failed to create sysfs device\n");
		goto fail1;
	}

	/* If there IS a check routine, then we are running POLLED */
	if (mci->edac_check != NULL) {
		/* This instance is NOW RUNNING */
		mci->op_state = OP_RUNNING_POLL;

		edac_mc_workq_setup(mci, edac_mc_get_poll_msec());
	} else {
		mci->op_state = OP_RUNNING_INTERRUPT;
	}

	/* Report action taken */
	edac_mc_printk(mci, KERN_INFO, "Giving out device to '%s' '%s':"
		" DEV %s\n", mci->mod_name, mci->ctl_name, edac_dev_name(mci));

	mutex_unlock(&mem_ctls_mutex);
	return 0;

fail1:
	del_mc_from_global_list(mci);

fail0:
	mutex_unlock(&mem_ctls_mutex);
	return 1;
}
EXPORT_SYMBOL_GPL(edac_mc_add_mc);
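
/*
 * Usage sketch (added for illustration; 'struct my_pvt' and my_check()
 * are hypothetical).  A low-level driver typically registers from its
 * probe routine and unwinds in reverse order on remove:
 *
 *	mci = edac_mc_alloc(sizeof(struct my_pvt), nr_csrows, nr_chans, 0);
 *	if (mci == NULL)
 *		return -ENOMEM;
 *	mci->dev = &pdev->dev;		(plus mtype_cap, ctl_name, ...)
 *	mci->edac_check = my_check;	(NULL if the hardware interrupts)
 *	if (edac_mc_add_mc(mci)) {
 *		edac_mc_free(mci);
 *		return -ENODEV;
 *	}
 *
 * and on remove:
 *
 *	mci = edac_mc_del_mc(&pdev->dev);
 *	if (mci)
 *		edac_mc_free(mci);
 */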

/**
 * edac_mc_del_mc: Remove sysfs entries for specified mci structure and
 *                 remove mci structure from global list
 * @dev: Pointer to 'struct device' representing mci structure to remove.
 *
 * Return pointer to removed mci structure, or NULL if device not found.
 */
struct mem_ctl_info *edac_mc_del_mc(struct device *dev)
{
	struct mem_ctl_info *mci;

	debugf0("%s()\n", __func__);

	mutex_lock(&mem_ctls_mutex);

	/* find the requested mci struct in the global list */
	mci = find_mci_by_dev(dev);
	if (mci == NULL) {
		mutex_unlock(&mem_ctls_mutex);
		return NULL;
	}

	del_mc_from_global_list(mci);
	mutex_unlock(&mem_ctls_mutex);

	/* flush workq processes */
	edac_mc_workq_teardown(mci);

	/* marking MCI offline */
	mci->op_state = OP_OFFLINE;

	/* remove from sysfs */
	edac_remove_sysfs_mci_device(mci);

	edac_printk(KERN_INFO, EDAC_MC,
		"Removed device %d for %s %s: DEV %s\n", mci->mc_idx,
		mci->mod_name, mci->ctl_name, edac_dev_name(mci));

	return mci;
}
EXPORT_SYMBOL_GPL(edac_mc_del_mc);

static void edac_mc_scrub_block(unsigned long page, unsigned long offset,
				u32 size)
{
	struct page *pg;
	void *virt_addr;
	unsigned long flags = 0;

	debugf3("%s()\n", __func__);

	/* ECC error page was not in our memory. Ignore it. */
	if (!pfn_valid(page))
		return;

	/* Find the actual page structure then map it and fix */
	pg = pfn_to_page(page);

	if (PageHighMem(pg))
		local_irq_save(flags);

	virt_addr = kmap_atomic(pg, KM_BOUNCE_READ);

	/* Perform architecture specific atomic scrub operation */
	atomic_scrub(virt_addr + offset, size);

	/* Unmap and complete */
	kunmap_atomic(virt_addr, KM_BOUNCE_READ);

	if (PageHighMem(pg))
		local_irq_restore(flags);
}
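
/*
 * Note (added for clarity): the "scrub" above is a software scrub: it
 * performs an atomic read-modify-write of the affected location via the
 * architecture-specific atomic_scrub() primitive from <asm/edac.h>.
 * Re-writing the data forces the memory controller to store the
 * corrected value back to DRAM, so the same single-bit error is not
 * reported again on the next read.
 */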

int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, unsigned long page)
{
	struct csrow_info *csrows = mci->csrows;
	int row, i;

	debugf1("MC%d: %s(): 0x%lx\n", mci->mc_idx, __func__, page);
	row = -1;

	for (i = 0; i < mci->nr_csrows; i++) {
		struct csrow_info *csrow = &csrows[i];

		if (csrow->nr_pages == 0)
			continue;

		debugf3("MC%d: %s(): first(0x%lx) page(0x%lx) last(0x%lx) "
			"mask(0x%lx)\n", mci->mc_idx, __func__,
			csrow->first_page, page, csrow->last_page,
			csrow->page_mask);

		if ((page >= csrow->first_page) &&
		    (page <= csrow->last_page) &&
		    ((page & csrow->page_mask) ==
		     (csrow->first_page & csrow->page_mask))) {
			row = i;
			break;
		}
	}

	if (row == -1)
		edac_mc_printk(mci, KERN_ERR,
			"could not look up page error address %lx\n",
			(unsigned long)page);

	return row;
}
EXPORT_SYMBOL_GPL(edac_mc_find_csrow_by_page);
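
/*
 * Illustration (added; the values are hypothetical): a csrow spanning
 * pages [0x10000, 0x1ffff] with page_mask == 0 matches any page in that
 * range, since (page & 0) == (first_page & 0) always holds.  A driver
 * describing interleaved rows can instead set a non-zero page_mask so
 * that only pages whose masked bits equal those of first_page - e.g.
 * every other page - are attributed to this csrow.
 */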

void edac_mc_handle_ce(struct mem_ctl_info *mci,
		unsigned long page_frame_number,
		unsigned long offset_in_page, unsigned long syndrome,
		int row, int channel, const char *msg)
{
	unsigned long remapped_page;

	debugf3("MC%d: %s()\n", mci->mc_idx, __func__);

	if (row >= mci->nr_csrows || row < 0) {
		/* something is wrong */
		edac_mc_printk(mci, KERN_ERR,
			"INTERNAL ERROR: row out of range "
			"(%d >= %d)\n", row, mci->nr_csrows);
		edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR");
		return;
	}

	if (channel >= mci->csrows[row].nr_channels || channel < 0) {
		/* something is wrong */
		edac_mc_printk(mci, KERN_ERR,
			"INTERNAL ERROR: channel out of range "
			"(%d >= %d)\n", channel,
			mci->csrows[row].nr_channels);
		edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR");
		return;
	}

	if (edac_mc_get_log_ce())
		edac_mc_printk(mci, KERN_WARNING,
			"CE page 0x%lx, offset 0x%lx, grain %d, syndrome "
			"0x%lx, row %d, channel %d, label \"%s\": %s\n",
			page_frame_number, offset_in_page,
			mci->csrows[row].grain, syndrome, row, channel,
			mci->csrows[row].channels[channel].label, msg);

	mci->ce_count++;
	mci->csrows[row].ce_count++;
	mci->csrows[row].channels[channel].ce_count++;

	if (mci->scrub_mode & SCRUB_SW_SRC) {
		/*
		 * Some MCs can remap memory so that it is still available
		 * at a different address when PCI devices map into memory.
		 * MCs that can't do this lose the memory where PCI devices
		 * are mapped.  This mapping is MC dependent and so we call
		 * back into the MC driver for it to map the MC page to
		 * a physical (CPU) page which can then be mapped to a virtual
		 * page - which can then be scrubbed.
		 */
		remapped_page = mci->ctl_page_to_phys ?
			mci->ctl_page_to_phys(mci, page_frame_number) :
			page_frame_number;

		edac_mc_scrub_block(remapped_page, offset_in_page,
				mci->csrows[row].grain);
	}
}
EXPORT_SYMBOL_GPL(edac_mc_handle_ce);
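
/*
 * Usage sketch (added for illustration; the register names are
 * hypothetical): a driver's edac_check() routine decodes its error
 * registers and reports through this API, e.g.:
 *
 *	if (status & MY_CE_BIT)
 *		edac_mc_handle_ce(mci, err_pfn, err_offset, syndrome,
 *				row, chan, "my_ctl CE");
 *	else if (ce_seen_but_address_lost)
 *		edac_mc_handle_ce_no_info(mci, "my_ctl CE, address lost");
 */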

void edac_mc_handle_ce_no_info(struct mem_ctl_info *mci, const char *msg)
{
	if (edac_mc_get_log_ce())
		edac_mc_printk(mci, KERN_WARNING,
			"CE - no information available: %s\n", msg);

	mci->ce_noinfo_count++;
	mci->ce_count++;
}
EXPORT_SYMBOL_GPL(edac_mc_handle_ce_no_info);

void edac_mc_handle_ue(struct mem_ctl_info *mci,
		unsigned long page_frame_number,
		unsigned long offset_in_page, int row, const char *msg)
{
	int len = EDAC_MC_LABEL_LEN * 4;
	char labels[len + 1];
	char *pos = labels;
	int chan;
	int chars;

	debugf3("MC%d: %s()\n", mci->mc_idx, __func__);

	if (row >= mci->nr_csrows || row < 0) {
		/* something is wrong */
		edac_mc_printk(mci, KERN_ERR,
			"INTERNAL ERROR: row out of range "
			"(%d >= %d)\n", row, mci->nr_csrows);
		edac_mc_handle_ue_no_info(mci, "INTERNAL ERROR");
		return;
	}

	chars = snprintf(pos, len + 1, "%s",
			 mci->csrows[row].channels[0].label);
	len -= chars;
	pos += chars;

	for (chan = 1; (chan < mci->csrows[row].nr_channels) && (len > 0);
		chan++) {
		chars = snprintf(pos, len + 1, ":%s",
				 mci->csrows[row].channels[chan].label);
		len -= chars;
		pos += chars;
	}

	if (edac_mc_get_log_ue())
		edac_mc_printk(mci, KERN_EMERG,
			"UE page 0x%lx, offset 0x%lx, grain %d, row %d, "
			"labels \"%s\": %s\n", page_frame_number,
			offset_in_page, mci->csrows[row].grain, row,
			labels, msg);

	if (edac_mc_get_panic_on_ue())
		panic("EDAC MC%d: UE page 0x%lx, offset 0x%lx, grain %d, "
			"row %d, labels \"%s\": %s\n", mci->mc_idx,
			page_frame_number, offset_in_page,
			mci->csrows[row].grain, row, labels, msg);

	mci->ue_count++;
	mci->csrows[row].ue_count++;
}
EXPORT_SYMBOL_GPL(edac_mc_handle_ue);

void edac_mc_handle_ue_no_info(struct mem_ctl_info *mci, const char *msg)
{
	if (edac_mc_get_panic_on_ue())
		panic("EDAC MC%d: Uncorrected Error", mci->mc_idx);

	if (edac_mc_get_log_ue())
		edac_mc_printk(mci, KERN_WARNING,
			"UE - no information available: %s\n", msg);
	mci->ue_noinfo_count++;
	mci->ue_count++;
}
EXPORT_SYMBOL_GPL(edac_mc_handle_ue_no_info);

/*************************************************************
 * On Fully Buffered DIMM modules, this helper function is
 * called to process UE events
 */
void edac_mc_handle_fbd_ue(struct mem_ctl_info *mci,
			unsigned int csrow,
			unsigned int channela,
			unsigned int channelb, char *msg)
{
	int len = EDAC_MC_LABEL_LEN * 4;
	char labels[len + 1];
	char *pos = labels;
	int chars;

	if (csrow >= mci->nr_csrows) {
		/* something is wrong */
		edac_mc_printk(mci, KERN_ERR,
			"INTERNAL ERROR: row out of range (%d >= %d)\n",
			csrow, mci->nr_csrows);
		edac_mc_handle_ue_no_info(mci, "INTERNAL ERROR");
		return;
	}

	if (channela >= mci->csrows[csrow].nr_channels) {
		/* something is wrong */
		edac_mc_printk(mci, KERN_ERR,
			"INTERNAL ERROR: channel-a out of range "
			"(%d >= %d)\n",
			channela, mci->csrows[csrow].nr_channels);
		edac_mc_handle_ue_no_info(mci, "INTERNAL ERROR");
		return;
	}

	if (channelb >= mci->csrows[csrow].nr_channels) {
		/* something is wrong */
		edac_mc_printk(mci, KERN_ERR,
			"INTERNAL ERROR: channel-b out of range "
			"(%d >= %d)\n",
			channelb, mci->csrows[csrow].nr_channels);
		edac_mc_handle_ue_no_info(mci, "INTERNAL ERROR");
		return;
	}

	mci->ue_count++;
	mci->csrows[csrow].ue_count++;

	/* Generate the DIMM labels from the specified channels */
	chars = snprintf(pos, len + 1, "%s",
			 mci->csrows[csrow].channels[channela].label);
	len -= chars;
	pos += chars;
	chars = snprintf(pos, len + 1, "-%s",
			 mci->csrows[csrow].channels[channelb].label);

	if (edac_mc_get_log_ue())
		edac_mc_printk(mci, KERN_EMERG,
			"UE row %d, channel-a= %d channel-b= %d "
			"labels \"%s\": %s\n", csrow, channela, channelb,
			labels, msg);

	if (edac_mc_get_panic_on_ue())
		panic("UE row %d, channel-a= %d channel-b= %d "
			"labels \"%s\": %s\n", csrow, channela,
			channelb, labels, msg);
}
EXPORT_SYMBOL(edac_mc_handle_fbd_ue);

/*************************************************************
 * On Fully Buffered DIMM modules, this helper function is
 * called to process CE events
 */
void edac_mc_handle_fbd_ce(struct mem_ctl_info *mci,
			unsigned int csrow, unsigned int channel, char *msg)
{

	/* Ensure boundary values */
	if (csrow >= mci->nr_csrows) {
		/* something is wrong */
		edac_mc_printk(mci, KERN_ERR,
			"INTERNAL ERROR: row out of range (%d >= %d)\n",
			csrow, mci->nr_csrows);
		edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR");
		return;
	}
	if (channel >= mci->csrows[csrow].nr_channels) {
		/* something is wrong */
		edac_mc_printk(mci, KERN_ERR,
			"INTERNAL ERROR: channel out of range (%d >= %d)\n",
			channel, mci->csrows[csrow].nr_channels);
		edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR");
		return;
	}

	if (edac_mc_get_log_ce())
		edac_mc_printk(mci, KERN_WARNING,
			"CE row %d, channel %d, label \"%s\": %s\n",
			csrow, channel,
			mci->csrows[csrow].channels[channel].label, msg);

	mci->ce_count++;
	mci->csrows[csrow].ce_count++;
	mci->csrows[csrow].channels[channel].ce_count++;
}
EXPORT_SYMBOL(edac_mc_handle_fbd_ce);
