// SPDX-License-Identifier: GPL-2.0-only
/*
 * MCE event pool management in MCE context
 *
 * Copyright (C) 2015 Intel Corp.
 * Author: Chen, Gong <gong.chen@linux.intel.com>
 */
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/genalloc.h>
#include <linux/llist.h>
#include "internal.h"

/*
 * printk() is not safe in MCE context, so error records cannot be
 * reported as they arrive. Instead, this lock-less memory allocator
 * saves them on a lock-less list until they can be processed later.
 *
 * This memory pool is only to be used to save MCE records in MCE context.
 * MCE events are rare, so a fixed size memory pool should be enough. Use
 * 2 pages to save MCE events for now (~80 MCE records at most).
 */
#define MCE_POOLSZ	(2 * PAGE_SIZE)

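/*
 * The pool, the list of queued records, and the static buffer backing
 * the pool. The buffer is statically allocated because nothing may be
 * allocated from the page allocator in MCE context.
 */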
static struct gen_pool *mce_evt_pool;
static LLIST_HEAD(mce_event_llist);
static char gen_pool_buf[MCE_POOLSZ];

/*
 * Compare the record "t" with each of the records on list "l" to see if
 * an equivalent one is present in the list.
 */
static bool is_duplicate_mce_record(struct mce_evt_llist *t, struct mce_evt_llist *l)
{
	struct mce_evt_llist *node;
	struct mce *m1, *m2;

	m1 = &t->mce;

	llist_for_each_entry(node, &l->llnode, llnode) {
		m2 = &node->mce;

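		/* mce_cmp() returns false when the records' identifying fields match */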
		if (!mce_cmp(m1, m2))
			return true;
	}
	return false;
}

/*
 * The system has panicked - we'd like to peruse the list of MCE records
 * that have been queued, but not seen by anyone yet.  The list is in
 * reverse time order, so we need to reverse it. While doing that we can
 * also drop duplicate records (these were logged because some banks are
 * shared between cores or by all threads on a socket).
 */
struct llist_node *mce_gen_pool_prepare_records(void)
{
	struct llist_node *head;
	LLIST_HEAD(new_head);
	struct mce_evt_llist *node, *t;

	head = llist_del_all(&mce_event_llist);
	if (!head)
		return NULL;

	/* squeeze out duplicates while reversing order */
	llist_for_each_entry_safe(node, t, head, llnode) {
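		/* compare "node" only against the records that follow it ("t") */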
		if (!is_duplicate_mce_record(node, t))
			llist_add(&node->llnode, &new_head);
	}

	return new_head.first;
}

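/*
 * Drain the list of queued MCE records and feed each one to the registered
 * decoders. This runs from a workqueue, i.e. in process context, where the
 * blocking notifier chain is allowed to sleep.
 */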
void mce_gen_pool_process(struct work_struct *__unused)
{
	struct llist_node *head;
	struct mce_evt_llist *node, *tmp;
	struct mce *mce;

	head = llist_del_all(&mce_event_llist);
	if (!head)
		return;

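	/* llist_add() pushes at the head, so reverse to process in arrival order */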
	head = llist_reverse_order(head);
	llist_for_each_entry_safe(node, tmp, head, llnode) {
		mce = &node->mce;
		blocking_notifier_call_chain(&x86_mce_decoder_chain, 0, mce);
		gen_pool_free(mce_evt_pool, (unsigned long)node, sizeof(*node));
	}
}

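/* True when no queued MCE records are waiting to be processed. */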
bool mce_gen_pool_empty(void)
{
	return llist_empty(&mce_event_llist);
}

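/*
 * Queue one MCE record. This is safe to call from MCE context because
 * both gen_pool_alloc() and llist_add() are lock-less - which is the
 * reason this pool exists in the first place.
 */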
int mce_gen_pool_add(struct mce *mce)
{
	struct mce_evt_llist *node;

	if (filter_mce(mce))
		return -EINVAL;

	if (!mce_evt_pool)
		return -EINVAL;

	node = (void *)gen_pool_alloc(mce_evt_pool, sizeof(*node));
	if (!node) {
		pr_warn_ratelimited("MCE records pool full!\n");
		return -ENOMEM;
	}

	memcpy(&node->mce, mce, sizeof(*mce));
	llist_add(&node->llnode, &mce_event_llist);

	return 0;
}
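
/*
 * A sketch of the intended use from the MCE core (the helper name below
 * is illustrative, not defined here): queue the record, then schedule
 * the processing work once it is safe to do so.
 *
 *	if (!mce_gen_pool_add(&m))
 *		mce_schedule_work();
 */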
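/*
 * Carve the pool out of the static buffer. The minimum allocation order
 * is taken from the size of one record node (note that ilog2() rounds
 * down, so an allocation may span two chunks when that size is not a
 * power of two); -1 means no NUMA node restriction.
 */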
static int mce_gen_pool_create(void)
{
	struct gen_pool *tmpp;
	int ret = -ENOMEM;

	tmpp = gen_pool_create(ilog2(sizeof(struct mce_evt_llist)), -1);
	if (!tmpp)
		goto out;

	ret = gen_pool_add(tmpp, (unsigned long)gen_pool_buf, MCE_POOLSZ, -1);
	if (ret) {
		gen_pool_destroy(tmpp);
		goto out;
	}

	mce_evt_pool = tmpp;

out:
	return ret;
}

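/* Set up the pool once, at MCE subsystem initialization. */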
int mce_gen_pool_init(void)
{
	/* Just init mce_gen_pool once. */
	if (mce_evt_pool)
		return 0;

	return mce_gen_pool_create();
}