/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org> et al.
 */

/* Overhauled routines for dealing with different mmap regions of flash */

#ifndef __LINUX_MTD_MAP_H__
#define __LINUX_MTD_MAP_H__

#include <linux/types.h>
#include <linux/list.h>
#include <linux/string.h>
#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/io.h>

#include <asm/unaligned.h>
#include <asm/barrier.h>

#ifdef CONFIG_MTD_MAP_BANK_WIDTH_1
#define map_bankwidth(map) 1
#define map_bankwidth_is_1(map) (map_bankwidth(map) == 1)
#define map_bankwidth_is_large(map) (0)
#define map_words(map) (1)
#define MAX_MAP_BANKWIDTH 1
#else
#define map_bankwidth_is_1(map) (0)
#endif

#ifdef CONFIG_MTD_MAP_BANK_WIDTH_2
# ifdef map_bankwidth
#  undef map_bankwidth
#  define map_bankwidth(map) ((map)->bankwidth)
# else
#  define map_bankwidth(map) 2
#  define map_bankwidth_is_large(map) (0)
#  define map_words(map) (1)
# endif
#define map_bankwidth_is_2(map) (map_bankwidth(map) == 2)
#undef MAX_MAP_BANKWIDTH
#define MAX_MAP_BANKWIDTH 2
#else
#define map_bankwidth_is_2(map) (0)
#endif

#ifdef CONFIG_MTD_MAP_BANK_WIDTH_4
# ifdef map_bankwidth
#  undef map_bankwidth
#  define map_bankwidth(map) ((map)->bankwidth)
# else
#  define map_bankwidth(map) 4
#  define map_bankwidth_is_large(map) (0)
#  define map_words(map) (1)
# endif
#define map_bankwidth_is_4(map) (map_bankwidth(map) == 4)
#undef MAX_MAP_BANKWIDTH
#define MAX_MAP_BANKWIDTH 4
#else
#define map_bankwidth_is_4(map) (0)
#endif

/* Round up so that anything shorter than an unsigned long never evaluates
 * to zero words, and so we never miss the end of a comparison (bjd) */

#define map_calc_words(map) ((map_bankwidth(map) + (sizeof(unsigned long)-1)) / sizeof(unsigned long))
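
/*
 * Worked example (illustrative): with a 16-byte bankwidth on a 32-bit
 * kernel, map_calc_words() yields (16 + 3) / 4 = 4 unsigned longs per
 * device word; on a 64-bit kernel it yields (16 + 7) / 8 = 2.
 */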

#ifdef CONFIG_MTD_MAP_BANK_WIDTH_8
# ifdef map_bankwidth
#  undef map_bankwidth
#  define map_bankwidth(map) ((map)->bankwidth)
#  if BITS_PER_LONG < 64
#   undef map_bankwidth_is_large
#   define map_bankwidth_is_large(map) (map_bankwidth(map) > BITS_PER_LONG/8)
#   undef map_words
#   define map_words(map) map_calc_words(map)
#  endif
# else
#  define map_bankwidth(map) 8
#  define map_bankwidth_is_large(map) (BITS_PER_LONG < 64)
#  define map_words(map) map_calc_words(map)
# endif
#define map_bankwidth_is_8(map) (map_bankwidth(map) == 8)
#undef MAX_MAP_BANKWIDTH
#define MAX_MAP_BANKWIDTH 8
#else
#define map_bankwidth_is_8(map) (0)
#endif

#ifdef CONFIG_MTD_MAP_BANK_WIDTH_16
# ifdef map_bankwidth
#  undef map_bankwidth
#  define map_bankwidth(map) ((map)->bankwidth)
#  undef map_bankwidth_is_large
#  define map_bankwidth_is_large(map) (map_bankwidth(map) > BITS_PER_LONG/8)
#  undef map_words
#  define map_words(map) map_calc_words(map)
# else
#  define map_bankwidth(map) 16
#  define map_bankwidth_is_large(map) (1)
#  define map_words(map) map_calc_words(map)
# endif
#define map_bankwidth_is_16(map) (map_bankwidth(map) == 16)
#undef MAX_MAP_BANKWIDTH
#define MAX_MAP_BANKWIDTH 16
#else
#define map_bankwidth_is_16(map) (0)
#endif

#ifdef CONFIG_MTD_MAP_BANK_WIDTH_32
/* always use indirect access for 256-bit to preserve kernel stack */
# undef map_bankwidth
# define map_bankwidth(map) ((map)->bankwidth)
# undef map_bankwidth_is_large
# define map_bankwidth_is_large(map) (map_bankwidth(map) > BITS_PER_LONG/8)
# undef map_words
# define map_words(map) map_calc_words(map)
#define map_bankwidth_is_32(map) (map_bankwidth(map) == 32)
#undef MAX_MAP_BANKWIDTH
#define MAX_MAP_BANKWIDTH 32
#else
#define map_bankwidth_is_32(map) (0)
#endif

#ifndef map_bankwidth
#ifdef CONFIG_MTD
#warning "No CONFIG_MTD_MAP_BANK_WIDTH_xx selected. No NOR chip support can work"
#endif
static inline int map_bankwidth(void *map)
{
	BUG();
	return 0;
}
#define map_bankwidth_is_large(map) (0)
#define map_words(map) (0)
#define MAX_MAP_BANKWIDTH 1
#endif

static inline int map_bankwidth_supported(int w)
{
	switch (w) {
#ifdef CONFIG_MTD_MAP_BANK_WIDTH_1
	case 1:
#endif
#ifdef CONFIG_MTD_MAP_BANK_WIDTH_2
	case 2:
#endif
#ifdef CONFIG_MTD_MAP_BANK_WIDTH_4
	case 4:
#endif
#ifdef CONFIG_MTD_MAP_BANK_WIDTH_8
	case 8:
#endif
#ifdef CONFIG_MTD_MAP_BANK_WIDTH_16
	case 16:
#endif
#ifdef CONFIG_MTD_MAP_BANK_WIDTH_32
	case 32:
#endif
		return 1;

	default:
		return 0;
	}
}

/* Number of unsigned longs needed to hold the widest configured bankwidth */
#define MAX_MAP_LONGS (((MAX_MAP_BANKWIDTH * 8) + BITS_PER_LONG - 1) / BITS_PER_LONG)

/* One device word: wide enough for the largest configured bankwidth */
typedef union {
	unsigned long x[MAX_MAP_LONGS];
} map_word;

/* The map stuff is very simple. You fill in your struct map_info with
   a handful of routines for accessing the device, making sure they handle
   paging etc. correctly if your device needs it. Then you pass it off
   to a chip probe routine -- either JEDEC or CFI probe or both -- via
   do_map_probe(). If a chip is recognised, the probe code will invoke the
   appropriate chip driver (if present) and return a struct mtd_info.
   At that point, you fill in mtd->owner with your own module and
   register it with the MTD core code. Or you could partition it and
   register the partitions instead, or keep it for your own private
   use; whatever.

   The mtd->priv field will point to the struct map_info, and any further
   private data required by the chip driver is linked from the
   mtd->priv->fldrv_priv field. This allows the map driver to get at
   the destructor function map->fldrv->destroy() when it's tired
   of living.
*/
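
/*
 * Illustrative sketch only (not part of this header): a minimal map driver
 * built on this API might look roughly like the following. The base address
 * EXAMPLE_FLASH_BASE, the size and all "example_*" identifiers are
 * hypothetical, and error handling is reduced to the bare minimum.
 *
 *	static struct map_info example_map = {
 *		.name      = "example-nor",
 *		.size      = 0x400000,		// 4 MiB window
 *		.phys      = EXAMPLE_FLASH_BASE,
 *		.bankwidth = 2,			// 16-bit bus
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		struct mtd_info *mtd;
 *
 *		example_map.virt = ioremap(example_map.phys, example_map.size);
 *		if (!example_map.virt)
 *			return -ENOMEM;
 *
 *		simple_map_init(&example_map);
 *
 *		mtd = do_map_probe("cfi_probe", &example_map);
 *		if (!mtd) {
 *			iounmap(example_map.virt);
 *			return -ENXIO;
 *		}
 *
 *		mtd->owner = THIS_MODULE;
 *		return mtd_device_register(mtd, NULL, 0);
 *	}
 */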

struct map_info {
	const char *name;
	unsigned long size;
	resource_size_t phys;
#define NO_XIP (-1UL)

	void __iomem *virt;
	void *cached;

	int swap; /* this mapping's byte-swapping requirement */
	int bankwidth; /* in octets. This isn't necessarily the width
			  of actual bus cycles -- it's the repeat interval
			  in bytes, before you are talking to the first
			  chip again. */

#ifdef CONFIG_MTD_COMPLEX_MAPPINGS
	map_word (*read)(struct map_info *, unsigned long);
	void (*copy_from)(struct map_info *, void *, unsigned long, ssize_t);

	void (*write)(struct map_info *, const map_word, unsigned long);
	void (*copy_to)(struct map_info *, unsigned long, const void *, ssize_t);

	/* We can perhaps put in 'point' and 'unpoint' methods, if we really
	   want to enable XIP for non-linear mappings. Not yet though. */
#endif
	/* It's possible for the map driver to use cached memory in its
	   copy_from implementation (and _only_ with copy_from).  However,
	   when the chip driver knows some flash area has changed contents,
	   it will signal it to the map driver through this routine to let
	   the map driver invalidate the corresponding cache as needed.
	   If there is no cache to care about this can be set to NULL. */
	void (*inval_cache)(struct map_info *, unsigned long, ssize_t);

	/* This will be called with 1 as parameter when the first map user
	 * needs VPP, and called with 0 when the last user exits. The map
	 * core maintains a reference counter, and assumes that VPP is a
	 * global resource applying to all mapped flash chips on the system.
	 * See the illustrative sketch after this structure.
	 */
	void (*set_vpp)(struct map_info *, int);

	unsigned long pfow_base;
	unsigned long map_priv_1;
	unsigned long map_priv_2;
	struct device_node *device_node;
	void *fldrv_priv;
	struct mtd_chip_driver *fldrv;
};
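
/*
 * Illustrative sketch only: on boards where VPP is switched by a GPIO,
 * set_vpp() can stay trivial because the map core already reference-counts
 * its users. The descriptor "example_vpp_gpio" below is hypothetical:
 *
 *	static void example_set_vpp(struct map_info *map, int on)
 *	{
 *		gpiod_set_value(example_vpp_gpio, on);
 *	}
 *
 * Maps with no cached copy of the flash and no VPP switch can simply leave
 * inval_cache and set_vpp as NULL.
 */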

struct mtd_chip_driver {
	struct mtd_info *(*probe)(struct map_info *map);
	void (*destroy)(struct mtd_info *);
	struct module *module;
	char *name;
	struct list_head list;
};

void register_mtd_chip_driver(struct mtd_chip_driver *);
void unregister_mtd_chip_driver(struct mtd_chip_driver *);

struct mtd_info *do_map_probe(const char *name, struct map_info *map);
void map_destroy(struct mtd_info *mtd);

#define ENABLE_VPP(map) do { if (map->set_vpp) map->set_vpp(map, 1); } while (0)
#define DISABLE_VPP(map) do { if (map->set_vpp) map->set_vpp(map, 0); } while (0)

#define INVALIDATE_CACHED_RANGE(map, from, size) \
	do { if (map->inval_cache) map->inval_cache(map, from, size); } while (0)

#define map_word_equal(map, val1, val2)					\
({									\
	int i, ret = 1;							\
	for (i = 0; i < map_words(map); i++)				\
		if ((val1).x[i] != (val2).x[i]) {			\
			ret = 0;					\
			break;						\
		}							\
	ret;								\
})

#define map_word_and(map, val1, val2)					\
({									\
	map_word r;							\
	int i;								\
	for (i = 0; i < map_words(map); i++)				\
		r.x[i] = (val1).x[i] & (val2).x[i];			\
	r;								\
})

#define map_word_clr(map, val1, val2)					\
({									\
	map_word r;							\
	int i;								\
	for (i = 0; i < map_words(map); i++)				\
		r.x[i] = (val1).x[i] & ~(val2).x[i];			\
	r;								\
})

#define map_word_or(map, val1, val2)					\
({									\
	map_word r;							\
	int i;								\
	for (i = 0; i < map_words(map); i++)				\
		r.x[i] = (val1).x[i] | (val2).x[i];			\
	r;								\
})

#define map_word_andequal(map, val1, val2, val3)			\
({									\
	int i, ret = 1;							\
	for (i = 0; i < map_words(map); i++) {				\
		if (((val1).x[i] & (val2).x[i]) != (val3).x[i]) {	\
			ret = 0;					\
			break;						\
		}							\
	}								\
	ret;								\
})

#define map_word_bitsset(map, val1, val2)				\
({									\
	int i, ret = 0;							\
	for (i = 0; i < map_words(map); i++) {				\
		if ((val1).x[i] & (val2).x[i]) {			\
			ret = 1;					\
			break;						\
		}							\
	}								\
	ret;								\
})

/* Load one device word from an arbitrarily aligned buffer in memory */
static inline map_word map_word_load(struct map_info *map, const void *ptr)
{
	map_word r;

	if (map_bankwidth_is_1(map))
		r.x[0] = *(unsigned char *)ptr;
	else if (map_bankwidth_is_2(map))
		r.x[0] = get_unaligned((uint16_t *)ptr);
	else if (map_bankwidth_is_4(map))
		r.x[0] = get_unaligned((uint32_t *)ptr);
#if BITS_PER_LONG >= 64
	else if (map_bankwidth_is_8(map))
		r.x[0] = get_unaligned((uint64_t *)ptr);
#endif
	else if (map_bankwidth_is_large(map))
		memcpy(r.x, ptr, map->bankwidth);
	else
		BUG();

	return r;
}

/* Merge 'len' bytes from 'buf' into 'orig', starting at byte 'start' of the device word */
static inline map_word map_word_load_partial(struct map_info *map, map_word orig, const unsigned char *buf, int start, int len)
{
	int i;

	if (map_bankwidth_is_large(map)) {
		char *dest = (char *)&orig;

		memcpy(dest+start, buf, len);
	} else {
		for (i = start; i < start+len; i++) {
			int bitpos;

#ifdef __LITTLE_ENDIAN
			bitpos = i * 8;
#else /* __BIG_ENDIAN */
			bitpos = (map_bankwidth(map) - 1 - i) * 8;
#endif
			orig.x[0] &= ~(0xffUL << bitpos);
			orig.x[0] |= (unsigned long)buf[i-start] << bitpos;
		}
	}
	return orig;
}

/*
 * Below MAP_FF_LIMIT bytes the all-ones value fits in a partial unsigned
 * long and can be built by shifting; at or above it we fill whole longs
 * instead, avoiding an undefined shift by BITS_PER_LONG.
 */
#if BITS_PER_LONG < 64
#define MAP_FF_LIMIT 4
#else
#define MAP_FF_LIMIT 8
#endif

/* Return a map_word with all bits set across the map's bankwidth */
static inline map_word map_word_ff(struct map_info *map)
{
	map_word r;
	int i;

	if (map_bankwidth(map) < MAP_FF_LIMIT) {
		int bw = 8 * map_bankwidth(map);

		r.x[0] = (1UL << bw) - 1;
	} else {
		for (i = 0; i < map_words(map); i++)
			r.x[i] = ~0UL;
	}
	return r;
}

static inline map_word inline_map_read(struct map_info *map, unsigned long ofs)
{
	map_word r;

	if (map_bankwidth_is_1(map))
		r.x[0] = __raw_readb(map->virt + ofs);
	else if (map_bankwidth_is_2(map))
		r.x[0] = __raw_readw(map->virt + ofs);
	else if (map_bankwidth_is_4(map))
		r.x[0] = __raw_readl(map->virt + ofs);
#if BITS_PER_LONG >= 64
	else if (map_bankwidth_is_8(map))
		r.x[0] = __raw_readq(map->virt + ofs);
#endif
	else if (map_bankwidth_is_large(map))
		memcpy_fromio(r.x, map->virt + ofs, map->bankwidth);
	else
		BUG();

	return r;
}

static inline void inline_map_write(struct map_info *map, const map_word datum, unsigned long ofs)
{
	if (map_bankwidth_is_1(map))
		__raw_writeb(datum.x[0], map->virt + ofs);
	else if (map_bankwidth_is_2(map))
		__raw_writew(datum.x[0], map->virt + ofs);
	else if (map_bankwidth_is_4(map))
		__raw_writel(datum.x[0], map->virt + ofs);
#if BITS_PER_LONG >= 64
	else if (map_bankwidth_is_8(map))
		__raw_writeq(datum.x[0], map->virt + ofs);
#endif
	else if (map_bankwidth_is_large(map))
		memcpy_toio(map->virt + ofs, datum.x, map->bankwidth);
	else
		BUG();
	mb();
}

static inline void inline_map_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
{
	if (map->cached)
		memcpy(to, (char *)map->cached + from, len);
	else
		memcpy_fromio(to, map->virt + from, len);
}

static inline void inline_map_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
{
	memcpy_toio(map->virt + to, from, len);
}

#ifdef CONFIG_MTD_COMPLEX_MAPPINGS
#define map_read(map, ofs) (map)->read(map, ofs)
#define map_copy_from(map, to, from, len) (map)->copy_from(map, to, from, len)
#define map_write(map, datum, ofs) (map)->write(map, datum, ofs)
#define map_copy_to(map, to, from, len) (map)->copy_to(map, to, from, len)

extern void simple_map_init(struct map_info *);
#define map_is_linear(map) (map->phys != NO_XIP)

#else
#define map_read(map, ofs) inline_map_read(map, ofs)
#define map_copy_from(map, to, from, len) inline_map_copy_from(map, to, from, len)
#define map_write(map, datum, ofs) inline_map_write(map, datum, ofs)
#define map_copy_to(map, to, from, len) inline_map_copy_to(map, to, from, len)

#define simple_map_init(map) BUG_ON(!map_bankwidth_supported((map)->bankwidth))
#define map_is_linear(map) ({ (void)(map); 1; })

#endif /* !CONFIG_MTD_COMPLEX_MAPPINGS */
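
/*
 * Illustrative sketch only: chip drivers are expected to go through the
 * map_read()/map_write() accessors and the map_word helpers above rather
 * than dereference map->virt directly, so the same code works for both
 * simple and complex mappings. For example, to test whether every bit of
 * "mask" is still set in the device word at "ofs":
 *
 *	static int example_bits_still_set(struct map_info *map,
 *					  unsigned long ofs, map_word mask)
 *	{
 *		map_word cur = map_read(map, ofs);
 *
 *		return map_word_andequal(map, cur, mask, mask);
 *	}
 */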

#endif /* __LINUX_MTD_MAP_H__ */