1#ifndef _LINUX_MM_H
2#define _LINUX_MM_H
3
4#include <assert.h>
5#include <string.h>
6#include <stdlib.h>
7#include <errno.h>
8#include <limits.h>
9#include <stdio.h>
10
/* Userspace stand-in for the kernel's DMA address type. */
typedef unsigned long dma_addr_t;

/*
 * Expands to nothing, so "unlikely(cond)" degenerates to "(cond)".
 * The kernel uses it as a branch-prediction hint; none is needed here.
 */
#define unlikely
14
/* Hard assertion: abort the test program when the condition holds. */
#define BUG_ON(x) assert(!(x))

/*
 * Kernel WARN_ON() evaluates to the truth value of its condition so it
 * can be used inside an if(); this stub keeps that property but prints
 * nothing ("unlikely" expands to nothing here).
 */
#define WARN_ON(condition) ({                                           \
	int __ret_warn_on = !!(condition);                              \
	unlikely(__ret_warn_on);                                        \
})

/* Stricter than the kernel: a triggered warning aborts via assert(0). */
#define WARN_ON_ONCE(condition) ({                              \
	int __ret_warn_on = !!(condition);                      \
	if (unlikely(__ret_warn_on))                            \
		assert(0);                                      \
	unlikely(__ret_warn_on);                                \
})
28
/* Fixed 4 KiB page geometry, matching the common architecture default. */
#define PAGE_SIZE	(4096)
#define PAGE_SHIFT	(12)
#define PAGE_MASK	(~(PAGE_SIZE-1))

/* Round x up (ALIGN) or down (ALIGN_DOWN) to a power-of-two boundary a. */
#define __ALIGN_KERNEL(x, a)		__ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1)
#define __ALIGN_KERNEL_MASK(x, mask)	(((x) + (mask)) & ~(mask))
#define ALIGN(x, a)			__ALIGN_KERNEL((x), (a))
#define ALIGN_DOWN(x, a)		__ALIGN_KERNEL((x) - ((a) - 1), (a))

#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)

/* Byte offset of p within its page. */
#define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)

/*
 * In this userspace model a "struct page *" is simply the virtual
 * address of the page, so these conversions are identity casts.
 */
#define virt_to_page(x)	((void *)x)
#define page_address(x)	((void *)x)
44
static inline unsigned long page_to_phys(struct page *page)
{
	/* There is no physical address space in this userspace stub;
	 * any caller reaching this is a bug, so trap immediately. */
	(void)page;
	assert(0);
	return 0;
}
51
/* pfn <-> page: a page "pointer" is just the virtual address here. */
#define page_to_pfn(page) ((unsigned long)(page) / PAGE_SIZE)
#define pfn_to_page(pfn) (void *)((pfn) * PAGE_SIZE)
/* Address of the page n pages after "page". */
#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))

/*
 * Type-checked min(): the dummy pointer comparison makes the compiler
 * warn when x and y have different types; min1/min2 are caller-supplied
 * unique identifiers so nested expansions do not shadow each other.
 */
#define __min(t1, t2, min1, min2, x, y) ({              \
	t1 min1 = (x);                                  \
	t2 min2 = (y);                                  \
	(void) (&min1 == &min2);                        \
	min1 < min2 ? min1 : min2; })

#define ___PASTE(a,b) a##b
#define __PASTE(a,b) ___PASTE(a,b)

/* A fresh identifier per expansion, via __COUNTER__. */
#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)

#define min(x, y)                                       \
	__min(typeof(x), typeof(y),                     \
	      __UNIQUE_ID(min1_), __UNIQUE_ID(min2_),   \
	      x, y)

/* min() with both operands converted to an explicit type. */
#define min_t(type, x, y)                               \
	__min(type, type,                               \
	      __UNIQUE_ID(min1_), __UNIQUE_ID(min2_),   \
	      x, y)

/* Userspace code is always "preemptible" as far as callers care. */
#define preemptible() (1)
78
static inline void *kmap(struct page *page)
{
	/* Highmem mappings are unused by the tests; trap any caller. */
	(void)page;
	assert(0);
	return NULL;
}
85
static inline void *kmap_atomic(struct page *page)
{
	/* Atomic highmem mappings are unused by the tests; trap. */
	(void)page;
	assert(0);
	return NULL;
}
92
static inline void kunmap(void *addr)
{
	/* No mapping can ever be created, so unmapping must not happen. */
	(void)addr;
	assert(0);
}
97
static inline void kunmap_atomic(void *addr)
{
	/* No atomic mapping can ever be created; trap any caller. */
	(void)addr;
	assert(0);
}
102
103static inline unsigned long __get_free_page(unsigned int flags)
104{
105	return (unsigned long)malloc(PAGE_SIZE);
106}
107
static inline void free_page(unsigned long page)
{
	/* Counterpart of __get_free_page(): pages are plain malloc blocks. */
	void *addr = (void *)page;

	free(addr);
}
112
static inline void *kmalloc(unsigned int size, unsigned int flags)
{
	/* GFP flags have no effect in this userspace stub. */
	(void)flags;
	return malloc(size);
}
117
/*
 * Allocate an array of n elements of "size" bytes each, mirroring the
 * kernel's kmalloc_array().  Unlike a bare malloc(n * size), the
 * multiplication is overflow-checked: the kernel helper fails the
 * allocation on overflow instead of silently wrapping and returning a
 * short buffer, and this stub returns NULL in the same case.  The gfp
 * flags are ignored, as in kmalloc() above.
 */
static inline void *
kmalloc_array(unsigned int n, unsigned int size, unsigned int flags)
{
	/* n * size would wrap in unsigned int arithmetic; refuse. */
	if (size != 0 && n > UINT_MAX / size)
		return NULL;

	return malloc((size_t)n * size);
}
123
#define kfree(x) free(x)

/* kmemleak instrumentation is a no-op in userspace. */
#define kmemleak_alloc(a, b, c, d)
#define kmemleak_free(a)

/* There is no slab allocator here, so nothing is ever a slab page. */
#define PageSlab(p) (0)

/* Largest errno value representable in an ERR_PTR()-encoded pointer. */
#define MAX_ERRNO	4095

/* True when x lies in the top MAX_ERRNO values, i.e. an encoded -errno. */
#define IS_ERR_VALUE(x) unlikely((unsigned long)(void *)(x) >= (unsigned long)-MAX_ERRNO)
134
/* Encode a negative errno value as a pointer (see IS_ERR_VALUE above). */
static inline void * __must_check ERR_PTR(long error)
{
	return (void *) error;
}
139
/* Decode the errno value previously stored with ERR_PTR(). */
static inline long __must_check PTR_ERR(__force const void *ptr)
{
	return (long) ptr;
}
144
/* True when ptr is an ERR_PTR()-encoded error, not a real pointer. */
static inline bool __must_check IS_ERR(__force const void *ptr)
{
	return IS_ERR_VALUE((unsigned long)ptr);
}
149
150static inline int __must_check PTR_ERR_OR_ZERO(__force const void *ptr)
151{
152	if (IS_ERR(ptr))
153		return PTR_ERR(ptr);
154	else
155		return 0;
156}
157
/* Every kernel config option reads as disabled in this stub. */
#define IS_ENABLED(x) (0)
159
160#endif
161