#ifndef _LINUX_MM_H
#define _LINUX_MM_H

#include <assert.h>
#include <string.h>
#include <stdlib.h>
#include <errno.h>
#include <limits.h>
#include <stdio.h>

typedef unsigned long dma_addr_t;

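/*
 * Defining unlikely as an empty object-like macro makes unlikely(x)
 * expand to plain (x): the branch-prediction hint disappears, but
 * kernel code that wraps conditions in unlikely() still compiles
 * unchanged in this userspace build.
 */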
#define unlikely

#define BUG_ON(x) assert(!(x))

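/*
 * Like their kernel counterparts, both macros evaluate to the truth
 * value of the condition, so they can sit inside an expression, e.g.
 * "if (WARN_ON(!ptr)) return;".  Here WARN_ON_ONCE() is stricter than
 * in the kernel: it aborts via assert(0) as soon as the condition is
 * first true.
 */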
#define WARN_ON(condition) ({                                           \
	int __ret_warn_on = !!(condition);                              \
	unlikely(__ret_warn_on);                                        \
})

#define WARN_ON_ONCE(condition) ({                              \
	int __ret_warn_on = !!(condition);                      \
	if (unlikely(__ret_warn_on))                            \
		assert(0);                                      \
	unlikely(__ret_warn_on);                                \
})

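/*
 * Page geometry is fixed at 4 KiB for the test build.  PAGE_MASK has
 * the offset bits clear, so "addr & PAGE_MASK" yields the start of
 * the page and "addr & ~PAGE_MASK" the offset within it.
 */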
#define PAGE_SIZE	(4096)
#define PAGE_SHIFT	(12)
#define PAGE_MASK	(~(PAGE_SIZE-1))

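/*
 * ALIGN(x, a) rounds x up to the next multiple of a, which must be a
 * power of two: e.g. ALIGN(4097, 4096) == 8192, while
 * ALIGN(4096, 4096) stays 4096.
 */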
#define __ALIGN_KERNEL(x, a)		__ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1)
#define __ALIGN_KERNEL_MASK(x, mask)	(((x) + (mask)) & ~(mask))
#define ALIGN(x, a)			__ALIGN_KERNEL((x), (a))

#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)

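/*
 * offset_in_page() keeps only the low PAGE_SHIFT bits, e.g.
 * offset_in_page(0x12345) == 0x345.
 */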
#define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)

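/*
 * In this userspace model a struct page * is simply the virtual
 * address of the page, so virt_to_page() and page_address() are
 * identity casts.  page_to_phys() has no sensible equivalent and
 * asserts if it is ever reached.
 */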
#define virt_to_page(x)	((void *)x)
#define page_address(x)	((void *)x)

static inline unsigned long page_to_phys(struct page *page)
{
	assert(0);

	return 0;
}

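/*
 * Since a page pointer is just an address here, pfn arithmetic is
 * done directly on the pointer value: for a page-aligned page,
 * nth_page(page, 1) is the address one PAGE_SIZE further on.
 */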
#define page_to_pfn(page) ((unsigned long)(page) / PAGE_SIZE)
#define pfn_to_page(pfn) (void *)((pfn) * PAGE_SIZE)
#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))

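/*
 * The (void)(&min1 == &min2) comparison below generates no code; it
 * exists so the compiler warns when x and y have incompatible types,
 * mirroring the kernel's type-checking min().
 */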
#define __min(t1, t2, min1, min2, x, y) ({              \
	t1 min1 = (x);                                  \
	t2 min2 = (y);                                  \
	(void) (&min1 == &min2);                        \
	min1 < min2 ? min1 : min2; })

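/*
 * __UNIQUE_ID() pastes __COUNTER__ onto the prefix, so each expansion
 * names fresh temporaries and nested min()/min_t() calls never shadow
 * one another.
 */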
#define ___PASTE(a,b) a##b
#define __PASTE(a,b) ___PASTE(a,b)

#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)

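/*
 * min() warns when its arguments have different types; min_t() casts
 * both to the named type first, e.g.
 * "min_t(unsigned long, len, PAGE_SIZE - off)".
 */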
#define min(x, y)                                       \
	__min(typeof(x), typeof(y),                     \
	      __UNIQUE_ID(min1_), __UNIQUE_ID(min2_),   \
	      x, y)

#define min_t(type, x, y)                               \
	__min(type, type,                               \
	      __UNIQUE_ID(min1_), __UNIQUE_ID(min2_),   \
	      x, y)

#define preemptible() (1)

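/*
 * Highmem has no meaning in userspace, so the kmap()/kunmap() family
 * is stubbed out and asserts if anything ever calls it.
 */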
static inline void *kmap(struct page *page)
{
	assert(0);

	return NULL;
}

static inline void *kmap_atomic(struct page *page)
{
	assert(0);

	return NULL;
}

static inline void kunmap(void *addr)
{
	assert(0);
}

static inline void kunmap_atomic(void *addr)
{
	assert(0);
}

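/*
 * Note: the kernel's __get_free_page() returns page-aligned memory,
 * while malloc() makes no alignment promise; a test that relies on
 * page alignment of the result would need aligned_alloc() here.
 */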
static inline unsigned long __get_free_page(unsigned int flags)
{
	return (unsigned long)malloc(PAGE_SIZE);
}

static inline void free_page(unsigned long page)
{
	free((void *)page);
}

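/*
 * kmalloc()/kfree() map straight onto malloc()/free(); the gfp flags
 * argument is accepted and ignored.
 */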
static inline void *kmalloc(unsigned int size, unsigned int flags)
{
	return malloc(size);
}

#define kfree(x) free(x)

#define kmemleak_alloc(a, b, c, d)
#define kmemleak_free(a)

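/*
 * The remaining hooks compile away: nothing here is a slab page, and
 * D-cache flushing is a no-op in userspace.
 */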
#define PageSlab(p) (0)
#define flush_kernel_dcache_page(p)

#endif