#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/vmalloc.h>
#include <asm/uaccess.h>

#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/kmem.h>

/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kstrdup(const char *s, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strlen(s) + 1;
	buf = kmalloc_track_caller(len, gfp);
	if (buf)
		memcpy(buf, s, len);
	return buf;
}
EXPORT_SYMBOL(kstrdup);
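
/*
 * Editor's illustrative sketch (not part of the original file): the
 * typical kstrdup() pattern. A NULL @s simply propagates as a NULL
 * return, so only allocation failure needs a distinct check. The
 * helper name is hypothetical; __maybe_unused keeps the sketch from
 * triggering an unused-function warning.
 */
static __maybe_unused char *example_dup_name(const char *name)
{
	char *copy = kstrdup(name, GFP_KERNEL);	/* may sleep */

	if (name && !copy)
		pr_warn("example: name duplication failed\n");
	return copy;				/* caller frees with kfree() */
}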

/**
 * kstrndup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @max: read at most @max chars from @s
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strnlen(s, max);
	buf = kmalloc_track_caller(len + 1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kstrndup);
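
/*
 * Editor's illustrative sketch (not part of the original file):
 * kstrndup() suits fixed-width, possibly unterminated source buffers,
 * since it reads at most @max bytes and always NUL-terminates the
 * copy. The 16-byte field is hypothetical.
 */
static __maybe_unused char *example_dup_field(const char field[16])
{
	/* result is at most 17 bytes including the appended '\0' */
	return kstrndup(field, 16, GFP_KERNEL);
}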

/**
 * kmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 */
void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = kmalloc_track_caller(len, gfp);
	if (p)
		memcpy(p, src, len);
	return p;
}
EXPORT_SYMBOL(kmemdup);
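
/*
 * Editor's illustrative sketch (not part of the original file):
 * kmemdup() for binary data, e.g. snapshotting a fixed-size record.
 * Unlike kstrdup(), a NULL return here always means allocation
 * failure. The struct and helper are hypothetical.
 */
struct example_cfg {
	u32 flags;
	u8 key[32];
};

static __maybe_unused struct example_cfg *
example_snapshot_cfg(const struct example_cfg *src)
{
	return kmemdup(src, sizeof(*src), GFP_KERNEL);
}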

/**
 * memdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Returns an ERR_PTR() on failure.
 */
void *memdup_user(const void __user *src, size_t len)
{
	void *p;

	/*
	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
	 * cause a page fault, which makes it pointless to use GFP_NOFS
	 * or GFP_ATOMIC.
	 */
	p = kmalloc_track_caller(len, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(memdup_user);
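
/*
 * Editor's illustrative sketch (not part of the original file): the
 * usual ioctl-style pattern around memdup_user(). It returns an
 * ERR_PTR(), never NULL, on failure, so IS_ERR()/PTR_ERR() is the
 * correct check. Names are hypothetical.
 */
static __maybe_unused int example_copy_arg(const void __user *arg, size_t len)
{
	void *buf = memdup_user(arg, len);

	if (IS_ERR(buf))
		return PTR_ERR(buf);	/* -ENOMEM or -EFAULT */

	/* ... operate on the kernel-space copy ... */
	kfree(buf);
	return 0;
}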

static __always_inline void *__do_krealloc(const void *p, size_t new_size,
					   gfp_t flags)
{
	void *ret;
	size_t ks = 0;

	if (p)
		ks = ksize(p);

	if (ks >= new_size)
		return (void *)p;

	ret = kmalloc_track_caller(new_size, flags);
	if (ret && p)
		memcpy(ret, p, ks);

	return ret;
}

/**
 * __krealloc - like krealloc() but don't free @p.
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * This function is like krealloc() except it never frees the originally
 * allocated buffer. Use this if you don't want to free the buffer
 * immediately, for example when the old buffer may still be in use,
 * as with RCU.
 */
void *__krealloc(const void *p, size_t new_size, gfp_t flags)
{
	if (unlikely(!new_size))
		return ZERO_SIZE_PTR;

	return __do_krealloc(p, new_size, flags);
}
EXPORT_SYMBOL(__krealloc);
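
/*
 * Editor's illustrative sketch (not part of the original file): the
 * RCU-style deferred-free usage the comment above alludes to. The old
 * buffer must stay valid for concurrent readers, so it is freed only
 * after a grace period; __krealloc() deliberately leaves it alone.
 * All names are hypothetical.
 */
static __maybe_unused int example_grow_rcu_array(int **arr_p, size_t new_bytes)
{
	int *old_arr = *arr_p;
	int *new_arr = __krealloc(old_arr, new_bytes, GFP_KERNEL);

	if (!new_arr)
		return -ENOMEM;

	if (new_arr != old_arr) {
		rcu_assign_pointer(*arr_p, new_arr);
		synchronize_rcu();	/* wait out readers of old_arr */
		kfree(old_arr);		/* our job, not __krealloc()'s */
	}
	return 0;
}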

/**
 * krealloc - reallocate memory. The contents will remain unchanged.
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * The contents of the object pointed to are preserved up to the
 * lesser of the new and old sizes.  If @p is %NULL, krealloc()
 * behaves exactly like kmalloc().  If @new_size is 0 and @p is not a
 * %NULL pointer, the object pointed to is freed.
 */
void *krealloc(const void *p, size_t new_size, gfp_t flags)
{
	void *ret;

	if (unlikely(!new_size)) {
		kfree(p);
		return ZERO_SIZE_PTR;
	}

	ret = __do_krealloc(p, new_size, flags);
	if (ret && p != ret)
		kfree(p);

	return ret;
}
EXPORT_SYMBOL(krealloc);
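
/*
 * Editor's illustrative sketch (not part of the original file): the
 * standard grow pattern. On failure the original buffer is untouched
 * and still owned by the caller; on success the old pointer must not
 * be used again, as it may have been freed. Names are hypothetical.
 */
static __maybe_unused int example_grow_buf(char **buf_p, size_t new_size)
{
	char *tmp = krealloc(*buf_p, new_size, GFP_KERNEL);

	if (!tmp)
		return -ENOMEM;	/* *buf_p is still valid */

	*buf_p = tmp;
	return 0;
}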

/**
 * kzfree - like kfree but zero memory
 * @p: object to free memory of
 *
 * The memory of the object @p points to is zeroed before it is freed.
 * If @p is %NULL, kzfree() does nothing.
 *
 * Note: this function zeroes the whole allocated buffer, which can be a
 * good deal bigger than the requested buffer size passed to kmalloc().
 * So be careful when using this function in performance-sensitive code.
 */
void kzfree(const void *p)
{
	size_t ks;
	void *mem = (void *)p;

	if (unlikely(ZERO_OR_NULL_PTR(mem)))
		return;
	ks = ksize(mem);
	memset(mem, 0, ks);
	kfree(mem);
}
EXPORT_SYMBOL(kzfree);
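
/*
 * Editor's illustrative sketch (not part of the original file): kzfree()
 * is intended for buffers holding sensitive data, such as key material,
 * that should not linger in freed memory. Names are hypothetical.
 */
static __maybe_unused int example_use_secret(const u8 *src, size_t len)
{
	u8 *key = kmemdup(src, len, GFP_KERNEL);

	if (!key)
		return -ENOMEM;

	/* ... derive or verify using @key ... */

	kzfree(key);	/* scrub before returning memory to the allocator */
	return 0;
}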

/*
 * strndup_user - duplicate an existing string from user space
 * @s: The string to duplicate
 * @n: Maximum number of bytes to copy, including the trailing NUL.
 */
char *strndup_user(const char __user *s, long n)
{
	char *p;
	long length;

	length = strnlen_user(s, n);

	if (!length)
		return ERR_PTR(-EFAULT);

	if (length > n)
		return ERR_PTR(-EINVAL);

	p = memdup_user(s, length);

	if (IS_ERR(p))
		return p;

	p[length - 1] = '\0';

	return p;
}
EXPORT_SYMBOL(strndup_user);
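
/*
 * Editor's illustrative sketch (not part of the original file): copying
 * a bounded, NUL-terminated string from user space, e.g. in a write()
 * handler. As with memdup_user(), failures come back as ERR_PTR().
 * The helper name and the 256-byte cap are hypothetical.
 */
static __maybe_unused int example_set_name(const char __user *uname)
{
	char *name = strndup_user(uname, 256);

	if (IS_ERR(name))
		return PTR_ERR(name);	/* -EFAULT, -EINVAL or -ENOMEM */

	/* ... use the NUL-terminated kernel copy ... */
	kfree(name);
	return 0;
}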

void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev, struct rb_node *rb_parent)
{
	struct vm_area_struct *next;

	vma->vm_prev = prev;
	if (prev) {
		next = prev->vm_next;
		prev->vm_next = vma;
	} else {
		mm->mmap = vma;
		if (rb_parent)
			next = rb_entry(rb_parent,
					struct vm_area_struct, vm_rb);
		else
			next = NULL;
	}
	vma->vm_next = next;
	if (next)
		next->vm_prev = vma;
}

/* Check if the vma is being used as a stack by this task */
static int vm_is_stack_for_task(struct task_struct *t,
				struct vm_area_struct *vma)
{
	return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}

/*
 * Check if the vma is being used as a stack.
 * If @in_group is non-zero, check the entire thread group; otherwise
 * check only the current task. Returns the pid of the task the vma
 * is a stack for, or 0 if none.
 */
pid_t vm_is_stack(struct task_struct *task,
		  struct vm_area_struct *vma, int in_group)
{
	pid_t ret = 0;

	if (vm_is_stack_for_task(task, vma))
		return task->pid;

	if (in_group) {
		struct task_struct *t;

		rcu_read_lock();
		if (!pid_alive(task))
			goto done;

		t = task;
		do {
			if (vm_is_stack_for_task(t, vma)) {
				ret = t->pid;
				goto done;
			}
		} while_each_thread(task, t);
done:
		rcu_read_unlock();
	}

	return ret;
}

#if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	mm->mmap_base = TASK_UNMAPPED_BASE;
	mm->get_unmapped_area = arch_get_unmapped_area;
	mm->unmap_area = arch_unmap_area;
}
#endif

/*
 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall
 * back to the regular GUP.
 * If the architecture does not support this function, simply return
 * with no pages pinned.
 */
int __attribute__((weak)) __get_user_pages_fast(unsigned long start,
				 int nr_pages, int write, struct page **pages)
{
	return 0;
}
EXPORT_SYMBOL_GPL(__get_user_pages_fast);

/**
 * get_user_pages_fast() - pin user pages in memory
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @write:	whether pages will be written to
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno.
 *
 * get_user_pages_fast provides equivalent functionality to get_user_pages,
 * operating on current and current->mm, with force=0 and vma=NULL. However,
 * unlike get_user_pages, it must be called without mmap_sem held.
 *
 * get_user_pages_fast may take mmap_sem and page table locks, so no
 * assumptions can be made about lack of locking. get_user_pages_fast is to be
 * implemented in a way that is advantageous (vs get_user_pages()) when the
 * user memory area is already faulted in and present in ptes. However, if the
 * pages have to be faulted in, it may turn out to be slightly slower, so
 * callers need to carefully consider what to use. On many architectures,
 * get_user_pages_fast simply falls back to get_user_pages.
 */
int __attribute__((weak)) get_user_pages_fast(unsigned long start,
				int nr_pages, int write, struct page **pages)
{
	struct mm_struct *mm = current->mm;
	int ret;

	down_read(&mm->mmap_sem);
	ret = get_user_pages(current, mm, start, nr_pages,
					write, 0, pages, NULL);
	up_read(&mm->mmap_sem);

	return ret;
}
EXPORT_SYMBOL_GPL(get_user_pages_fast);
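
/*
 * Editor's illustrative sketch (not part of the original file): the
 * pin/use/release cycle described above. A short pin (fewer pages than
 * requested) must still be released; in kernels of this vintage the
 * release side is put_page() per pinned page. Names are hypothetical.
 */
static __maybe_unused int example_pin_range(unsigned long start, int nr_pages,
					    struct page **pages)
{
	int i, pinned;

	pinned = get_user_pages_fast(start, nr_pages, 1 /* write */, pages);
	if (pinned < 0)
		return pinned;		/* nothing was pinned */

	/* ... access the pinned pages, e.g. via kmap()/kunmap() ... */

	for (i = 0; i < pinned; i++)
		put_page(pages[i]);	/* drop the references we took */
	return pinned;
}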

unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long pgoff)
{
	unsigned long ret;
	struct mm_struct *mm = current->mm;
	unsigned long populate;

	ret = security_mmap_file(file, prot, flag);
	if (!ret) {
		down_write(&mm->mmap_sem);
		ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff,
				    &populate);
		up_write(&mm->mmap_sem);
		if (populate)
			mm_populate(ret, populate);
	}
	return ret;
}

unsigned long vm_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long offset)
{
	if (unlikely(offset + PAGE_ALIGN(len) < offset))
		return -EINVAL;
	if (unlikely(offset & ~PAGE_MASK))
		return -EINVAL;

	return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}
EXPORT_SYMBOL(vm_mmap);

void kvfree(const void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}
EXPORT_SYMBOL(kvfree);
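
/*
 * Editor's illustrative sketch (not part of the original file): kvfree()
 * pairs naturally with the "try kmalloc(), fall back to vmalloc()"
 * allocation pattern, since it dispatches on the pointer's origin at
 * free time. The helper is hypothetical (later kernels provide
 * kvmalloc() for exactly this).
 */
static __maybe_unused void *example_alloc_big(size_t size)
{
	void *p = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);

	if (!p)
		p = vmalloc(size);	/* kvfree() handles either origin */
	return p;
}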

struct address_space *page_mapping(struct page *page)
{
	struct address_space *mapping = page->mapping;

	VM_BUG_ON(PageSlab(page));
#ifdef CONFIG_SWAP
	if (unlikely(PageSwapCache(page))) {
		swp_entry_t entry;

		entry.val = page_private(page);
		mapping = swap_address_space(entry);
	} else
#endif
	if ((unsigned long)mapping & PAGE_MAPPING_ANON)
		mapping = NULL;
	return mapping;
}

/* Tracepoints definitions. */
EXPORT_TRACEPOINT_SYMBOL(kmalloc);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
EXPORT_TRACEPOINT_SYMBOL(kmalloc_node);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc_node);
EXPORT_TRACEPOINT_SYMBOL(kfree);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_free);