/* drivers/android/pmem.c
 *
 * Copyright (C) 2007 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/miscdevice.h>
#include <linux/platform_device.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/list.h>
#include <linux/debugfs.h>
#include <linux/android_pmem.h>
#include <linux/mempolicy.h>
#include <linux/sched.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/cacheflush.h>

#define PMEM_MAX_DEVICES 10
#define PMEM_MAX_ORDER 128
#define PMEM_MIN_ALLOC PAGE_SIZE

#define PMEM_DEBUG 1

/* indicates that a reference to this file has been taken via get_pmem_file,
 * the file should not be released until put_pmem_file is called */
#define PMEM_FLAGS_BUSY 0x1
/* indicates that this is a suballocation of a larger master range */
#define PMEM_FLAGS_CONNECTED (0x1 << 1)
/* indicates this is a master and not a sub allocation and that it is mmaped */
#define PMEM_FLAGS_MASTERMAP (0x1 << 2)
/* submap and unsubmap flags indicate:
 * 00: subregion has never been mmaped
 * 10: subregion has been mmaped, reference to the mm was taken
 * 11: subregion has been released, reference to the mm still held
 * 01: subregion has been released, reference to the mm has been released
 */
#define PMEM_FLAGS_SUBMAP (0x1 << 3)
#define PMEM_FLAGS_UNSUBMAP (0x1 << 4)

struct pmem_data {
	/* in alloc mode: an index into the bitmap
	 * in no_alloc mode: the size of the allocation */
	int index;
	/* see flags above for descriptions */
	unsigned int flags;
	/* protects this data field; if the mm's mmap_sem will be held at the
	 * same time as this sem, the mm's sem must be taken first (as this is
	 * the order for vma_open and vma_close ops) */
	struct rw_semaphore sem;
	/* info about the mmaping process */
	struct vm_area_struct *vma;
	/* task struct of the mapping process */
	struct task_struct *task;
	/* process id of the mapping process */
	pid_t pid;
	/* file descriptor of the master */
	int master_fd;
	/* file struct of the master */
	struct file *master_file;
	/* a list of currently available regions if this is a suballocation */
	struct list_head region_list;
	/* a linked list of data so we can access them for debugging */
	struct list_head list;
#if PMEM_DEBUG
	int ref;
#endif
};

struct pmem_bits {
	unsigned allocated:1;		/* 1 if allocated, 0 if free */
	unsigned order:7;		/* size of the region in pmem space */
};

struct pmem_region_node {
	struct pmem_region region;
	struct list_head list;
};

#define PMEM_DEBUG_MSGS 0
#if PMEM_DEBUG_MSGS
#define DLOG(fmt,args...) \
	do { printk(KERN_INFO "[%s:%s:%d] "fmt, __FILE__, __func__, __LINE__, \
		    ##args); } \
	while (0)
#else
#define DLOG(x...) do {} while (0)
#endif

struct pmem_info {
	struct miscdevice dev;
	/* physical start address of the remapped pmem space */
	unsigned long base;
	/* virtual start address of the remapped pmem space */
	unsigned char __iomem *vbase;
	/* total size of the pmem space */
	unsigned long size;
	/* number of entries in the pmem space */
	unsigned long num_entries;
	/* pfn of the garbage page in memory */
	unsigned long garbage_pfn;
	/* index of the garbage page in the pmem space */
	int garbage_index;
	/* the bitmap for the region indicating which entries are allocated
	 * and which are free */
	struct pmem_bits *bitmap;
	/* indicates the region should not be managed with an allocator */
	unsigned no_allocator;
	/* indicates maps of this region should be cached; if a mix of
	 * cached and uncached is desired, set this and open the device with
	 * O_SYNC to get an uncached region */
	unsigned cached;
	unsigned buffered;
	/* in no_allocator mode the first mapper gets the whole space and sets
	 * this flag */
	unsigned allocated;
	/* for debugging, creates a list of pmem file structs; the
	 * data_list_sem should be taken before pmem_data->sem if both are
	 * needed */
	struct semaphore data_list_sem;
	struct list_head data_list;
	/* bitmap_sem protects the bitmap array:
	 * a write lock should be held when modifying entries in bitmap,
	 * a read lock should be held when reading data from bits or
	 * dereferencing a pointer into bitmap
	 *
	 * pmem_data->sem protects the pmem data of a particular file.
	 * Many of the functions that require the pmem_data->sem have a non-
	 * locking version for when the caller is already holding that sem.
	 *
	 * IF YOU TAKE BOTH LOCKS TAKE THEM IN THIS ORDER:
	 * down(pmem_data->sem) => down(bitmap_sem)
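	 *
	 * e.g. a sketch of the nesting used when freeing an allocation
	 * (this mirrors the sequence in pmem_release below):
	 *	down_write(&data->sem);
	 *	down_write(&pmem[id].bitmap_sem);
	 *	pmem_free(id, data->index);
	 *	up_write(&pmem[id].bitmap_sem);
	 *	up_write(&data->sem);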
	 */
	struct rw_semaphore bitmap_sem;

	long (*ioctl)(struct file *, unsigned int, unsigned long);
	int (*release)(struct inode *, struct file *);
};

static struct pmem_info pmem[PMEM_MAX_DEVICES];
static int id_count;

#define PMEM_IS_FREE(id, index) !(pmem[id].bitmap[index].allocated)
#define PMEM_ORDER(id, index) pmem[id].bitmap[index].order
#define PMEM_BUDDY_INDEX(id, index) (index ^ (1 << PMEM_ORDER(id, index)))
#define PMEM_NEXT_INDEX(id, index) (index + (1 << PMEM_ORDER(id, index)))
#define PMEM_OFFSET(index) (index * PMEM_MIN_ALLOC)
#define PMEM_START_ADDR(id, index) (PMEM_OFFSET(index) + pmem[id].base)
#define PMEM_LEN(id, index) ((1 << PMEM_ORDER(id, index)) * PMEM_MIN_ALLOC)
#define PMEM_END_ADDR(id, index) (PMEM_START_ADDR(id, index) + \
	PMEM_LEN(id, index))
#define PMEM_START_VADDR(id, index) (PMEM_OFFSET(index) + pmem[id].vbase)
#define PMEM_END_VADDR(id, index) (PMEM_START_VADDR(id, index) + \
	PMEM_LEN(id, index))
#define PMEM_REVOKED(data) (data->flags & PMEM_FLAGS_REVOKED)
#define PMEM_IS_PAGE_ALIGNED(addr) (!((addr) & (~PAGE_MASK)))
#define PMEM_IS_SUBMAP(data) ((data->flags & PMEM_FLAGS_SUBMAP) && \
	(!(data->flags & PMEM_FLAGS_UNSUBMAP)))

static int pmem_release(struct inode *, struct file *);
static int pmem_mmap(struct file *, struct vm_area_struct *);
static int pmem_open(struct inode *, struct file *);
static long pmem_ioctl(struct file *, unsigned int, unsigned long);

struct file_operations pmem_fops = {
	.release = pmem_release,
	.mmap = pmem_mmap,
	.open = pmem_open,
	.unlocked_ioctl = pmem_ioctl,
};

static int get_id(struct file *file)
{
	return MINOR(file->f_dentry->d_inode->i_rdev);
}

int is_pmem_file(struct file *file)
{
	int id;

	if (unlikely(!file || !file->f_dentry || !file->f_dentry->d_inode))
		return 0;
	id = get_id(file);
	if (unlikely(id >= PMEM_MAX_DEVICES))
		return 0;
	if (unlikely(file->f_dentry->d_inode->i_rdev !=
	     MKDEV(MISC_MAJOR, pmem[id].dev.minor)))
		return 0;
	return 1;
}

static int has_allocation(struct file *file)
{
	struct pmem_data *data;
	/* check is_pmem_file first if not accessed via pmem_file_ops */

	if (unlikely(!file->private_data))
		return 0;
	data = (struct pmem_data *)file->private_data;
	if (unlikely(data->index < 0))
		return 0;
	return 1;
}

static int is_master_owner(struct file *file)
{
	struct file *master_file;
	struct pmem_data *data;
	int put_needed, ret = 0;

	if (!is_pmem_file(file) || !has_allocation(file))
		return 0;
	data = (struct pmem_data *)file->private_data;
	if (PMEM_FLAGS_MASTERMAP & data->flags)
		return 1;
	master_file = fget_light(data->master_fd, &put_needed);
	if (master_file && data->master_file == master_file)
		ret = 1;
	fput_light(master_file, put_needed);
	return ret;
}

static int pmem_free(int id, int index)
{
	/* caller should hold the write lock on bitmap_sem! */
	int buddy, curr = index;
	DLOG("index %d\n", index);

	if (pmem[id].no_allocator) {
		pmem[id].allocated = 0;
		return 0;
	}
	/* clean up the bitmap, merging any buddies */
	pmem[id].bitmap[curr].allocated = 0;
	/* find a slot's buddy: Buddy# = Slot# ^ (1 << order)
	 * if the buddy is also free merge them
	 * repeat until the buddy is not free or end of the bitmap is reached
	 */
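	/* worked example: freeing index 4 at order 1 gives buddy
	 * 4 ^ (1 << 1) = 6; if slot 6 is also free at order 1 the pair
	 * merges into one free slot at index 4, order 2, and the scan
	 * repeats from there */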
	do {
		buddy = PMEM_BUDDY_INDEX(id, curr);
		if (PMEM_IS_FREE(id, buddy) &&
				PMEM_ORDER(id, buddy) == PMEM_ORDER(id, curr)) {
			PMEM_ORDER(id, buddy)++;
			PMEM_ORDER(id, curr)++;
			curr = min(buddy, curr);
		} else {
			break;
		}
	} while (curr < pmem[id].num_entries);

	return 0;
}

static void pmem_revoke(struct file *file, struct pmem_data *data);

static int pmem_release(struct inode *inode, struct file *file)
{
	struct pmem_data *data = (struct pmem_data *)file->private_data;
	struct pmem_region_node *region_node;
	struct list_head *elt, *elt2;
	int id = get_id(file), ret = 0;

	down(&pmem[id].data_list_sem);
	/* if this file is a master, revoke all the memory in the connected
	 * files */
	if (PMEM_FLAGS_MASTERMAP & data->flags) {
		struct pmem_data *sub_data;
		list_for_each(elt, &pmem[id].data_list) {
			sub_data = list_entry(elt, struct pmem_data, list);
			down_read(&sub_data->sem);
			if (PMEM_IS_SUBMAP(sub_data) &&
			    file == sub_data->master_file) {
				up_read(&sub_data->sem);
				pmem_revoke(file, sub_data);
			} else
				up_read(&sub_data->sem);
		}
	}
	list_del(&data->list);
	up(&pmem[id].data_list_sem);

	down_write(&data->sem);

	/* if it's not a connected file and it has an allocation, free it */
	if (!(PMEM_FLAGS_CONNECTED & data->flags) && has_allocation(file)) {
		down_write(&pmem[id].bitmap_sem);
		ret = pmem_free(id, data->index);
		up_write(&pmem[id].bitmap_sem);
	}

	/* if this file is a submap (mapped, connected file), downref the
	 * task struct */
	if (PMEM_FLAGS_SUBMAP & data->flags)
		if (data->task) {
			put_task_struct(data->task);
			data->task = NULL;
		}

	file->private_data = NULL;

	list_for_each_safe(elt, elt2, &data->region_list) {
		region_node = list_entry(elt, struct pmem_region_node, list);
		list_del(elt);
		kfree(region_node);
	}
	BUG_ON(!list_empty(&data->region_list));

	up_write(&data->sem);
	kfree(data);
	if (pmem[id].release)
		ret = pmem[id].release(inode, file);

	return ret;
}

static int pmem_open(struct inode *inode, struct file *file)
{
	struct pmem_data *data;
	int id = get_id(file);
	int ret = 0;

	DLOG("current %u file %p(%d)\n", current->pid, file, file_count(file));
	/* setup file->private_data to indicate it's unmapped */
	/* you can only open a pmem device one time */
	if (file->private_data != NULL)
		return -1;
	data = kmalloc(sizeof(struct pmem_data), GFP_KERNEL);
	if (!data) {
		printk(KERN_ERR "pmem: unable to allocate memory for pmem metadata.\n");
		return -ENOMEM;
	}
	data->flags = 0;
	data->index = -1;
	data->task = NULL;
	data->vma = NULL;
	data->pid = 0;
	data->master_file = NULL;
#if PMEM_DEBUG
	data->ref = 0;
#endif
	INIT_LIST_HEAD(&data->region_list);
	init_rwsem(&data->sem);

	file->private_data = data;
	INIT_LIST_HEAD(&data->list);

	down(&pmem[id].data_list_sem);
	list_add(&data->list, &pmem[id].data_list);
	up(&pmem[id].data_list_sem);
	return ret;
}

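/* convert a length in bytes to a buddy order: round up to whole
 * PMEM_MIN_ALLOC pages, then take the ceiling log2; e.g. a request
 * for 3 pages returns order 2 (a 4-page slot) */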
static unsigned long pmem_order(unsigned long len)
{
	int i;

	len = (len + PMEM_MIN_ALLOC - 1)/PMEM_MIN_ALLOC;
	len--;
	for (i = 0; i < sizeof(len)*8; i++)
		if (len >> i == 0)
			break;
	return i;
}

static int pmem_allocate(int id, unsigned long len)
{
	/* caller should hold the write lock on bitmap_sem! */
	/* return the corresponding pdata[] entry */
	int curr = 0;
	int end = pmem[id].num_entries;
	int best_fit = -1;
	unsigned long order = pmem_order(len);

	if (pmem[id].no_allocator) {
		DLOG("no allocator");
		if ((len > pmem[id].size) || pmem[id].allocated)
			return -1;
		pmem[id].allocated = 1;
		return len;
	}

	if (order > PMEM_MAX_ORDER)
		return -1;
	DLOG("order %lx\n", order);

	/* look through the bitmap:
	 * 	if you find a free slot of the correct order use it
	 * 	otherwise, use the best fit (smallest with size > order) slot
	 */
	while (curr < end) {
		if (PMEM_IS_FREE(id, curr)) {
			if (PMEM_ORDER(id, curr) == (unsigned char)order) {
				/* set the not free bit and clear others */
				best_fit = curr;
				break;
			}
			if (PMEM_ORDER(id, curr) > (unsigned char)order &&
			    (best_fit < 0 ||
			     PMEM_ORDER(id, curr) < PMEM_ORDER(id, best_fit)))
				best_fit = curr;
		}
		curr = PMEM_NEXT_INDEX(id, curr);
	}

	/* if best_fit < 0, there are no suitable slots; return an error */
	if (best_fit < 0) {
		printk("pmem: no space left to allocate!\n");
		return -1;
	}

	/* now partition the best fit:
	 * 	split the slot into 2 buddies of order - 1
	 * 	repeat until the slot is of the correct order
	 */
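	/* e.g. if the best fit is an order-3 slot (8 pages) and the request
	 * is order 1 (2 pages): split 8 -> 4+4, then the first 4 -> 2+2,
	 * and hand out the first order-1 slot; the buddies remain free */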
	while (PMEM_ORDER(id, best_fit) > (unsigned char)order) {
		int buddy;
		PMEM_ORDER(id, best_fit) -= 1;
		buddy = PMEM_BUDDY_INDEX(id, best_fit);
		PMEM_ORDER(id, buddy) = PMEM_ORDER(id, best_fit);
	}
	pmem[id].bitmap[best_fit].allocated = 1;
	return best_fit;
}

static pgprot_t phys_mem_access_prot(struct file *file, pgprot_t vma_prot)
{
	int id = get_id(file);
#ifdef pgprot_noncached
	if (pmem[id].cached == 0 || file->f_flags & O_SYNC)
		return pgprot_noncached(vma_prot);
#endif
#ifdef pgprot_ext_buffered
	else if (pmem[id].buffered)
		return pgprot_ext_buffered(vma_prot);
#endif
	return vma_prot;
}

static unsigned long pmem_start_addr(int id, struct pmem_data *data)
{
	if (pmem[id].no_allocator)
		return PMEM_START_ADDR(id, 0);
	else
		return PMEM_START_ADDR(id, data->index);
}

static void *pmem_start_vaddr(int id, struct pmem_data *data)
{
	return pmem_start_addr(id, data) - pmem[id].base + pmem[id].vbase;
}

static unsigned long pmem_len(int id, struct pmem_data *data)
{
	if (pmem[id].no_allocator)
		return data->index;
	else
		return PMEM_LEN(id, data->index);
}

static int pmem_map_garbage(int id, struct vm_area_struct *vma,
			    struct pmem_data *data, unsigned long offset,
			    unsigned long len)
{
	int i, garbage_pages = len >> PAGE_SHIFT;

	vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP | VM_SHARED | VM_WRITE;
	for (i = 0; i < garbage_pages; i++) {
		if (vm_insert_pfn(vma, vma->vm_start + offset + (i * PAGE_SIZE),
		    pmem[id].garbage_pfn))
			return -EAGAIN;
	}
	return 0;
}

static int pmem_unmap_pfn_range(int id, struct vm_area_struct *vma,
				struct pmem_data *data, unsigned long offset,
				unsigned long len)
{
	int garbage_pages;
	DLOG("unmap offset %lx len %lx\n", offset, len);

	BUG_ON(!PMEM_IS_PAGE_ALIGNED(len));

	garbage_pages = len >> PAGE_SHIFT;
	zap_page_range(vma, vma->vm_start + offset, len, NULL);
	pmem_map_garbage(id, vma, data, offset, len);
	return 0;
}

static int pmem_map_pfn_range(int id, struct vm_area_struct *vma,
			      struct pmem_data *data, unsigned long offset,
			      unsigned long len)
{
	DLOG("map offset %lx len %lx\n", offset, len);
	BUG_ON(!PMEM_IS_PAGE_ALIGNED(vma->vm_start));
	BUG_ON(!PMEM_IS_PAGE_ALIGNED(vma->vm_end));
	BUG_ON(!PMEM_IS_PAGE_ALIGNED(len));
	BUG_ON(!PMEM_IS_PAGE_ALIGNED(offset));

	if (io_remap_pfn_range(vma, vma->vm_start + offset,
		(pmem_start_addr(id, data) + offset) >> PAGE_SHIFT,
		len, vma->vm_page_prot)) {
		return -EAGAIN;
	}
	return 0;
}

static int pmem_remap_pfn_range(int id, struct vm_area_struct *vma,
			      struct pmem_data *data, unsigned long offset,
			      unsigned long len)
{
	/* hold the mm sem for the vma you are modifying when you call this */
	BUG_ON(!vma);
	zap_page_range(vma, vma->vm_start + offset, len, NULL);
	return pmem_map_pfn_range(id, vma, data, offset, len);
}

static void pmem_vma_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct pmem_data *data = file->private_data;
	int id = get_id(file);
	/* this should never be called as we don't support copying pmem
	 * ranges via fork */
	BUG_ON(!has_allocation(file));
	down_write(&data->sem);
	/* remap the garbage pages, forkers don't get access to the data */
	pmem_unmap_pfn_range(id, vma, data, 0, vma->vm_end - vma->vm_start);
	up_write(&data->sem);
}

static void pmem_vma_close(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct pmem_data *data = file->private_data;

	DLOG("current %u ppid %u file %p count %d\n", current->pid,
	     current->parent->pid, file, file_count(file));
	if (unlikely(!is_pmem_file(file) || !has_allocation(file))) {
		printk(KERN_WARNING "pmem: something is very wrong, you are "
		       "closing a vma backing an allocation that doesn't "
		       "exist!\n");
		return;
	}
	down_write(&data->sem);
	if (data->vma == vma) {
		data->vma = NULL;
		if ((data->flags & PMEM_FLAGS_CONNECTED) &&
		    (data->flags & PMEM_FLAGS_SUBMAP))
			data->flags |= PMEM_FLAGS_UNSUBMAP;
	}
	/* the kernel is going to free this vma now anyway */
	up_write(&data->sem);
}

static struct vm_operations_struct vm_ops = {
	.open = pmem_vma_open,
	.close = pmem_vma_close,
};

static int pmem_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct pmem_data *data;
	int index;
	unsigned long vma_size = vma->vm_end - vma->vm_start;
	int ret = 0, id = get_id(file);

	if (vma->vm_pgoff || !PMEM_IS_PAGE_ALIGNED(vma_size)) {
#if PMEM_DEBUG
		printk(KERN_ERR "pmem: mmaps must be at offset zero, aligned"
				" and a multiple of the page size.\n");
#endif
		return -EINVAL;
	}

	data = (struct pmem_data *)file->private_data;
	down_write(&data->sem);
	/* check this file isn't already mmaped; for submaps check this file
	 * has never been mmaped */
	if ((data->flags & PMEM_FLAGS_MASTERMAP) ||
	    (data->flags & PMEM_FLAGS_SUBMAP) ||
	    (data->flags & PMEM_FLAGS_UNSUBMAP)) {
#if PMEM_DEBUG
		printk(KERN_ERR "pmem: you can only mmap a pmem file once, "
		       "this file is already mmaped. %x\n", data->flags);
#endif
		ret = -EINVAL;
		goto error;
	}
	/* if file->private_data == unalloced, alloc */
	if (data && data->index == -1) {
		down_write(&pmem[id].bitmap_sem);
		index = pmem_allocate(id, vma->vm_end - vma->vm_start);
		up_write(&pmem[id].bitmap_sem);
		data->index = index;
	}
	/* either no space was available or an error occurred */
	if (!has_allocation(file)) {
		ret = -EINVAL;
		printk("pmem: could not find allocation for map.\n");
		goto error;
	}

	if (pmem_len(id, data) < vma_size) {
#if PMEM_DEBUG
		printk(KERN_WARNING "pmem: mmap size [%lu] does not match "
		       "size of backing region [%lu].\n", vma_size,
		       pmem_len(id, data));
#endif
		ret = -EINVAL;
		goto error;
	}

	vma->vm_pgoff = pmem_start_addr(id, data) >> PAGE_SHIFT;
	vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_page_prot);

	if (data->flags & PMEM_FLAGS_CONNECTED) {
		struct pmem_region_node *region_node;
		struct list_head *elt;
		if (pmem_map_garbage(id, vma, data, 0, vma_size)) {
			printk("pmem: mmap failed in kernel!\n");
			ret = -EAGAIN;
			goto error;
		}
		list_for_each(elt, &data->region_list) {
			region_node = list_entry(elt, struct pmem_region_node,
						 list);
			DLOG("remapping file: %p %lx %lx\n", file,
				region_node->region.offset,
				region_node->region.len);
			if (pmem_remap_pfn_range(id, vma, data,
						 region_node->region.offset,
						 region_node->region.len)) {
				ret = -EAGAIN;
				goto error;
			}
		}
		data->flags |= PMEM_FLAGS_SUBMAP;
		get_task_struct(current->group_leader);
		data->task = current->group_leader;
		data->vma = vma;
#if PMEM_DEBUG
		data->pid = current->pid;
#endif
		DLOG("submmapped file %p vma %p pid %u\n", file, vma,
		     current->pid);
	} else {
		if (pmem_map_pfn_range(id, vma, data, 0, vma_size)) {
			printk(KERN_INFO "pmem: mmap failed in kernel!\n");
			ret = -EAGAIN;
			goto error;
		}
		data->flags |= PMEM_FLAGS_MASTERMAP;
		data->pid = current->pid;
	}
	vma->vm_ops = &vm_ops;
error:
	up_write(&data->sem);
	return ret;
}

/* the following are the api for accessing pmem regions by other drivers
 * from inside the kernel */
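/* a minimal usage sketch for another kernel driver; the fd here is a
 * hypothetical pmem file descriptor handed in from userspace:
 *
 *	unsigned long paddr, kvaddr, len;
 *	struct file *filp;
 *
 *	if (get_pmem_file(fd, &paddr, &kvaddr, &len, &filp))
 *		return -EINVAL;
 *	// ... hand paddr/len to hardware, or use kvaddr in the kernel ...
 *	flush_pmem_file(filp, 0, len);
 *	put_pmem_file(filp);
 */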
int get_pmem_user_addr(struct file *file, unsigned long *start,
		   unsigned long *len)
{
	struct pmem_data *data;
	if (!is_pmem_file(file) || !has_allocation(file)) {
#if PMEM_DEBUG
		printk(KERN_INFO "pmem: requested pmem data from invalid "
				  "file.\n");
#endif
		return -1;
	}
	data = (struct pmem_data *)file->private_data;
	down_read(&data->sem);
	if (data->vma) {
		*start = data->vma->vm_start;
		*len = data->vma->vm_end - data->vma->vm_start;
	} else {
		*start = 0;
		*len = 0;
	}
	up_read(&data->sem);
	return 0;
}

int get_pmem_addr(struct file *file, unsigned long *start,
		  unsigned long *vstart, unsigned long *len)
{
	struct pmem_data *data;
	int id;

	if (!is_pmem_file(file) || !has_allocation(file)) {
		return -1;
	}

	data = (struct pmem_data *)file->private_data;
	if (data->index == -1) {
#if PMEM_DEBUG
		printk(KERN_INFO "pmem: requested pmem data from file with no "
		       "allocation.\n");
#endif
		return -1;
	}
	id = get_id(file);

	down_read(&data->sem);
	*start = pmem_start_addr(id, data);
	*len = pmem_len(id, data);
	*vstart = (unsigned long)pmem_start_vaddr(id, data);
	up_read(&data->sem);
#if PMEM_DEBUG
	down_write(&data->sem);
	data->ref++;
	up_write(&data->sem);
#endif
	return 0;
}

int get_pmem_file(int fd, unsigned long *start, unsigned long *vstart,
		  unsigned long *len, struct file **filp)
{
	struct file *file;

	file = fget(fd);
	if (unlikely(file == NULL)) {
		printk(KERN_INFO "pmem: requested data from file descriptor "
		       "that doesn't exist.\n");
		return -1;
	}

	if (get_pmem_addr(file, start, vstart, len))
		goto end;

	if (filp)
		*filp = file;
	return 0;
end:
	fput(file);
	return -1;
}

void put_pmem_file(struct file *file)
{
	struct pmem_data *data;
	int id;

	if (!is_pmem_file(file))
		return;
	id = get_id(file);
	data = (struct pmem_data *)file->private_data;
#if PMEM_DEBUG
	down_write(&data->sem);
	if (data->ref == 0) {
		printk("pmem: pmem_put > pmem_get %s (pid %d)\n",
		       pmem[id].dev.name, data->pid);
		BUG();
	}
	data->ref--;
	up_write(&data->sem);
#endif
	fput(file);
}

void flush_pmem_file(struct file *file, unsigned long offset, unsigned long len)
{
	struct pmem_data *data;
	int id;
	void *vaddr;
	struct pmem_region_node *region_node;
	struct list_head *elt;
	void *flush_start, *flush_end;

	if (!is_pmem_file(file) || !has_allocation(file)) {
		return;
	}

	id = get_id(file);
	data = (struct pmem_data *)file->private_data;
	if (!pmem[id].cached)
		return;

	down_read(&data->sem);
	vaddr = pmem_start_vaddr(id, data);
	/* if this isn't a submmapped file, flush the whole thing */
	if (unlikely(!(data->flags & PMEM_FLAGS_CONNECTED))) {
		dmac_flush_range(vaddr, vaddr + pmem_len(id, data));
		goto end;
	}
	/* otherwise, flush the region of the file we are drawing */
	list_for_each(elt, &data->region_list) {
		region_node = list_entry(elt, struct pmem_region_node, list);
		if ((offset >= region_node->region.offset) &&
		    ((offset + len) <= (region_node->region.offset +
			region_node->region.len))) {
			flush_start = vaddr + region_node->region.offset;
			flush_end = flush_start + region_node->region.len;
			dmac_flush_range(flush_start, flush_end);
			break;
		}
	}
end:
	up_read(&data->sem);
}

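/* a sketch of how userspace is expected to drive the connect/remap path;
 * the device node name comes from the platform data, "/dev/pmem" below is
 * only an example, and region is a struct pmem_region:
 *
 *	master = open("/dev/pmem", O_RDWR);
 *	mmap(NULL, len, PROT_READ|PROT_WRITE, MAP_SHARED, master, 0);
 *	client = open("/dev/pmem", O_RDWR);
 *	ioctl(client, PMEM_CONNECT, master);
 *	mmap(NULL, len, PROT_READ|PROT_WRITE, MAP_SHARED, client, 0);
 *	region.offset = ...; region.len = ...;
 *	ioctl(client, PMEM_MAP, &region);  // must be issued by the process
 *					   // that holds the master fd
 */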
static int pmem_connect(unsigned long connect, struct file *file)
{
	struct pmem_data *data = (struct pmem_data *)file->private_data;
	struct pmem_data *src_data;
	struct file *src_file;
	int ret = 0, put_needed;

	down_write(&data->sem);
	/* retrieve the src file and check it is a pmem file with an alloc */
	src_file = fget_light(connect, &put_needed);
	DLOG("connect %p to %p\n", file, src_file);
	if (!src_file) {
		printk("pmem: src file not found!\n");
		ret = -EINVAL;
		goto err_no_file;
	}
	if (unlikely(!is_pmem_file(src_file) || !has_allocation(src_file))) {
		printk(KERN_INFO "pmem: src file is not a pmem file or has no "
		       "alloc!\n");
		ret = -EINVAL;
		goto err_bad_file;
	}
	src_data = (struct pmem_data *)src_file->private_data;

	if (has_allocation(file) && (data->index != src_data->index)) {
		printk("pmem: file is already mapped but doesn't match this"
		       " src_file!\n");
		ret = -EINVAL;
		goto err_bad_file;
	}
	data->index = src_data->index;
	data->flags |= PMEM_FLAGS_CONNECTED;
	data->master_fd = connect;
	data->master_file = src_file;

err_bad_file:
	fput_light(src_file, put_needed);
err_no_file:
	up_write(&data->sem);
	return ret;
}

static void pmem_unlock_data_and_mm(struct pmem_data *data,
				    struct mm_struct *mm)
{
	up_write(&data->sem);
	if (mm != NULL) {
		up_write(&mm->mmap_sem);
		mmput(mm);
	}
}

static int pmem_lock_data_and_mm(struct file *file, struct pmem_data *data,
				 struct mm_struct **locked_mm)
{
	int ret = 0;
	struct mm_struct *mm = NULL;
	*locked_mm = NULL;
lock_mm:
	down_read(&data->sem);
	if (PMEM_IS_SUBMAP(data)) {
		mm = get_task_mm(data->task);
		if (!mm) {
#if PMEM_DEBUG
			printk("pmem: can't remap, task is gone!\n");
#endif
			up_read(&data->sem);
			return -1;
		}
	}
	up_read(&data->sem);

	if (mm)
		down_write(&mm->mmap_sem);

	down_write(&data->sem);
	/* check that the file didn't get mmaped before we could take the
	 * data sem; this should be safe b/c you can only submap each file
	 * once */
	if (PMEM_IS_SUBMAP(data) && !mm) {
		pmem_unlock_data_and_mm(data, mm);
		goto lock_mm;
	}
	/* now check that vma.mm is still there, it could have been
	 * deleted by vma_close before we could get the data->sem */
	if ((data->flags & PMEM_FLAGS_UNSUBMAP) && (mm != NULL)) {
		/* might as well release this */
		if (data->flags & PMEM_FLAGS_SUBMAP) {
			put_task_struct(data->task);
			data->task = NULL;
			/* lower the submap flag to show the mm is gone */
			data->flags &= ~(PMEM_FLAGS_SUBMAP);
		}
		pmem_unlock_data_and_mm(data, mm);
		return -1;
	}
	*locked_mm = mm;
	return ret;
}

int pmem_remap(struct pmem_region *region, struct file *file,
		      unsigned operation)
{
	int ret;
	struct pmem_region_node *region_node;
	struct mm_struct *mm = NULL;
	struct list_head *elt, *elt2;
	int id = get_id(file);
	struct pmem_data *data = (struct pmem_data *)file->private_data;

	/* pmem region must be aligned on a page boundary */
	if (unlikely(!PMEM_IS_PAGE_ALIGNED(region->offset) ||
		 !PMEM_IS_PAGE_ALIGNED(region->len))) {
#if PMEM_DEBUG
		printk("pmem: request for unaligned pmem suballocation "
		       "%lx %lx\n", region->offset, region->len);
#endif
		return -EINVAL;
	}

	/* if userspace requests a region of len 0, there's nothing to do */
	if (region->len == 0)
		return 0;

	/* lock the mm and data */
	ret = pmem_lock_data_and_mm(file, data, &mm);
	if (ret)
		return 0;

	/* only the owner of the master file can remap the client fds
	 * that back it */
	if (!is_master_owner(file)) {
#if PMEM_DEBUG
		printk("pmem: remap requested from non-master process\n");
#endif
		ret = -EINVAL;
		goto err;
	}

	/* check that the requested range is within the src allocation */
	if (unlikely((region->offset > pmem_len(id, data)) ||
		     (region->len > pmem_len(id, data)) ||
		     (region->offset + region->len > pmem_len(id, data)))) {
#if PMEM_DEBUG
		printk(KERN_INFO "pmem: suballoc doesn't fit in src_file!\n");
#endif
		ret = -EINVAL;
		goto err;
	}

	if (operation == PMEM_MAP) {
		region_node = kmalloc(sizeof(struct pmem_region_node),
			      GFP_KERNEL);
		if (!region_node) {
			ret = -ENOMEM;
#if PMEM_DEBUG
			printk(KERN_INFO "No space to allocate metadata!");
#endif
			goto err;
		}
		region_node->region = *region;
		list_add(&region_node->list, &data->region_list);
	} else if (operation == PMEM_UNMAP) {
		int found = 0;
		list_for_each_safe(elt, elt2, &data->region_list) {
			region_node = list_entry(elt, struct pmem_region_node,
				      list);
			if (region->len == 0 ||
			    (region_node->region.offset == region->offset &&
			    region_node->region.len == region->len)) {
				list_del(elt);
				kfree(region_node);
				found = 1;
			}
		}
		if (!found) {
#if PMEM_DEBUG
			printk("pmem: Unmap region does not map any mapped "
				"region!");
#endif
			ret = -EINVAL;
			goto err;
		}
	}

	if (data->vma && PMEM_IS_SUBMAP(data)) {
		if (operation == PMEM_MAP)
			ret = pmem_remap_pfn_range(id, data->vma, data,
						   region->offset, region->len);
		else if (operation == PMEM_UNMAP)
			ret = pmem_unmap_pfn_range(id, data->vma, data,
						   region->offset, region->len);
	}

err:
	pmem_unlock_data_and_mm(data, mm);
	return ret;
}

static void pmem_revoke(struct file *file, struct pmem_data *data)
{
	struct pmem_region_node *region_node;
	struct list_head *elt, *elt2;
	struct mm_struct *mm = NULL;
	int id = get_id(file);
	int ret = 0;

	data->master_file = NULL;
	ret = pmem_lock_data_and_mm(file, data, &mm);
	/* if lock_data_and_mm fails either the task that mapped the fd, or
	 * the vma that mapped it have already gone away, nothing more
	 * needs to be done */
	if (ret)
		return;
	/* unmap everything */
	/* delete the regions and region list, nothing is mapped any more */
	if (data->vma)
		list_for_each_safe(elt, elt2, &data->region_list) {
			region_node = list_entry(elt, struct pmem_region_node,
						 list);
			pmem_unmap_pfn_range(id, data->vma, data,
					     region_node->region.offset,
					     region_node->region.len);
			list_del(elt);
			kfree(region_node);
		}
	/* delete the master file */
	pmem_unlock_data_and_mm(data, mm);
}

static void pmem_get_size(struct pmem_region *region, struct file *file)
{
	struct pmem_data *data = (struct pmem_data *)file->private_data;
	int id = get_id(file);

	if (!has_allocation(file)) {
		region->offset = 0;
		region->len = 0;
		return;
	} else {
		region->offset = pmem_start_addr(id, data);
		region->len = pmem_len(id, data);
	}
	DLOG("offset %lx len %lx\n", region->offset, region->len);
}

static long pmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct pmem_data *data;
	int id = get_id(file);

	switch (cmd) {
	case PMEM_GET_PHYS:
		{
			struct pmem_region region;
			DLOG("get_phys\n");
			if (!has_allocation(file)) {
				region.offset = 0;
				region.len = 0;
			} else {
				data = (struct pmem_data *)file->private_data;
				region.offset = pmem_start_addr(id, data);
				region.len = pmem_len(id, data);
			}
			printk(KERN_INFO "pmem: request for physical address of pmem region "
					"from process %d.\n", current->pid);
			if (copy_to_user((void __user *)arg, &region,
						sizeof(struct pmem_region)))
				return -EFAULT;
			break;
		}
	case PMEM_MAP:
		{
			struct pmem_region region;
			if (copy_from_user(&region, (void __user *)arg,
						sizeof(struct pmem_region)))
				return -EFAULT;
			data = (struct pmem_data *)file->private_data;
			return pmem_remap(&region, file, PMEM_MAP);
		}
	case PMEM_UNMAP:
		{
			struct pmem_region region;
			if (copy_from_user(&region, (void __user *)arg,
						sizeof(struct pmem_region)))
				return -EFAULT;
			data = (struct pmem_data *)file->private_data;
			return pmem_remap(&region, file, PMEM_UNMAP);
		}
	case PMEM_GET_SIZE:
		{
			struct pmem_region region;
			DLOG("get_size\n");
			pmem_get_size(&region, file);
			if (copy_to_user((void __user *)arg, &region,
						sizeof(struct pmem_region)))
				return -EFAULT;
			break;
		}
	case PMEM_GET_TOTAL_SIZE:
		{
			struct pmem_region region;
			DLOG("get total size\n");
			region.offset = 0;
			region.len = pmem[id].size;
			if (copy_to_user((void __user *)arg, &region,
						sizeof(struct pmem_region)))
				return -EFAULT;
			break;
		}
	case PMEM_ALLOCATE:
		{
			if (has_allocation(file))
				return -EINVAL;
			data = (struct pmem_data *)file->private_data;
			down_write(&pmem[id].bitmap_sem);
			data->index = pmem_allocate(id, arg);
			up_write(&pmem[id].bitmap_sem);
			break;
		}
	case PMEM_CONNECT:
		DLOG("connect\n");
		return pmem_connect(arg, file);
	default:
		if (pmem[id].ioctl)
			return pmem[id].ioctl(file, cmd, arg);
		return -EINVAL;
	}
	return 0;
}

#if PMEM_DEBUG
static int debug_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}

static ssize_t debug_read(struct file *file, char __user *buf, size_t count,
			  loff_t *ppos)
{
	struct list_head *elt, *elt2;
	struct pmem_data *data;
	struct pmem_region_node *region_node;
	int id = (int)file->private_data;
	const int debug_bufmax = 4096;
	static char buffer[4096];
	int n = 0;

	DLOG("debug open\n");
	n = scnprintf(buffer, debug_bufmax,
		      "pid #: mapped regions (offset, len) (offset,len)...\n");

	down(&pmem[id].data_list_sem);
	list_for_each(elt, &pmem[id].data_list) {
		data = list_entry(elt, struct pmem_data, list);
		down_read(&data->sem);
		n += scnprintf(buffer + n, debug_bufmax - n, "pid %u:",
				data->pid);
		list_for_each(elt2, &data->region_list) {
			region_node = list_entry(elt2, struct pmem_region_node,
				      list);
			n += scnprintf(buffer + n, debug_bufmax - n,
					"(%lx,%lx) ",
					region_node->region.offset,
					region_node->region.len);
		}
		n += scnprintf(buffer + n, debug_bufmax - n, "\n");
		up_read(&data->sem);
	}
	up(&pmem[id].data_list_sem);

	buffer[n] = 0;
	return simple_read_from_buffer(buf, count, ppos, buffer, n);
}

static struct file_operations debug_fops = {
	.read = debug_read,
	.open = debug_open,
};
#endif

#if 0
static struct miscdevice pmem_dev = {
	.name = "pmem",
	.fops = &pmem_fops,
};
#endif

int pmem_setup(struct android_pmem_platform_data *pdata,
	       long (*ioctl)(struct file *, unsigned int, unsigned long),
	       int (*release)(struct inode *, struct file *))
{
	int err = 0;
	int i, index = 0;
	int id = id_count;
	id_count++;

	pmem[id].no_allocator = pdata->no_allocator;
	pmem[id].cached = pdata->cached;
	pmem[id].buffered = pdata->buffered;
	pmem[id].base = pdata->start;
	pmem[id].size = pdata->size;
	pmem[id].ioctl = ioctl;
	pmem[id].release = release;
	init_rwsem(&pmem[id].bitmap_sem);
	init_MUTEX(&pmem[id].data_list_sem);
	INIT_LIST_HEAD(&pmem[id].data_list);
	pmem[id].dev.name = pdata->name;
	pmem[id].dev.minor = id;
	pmem[id].dev.fops = &pmem_fops;
	printk(KERN_INFO "%s: %d init\n", pdata->name, pdata->cached);

	err = misc_register(&pmem[id].dev);
	if (err) {
		printk(KERN_ALERT "Unable to register pmem driver!\n");
		goto err_cant_register_device;
	}
	pmem[id].num_entries = pmem[id].size / PMEM_MIN_ALLOC;

	pmem[id].bitmap = kmalloc(pmem[id].num_entries *
				  sizeof(struct pmem_bits), GFP_KERNEL);
	if (!pmem[id].bitmap)
		goto err_no_mem_for_metadata;

	memset(pmem[id].bitmap, 0, sizeof(struct pmem_bits) *
					  pmem[id].num_entries);

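	/* seed the bitmap by carving the space into power-of-two slots, one
	 * slot per set bit of num_entries, largest first; e.g. for
	 * num_entries = 48 (0b110000) this makes an order-5 slot at index 0
	 * and an order-4 slot at index 32 */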
	for (i = sizeof(pmem[id].num_entries) * 8 - 1; i >= 0; i--) {
		if ((pmem[id].num_entries) & 1<<i) {
			PMEM_ORDER(id, index) = i;
			index = PMEM_NEXT_INDEX(id, index);
		}
	}

	if (pmem[id].cached)
		pmem[id].vbase = ioremap_cached(pmem[id].base,
						pmem[id].size);
#ifdef ioremap_ext_buffered
	else if (pmem[id].buffered)
		pmem[id].vbase = ioremap_ext_buffered(pmem[id].base,
						      pmem[id].size);
#endif
	else
		pmem[id].vbase = ioremap(pmem[id].base, pmem[id].size);

	if (pmem[id].vbase == 0)
		goto error_cant_remap;

	pmem[id].garbage_pfn = page_to_pfn(alloc_page(GFP_KERNEL));
	if (pmem[id].no_allocator)
		pmem[id].allocated = 0;

#if PMEM_DEBUG
	debugfs_create_file(pdata->name, S_IFREG | S_IRUGO, NULL, (void *)id,
			    &debug_fops);
#endif
	return 0;
error_cant_remap:
	kfree(pmem[id].bitmap);
err_no_mem_for_metadata:
	misc_deregister(&pmem[id].dev);
err_cant_register_device:
	return -1;
}

static int pmem_probe(struct platform_device *pdev)
{
	struct android_pmem_platform_data *pdata;

	if (!pdev || !pdev->dev.platform_data) {
		printk(KERN_ALERT "Unable to probe pmem!\n");
		return -1;
	}
	pdata = pdev->dev.platform_data;
	return pmem_setup(pdata, NULL, NULL);
}

static int pmem_remove(struct platform_device *pdev)
{
	int id = pdev->id;
	__free_page(pfn_to_page(pmem[id].garbage_pfn));
	misc_deregister(&pmem[id].dev);
	return 0;
}

static struct platform_driver pmem_driver = {
	.probe = pmem_probe,
	.remove = pmem_remove,
	.driver = { .name = "android_pmem" }
};

static int __init pmem_init(void)
{
	return platform_driver_register(&pmem_driver);
}

static void __exit pmem_exit(void)
{
	platform_driver_unregister(&pmem_driver);
}

module_init(pmem_init);
module_exit(pmem_exit);