/*
 *  linux/drivers/char/mem.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Added devfs support.
 *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
 *  Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
 */

#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/raw.h>
#include <linux/tty.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/crash_dump.h>
#include <linux/backing-dev.h>
#include <linux/bootmem.h>
#include <linux/splice.h>
#include <linux/pfn.h>
#include <linux/smp_lock.h>

#include <asm/uaccess.h>
#include <asm/io.h>

#ifdef CONFIG_IA64
# include <linux/efi.h>
#endif

/*
 * Architectures vary in how they handle caching for addresses
 * outside of main memory.
 */
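/*
 * On most architectures (the default case below), opening /dev/mem with
 * O_SYNC is how userspace asks for uncached access.
 */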
static inline int uncached_access(struct file *file, unsigned long addr)
{
#if defined(CONFIG_IA64)
	/*
	 * On ia64, we ignore O_SYNC because we cannot tolerate memory
	 * attribute aliases.
	 */
	return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
#elif defined(CONFIG_MIPS)
	{
		extern int __uncached_access(struct file *file,
					     unsigned long addr);

		return __uncached_access(file, addr);
	}
#else
	/*
	 * Accessing memory above the top of memory the kernel knows about,
	 * or through a file pointer that was marked O_SYNC, will be done
	 * non-cached.
	 */
	if (file->f_flags & O_SYNC)
		return 1;
	return addr >= __pa(high_memory);
#endif
}

#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
static inline int valid_phys_addr_range(unsigned long addr, size_t count)
{
	if (addr + count > __pa(high_memory))
		return 0;

	return 1;
}

static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return 1;
}
#endif

#if defined(CONFIG_DEVMEM) || defined(CONFIG_DEVKMEM)
#ifdef CONFIG_STRICT_DEVMEM
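/*
 * With CONFIG_STRICT_DEVMEM, every page of a requested range is vetted
 * by the architecture's devmem_is_allowed() hook, so userspace can only
 * reach physical pages the platform explicitly permits.
 */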
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	u64 from = ((u64)pfn) << PAGE_SHIFT;
	u64 to = from + size;
	u64 cursor = from;

	while (cursor < to) {
		if (!devmem_is_allowed(pfn)) {
			printk(KERN_INFO
		"Program %s tried to access /dev/mem between %Lx->%Lx.\n",
				current->comm, from, to);
			return 0;
		}
		cursor += PAGE_SIZE;
		pfn++;
	}
	return 1;
}
#else
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	return 1;
}
#endif
#endif

#ifdef CONFIG_DEVMEM
void __attribute__((weak)) unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
}

/*
 * This function reads the *physical* memory. The f_pos points directly
 * to the memory location.
 */
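/*
 * Userspace sketch (hypothetical address; the range must pass the
 * checks below):
 *
 *	int fd = open("/dev/mem", O_RDONLY);
 *	pread(fd, buf, 16, 0x1000);	reads 16 bytes at physical 0x1000
 */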
static ssize_t read_mem(struct file *file, char __user *buf,
			size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t read, sz;
	char *ptr;

	if (!valid_phys_addr_range(p, count))
		return -EFAULT;
	read = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		sz = PAGE_SIZE - p;
		if (sz > count)
			sz = count;
		if (sz > 0) {
			if (clear_user(buf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			count -= sz;
			read += sz;
		}
	}
#endif

	while (count > 0) {
		/*
		 * Handle the first page in case it's not aligned:
		 * (-p & (PAGE_SIZE - 1)) is the number of bytes from p
		 * to the next page boundary.
		 */
		if (-p & (PAGE_SIZE - 1))
			sz = -p & (PAGE_SIZE - 1);
		else
			sz = PAGE_SIZE;

		sz = min_t(unsigned long, sz, count);

		if (!range_is_allowed(p >> PAGE_SHIFT, sz))
			return -EPERM;

		/*
		 * On ia64 if a page has been mapped somewhere as
		 * uncached, then it must also be accessed uncached
		 * by the kernel or data corruption may occur
		 */
		ptr = xlate_dev_mem_ptr(p);
		if (!ptr)
			return -EFAULT;

		if (copy_to_user(buf, ptr, sz)) {
			unxlate_dev_mem_ptr(p, ptr);
			return -EFAULT;
		}

		unxlate_dev_mem_ptr(p, ptr);

		buf += sz;
		p += sz;
		count -= sz;
		read += sz;
	}

	*ppos += read;
	return read;
}

static ssize_t write_mem(struct file *file, const char __user *buf,
			 size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t written, sz;
	unsigned long copied;
	void *ptr;

	if (!valid_phys_addr_range(p, count))
		return -EFAULT;

	written = 0;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		unsigned long sz = PAGE_SIZE - p;
		if (sz > count)
			sz = count;
		/* Hmm. Do something? */
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}
#endif

	while (count > 0) {
		/*
		 * Handle first page in case it's not aligned
		 */
		if (-p & (PAGE_SIZE - 1))
			sz = -p & (PAGE_SIZE - 1);
		else
			sz = PAGE_SIZE;

		sz = min_t(unsigned long, sz, count);

		if (!range_is_allowed(p >> PAGE_SHIFT, sz))
			return -EPERM;

		/*
		 * On ia64 if a page has been mapped somewhere as
		 * uncached, then it must also be accessed uncached
		 * by the kernel or data corruption may occur
		 */
		ptr = xlate_dev_mem_ptr(p);
		if (!ptr) {
			if (written)
				break;
			return -EFAULT;
		}

		copied = copy_from_user(ptr, buf, sz);
		if (copied) {
			written += sz - copied;
			unxlate_dev_mem_ptr(p, ptr);
			if (written)
				break;
			return -EFAULT;
		}

		unxlate_dev_mem_ptr(p, ptr);

		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}

	*ppos += written;
	return written;
}
#endif /* CONFIG_DEVMEM */

#if defined(CONFIG_DEVMEM) || defined(CONFIG_DEVKMEM)

int __attribute__((weak)) phys_mem_access_prot_allowed(struct file *file,
	unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
{
	return 1;
}

#ifndef __HAVE_PHYS_MEM_ACCESS_PROT
static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot)
{
#ifdef pgprot_noncached
	unsigned long offset = pfn << PAGE_SHIFT;

	if (uncached_access(file, offset))
		return pgprot_noncached(vma_prot);
#endif
	return vma_prot;
}
#endif

#ifndef CONFIG_MMU
static unsigned long get_unmapped_area_mem(struct file *file,
					   unsigned long addr,
					   unsigned long len,
					   unsigned long pgoff,
					   unsigned long flags)
{
	if (!valid_mmap_phys_addr_range(pgoff, len))
		return (unsigned long) -EINVAL;
	return pgoff << PAGE_SHIFT;
}

/* can't do an in-place private mapping if there's no MMU */
static inline int private_mapping_ok(struct vm_area_struct *vma)
{
	return vma->vm_flags & VM_MAYSHARE;
}
#else
#define get_unmapped_area_mem	NULL

static inline int private_mapping_ok(struct vm_area_struct *vma)
{
	return 1;
}
#endif

void __attribute__((weak))
map_devmem(unsigned long pfn, unsigned long len, pgprot_t prot)
{
	/* nothing. architectures can override. */
}

void __attribute__((weak))
unmap_devmem(unsigned long pfn, unsigned long len, pgprot_t prot)
{
	/* nothing. architectures can override. */
}

static void mmap_mem_open(struct vm_area_struct *vma)
{
	map_devmem(vma->vm_pgoff, vma->vm_end - vma->vm_start,
		   vma->vm_page_prot);
}

static void mmap_mem_close(struct vm_area_struct *vma)
{
	unmap_devmem(vma->vm_pgoff, vma->vm_end - vma->vm_start,
		     vma->vm_page_prot);
}

static struct vm_operations_struct mmap_mem_ops = {
	.open = mmap_mem_open,
	.close = mmap_mem_close,
#ifdef CONFIG_HAVE_IOREMAP_PROT
	.access = generic_access_phys
#endif
};

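/*
 * mmap of /dev/mem interprets the file offset as a physical address,
 * e.g. mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0x1000) maps the
 * physical page at 0x1000 (a sketch; the offset must be page-aligned
 * and the range must pass the checks below).
 */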
static int mmap_mem(struct file *file, struct vm_area_struct *vma)
{
	size_t size = vma->vm_end - vma->vm_start;

	if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
		return -EINVAL;

	if (!private_mapping_ok(vma))
		return -ENOSYS;

	if (!range_is_allowed(vma->vm_pgoff, size))
		return -EPERM;

	if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size,
						&vma->vm_page_prot))
		return -EINVAL;

	vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
						 size,
						 vma->vm_page_prot);

	vma->vm_ops = &mmap_mem_ops;

	/* remap_pfn_range() will mark the range VM_IO and VM_RESERVED */
	if (remap_pfn_range(vma,
			    vma->vm_start,
			    vma->vm_pgoff,
			    size,
			    vma->vm_page_prot)) {
		unmap_devmem(vma->vm_pgoff, size, vma->vm_page_prot);
		return -EAGAIN;
	}
	return 0;
}
#endif /* CONFIG_DEVMEM || CONFIG_DEVKMEM */

#ifdef CONFIG_DEVKMEM
static int mmap_kmem(struct file *file, struct vm_area_struct *vma)
{
	unsigned long pfn;

	/*
	 * Turn a kernel-virtual address into a physical page frame:
	 * the offset is scaled back up to a virtual address, then
	 * translated with __pa().
	 */
	pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;

	/*
	 * RED-PEN: on some architectures there is more mapped memory
	 * than available in mem_map which pfn_valid checks
	 * for. Perhaps should add a new macro here.
	 *
	 * RED-PEN: vmalloc is not supported right now.
	 */
	if (!pfn_valid(pfn))
		return -EIO;

	vma->vm_pgoff = pfn;
	return mmap_mem(file, vma);
}
#endif

#ifdef CONFIG_CRASH_DUMP
/*
 * Read memory corresponding to the old kernel.
 */
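/*
 * After a kdump, the capture kernel exposes the crashed kernel's image
 * through this device; typical usage is along the lines of
 * "dd if=/dev/oldmem of=oldmem.img" (a sketch, not the only consumer).
 */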
static ssize_t read_oldmem(struct file *file, char __user *buf,
			   size_t count, loff_t *ppos)
{
	unsigned long pfn, offset;
	size_t read = 0, csize;
	int rc = 0;

	while (count) {
		pfn = *ppos / PAGE_SIZE;
		if (pfn > saved_max_pfn)
			return read;

		offset = (unsigned long)(*ppos % PAGE_SIZE);
		if (count > PAGE_SIZE - offset)
			csize = PAGE_SIZE - offset;
		else
			csize = count;

		rc = copy_oldmem_page(pfn, buf, csize, offset, 1);
		if (rc < 0)
			return rc;
		buf += csize;
		*ppos += csize;
		read += csize;
		count -= csize;
	}
	return read;
}
#endif

#ifdef CONFIG_DEVKMEM
/*
 * This function reads the *virtual* memory as seen by the kernel.
 */
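/*
 * Offsets below high_memory are read straight from the kernel's direct
 * mapping; anything above is treated as a vmalloc address and copied
 * out through vread() via a bounce page.
 */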
static ssize_t read_kmem(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t low_count, read, sz;
	char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */

	read = 0;
	if (p < (unsigned long) high_memory) {
		low_count = count;
		if (count > (unsigned long) high_memory - p)
			low_count = (unsigned long) high_memory - p;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
		/* we don't have page 0 mapped on sparc and m68k.. */
		if (p < PAGE_SIZE && low_count > 0) {
			size_t tmp = PAGE_SIZE - p;
			if (tmp > low_count)
				tmp = low_count;
			if (clear_user(buf, tmp))
				return -EFAULT;
			buf += tmp;
			p += tmp;
			read += tmp;
			low_count -= tmp;
			count -= tmp;
		}
#endif
		while (low_count > 0) {
			/*
			 * Handle first page in case it's not aligned
			 */
			if (-p & (PAGE_SIZE - 1))
				sz = -p & (PAGE_SIZE - 1);
			else
				sz = PAGE_SIZE;

			sz = min_t(unsigned long, sz, low_count);

			/*
			 * On ia64 if a page has been mapped somewhere as
			 * uncached, then it must also be accessed uncached
			 * by the kernel or data corruption may occur
			 */
			kbuf = xlate_dev_kmem_ptr((char *)p);

			if (copy_to_user(buf, kbuf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			read += sz;
			low_count -= sz;
			count -= sz;
		}
	}

	if (count > 0) {
		kbuf = (char *)__get_free_page(GFP_KERNEL);
		if (!kbuf)
			return -ENOMEM;
		while (count > 0) {
			int len = count;

			if (len > PAGE_SIZE)
				len = PAGE_SIZE;
			len = vread(kbuf, (char *)p, len);
			if (!len)
				break;
			if (copy_to_user(buf, kbuf, len)) {
				free_page((unsigned long)kbuf);
				return -EFAULT;
			}
			count -= len;
			buf += len;
			read += len;
			p += len;
		}
		free_page((unsigned long)kbuf);
	}
	*ppos = p;
	return read;
}

static inline ssize_t
do_write_kmem(void *p, unsigned long realp, const char __user *buf,
	      size_t count, loff_t *ppos)
{
	ssize_t written, sz;
	unsigned long copied;

	written = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (realp < PAGE_SIZE) {
		unsigned long sz = PAGE_SIZE - realp;
		if (sz > count)
			sz = count;
		/* Hmm. Do something? */
		buf += sz;
		p += sz;
		realp += sz;
		count -= sz;
		written += sz;
	}
#endif

	while (count > 0) {
		char *ptr;
		/*
		 * Handle first page in case it's not aligned
		 */
		if (-realp & (PAGE_SIZE - 1))
			sz = -realp & (PAGE_SIZE - 1);
		else
			sz = PAGE_SIZE;

		sz = min_t(unsigned long, sz, count);

		/*
		 * On ia64 if a page has been mapped somewhere as
		 * uncached, then it must also be accessed uncached
		 * by the kernel or data corruption may occur
		 */
		ptr = xlate_dev_kmem_ptr(p);

		copied = copy_from_user(ptr, buf, sz);
		if (copied) {
			written += sz - copied;
			if (written)
				break;
			return -EFAULT;
		}
		buf += sz;
		p += sz;
		realp += sz;
		count -= sz;
		written += sz;
	}

	*ppos += written;
	return written;
}

/*
 * This function writes to the *virtual* memory as seen by the kernel.
 */
static ssize_t write_kmem(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t wrote = 0;
	ssize_t virtr = 0;
	ssize_t written;
	char *kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */

	if (p < (unsigned long) high_memory) {

		wrote = count;
		if (count > (unsigned long) high_memory - p)
			wrote = (unsigned long) high_memory - p;

		written = do_write_kmem((void *)p, p, buf, wrote, ppos);
		if (written != wrote)
			return written;
		wrote = written;
		p += wrote;
		buf += wrote;
		count -= wrote;
	}

	if (count > 0) {
		kbuf = (char *)__get_free_page(GFP_KERNEL);
		if (!kbuf)
			return wrote ? wrote : -ENOMEM;
		while (count > 0) {
			int len = count;

			if (len > PAGE_SIZE)
				len = PAGE_SIZE;
			if (len) {
				written = copy_from_user(kbuf, buf, len);
				if (written) {
					if (wrote + virtr)
						break;
					free_page((unsigned long)kbuf);
					return -EFAULT;
				}
			}
			len = vwrite(kbuf, (char *)p, len);
			count -= len;
			buf += len;
			virtr += len;
			p += len;
		}
		free_page((unsigned long)kbuf);
	}

	*ppos = p;
	return virtr + wrote;
}
#endif

#ifdef CONFIG_DEVPORT
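/*
 * /dev/port maps the file offset onto the I/O port space: seeking to
 * offset 0x70 and reading one byte issues inb(0x70).
 */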
static ssize_t read_port(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	unsigned long i = *ppos;
	char __user *tmp = buf;

	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;
	while (count-- > 0 && i < 65536) {
		if (__put_user(inb(i), tmp) < 0)
			return -EFAULT;
		i++;
		tmp++;
	}
	*ppos = i;
	return tmp - buf;
}

static ssize_t write_port(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	unsigned long i = *ppos;
	const char __user *tmp = buf;

	if (!access_ok(VERIFY_READ, buf, count))
		return -EFAULT;
	while (count-- > 0 && i < 65536) {
		char c;
		if (__get_user(c, tmp)) {
			if (tmp > buf)
				break;
			return -EFAULT;
		}
		outb(c, i);
		i++;
		tmp++;
	}
	*ppos = i;
	return tmp - buf;
}
#endif

static ssize_t read_null(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	return 0;
}

static ssize_t write_null(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	return count;
}

static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
			struct splice_desc *sd)
{
	return sd->len;
}

static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
				 loff_t *ppos, size_t len, unsigned int flags)
{
	return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
}

static ssize_t read_zero(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	size_t written;

	if (!count)
		return 0;

	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;

	written = 0;
	while (count) {
		unsigned long unwritten;
		size_t chunk = count;

		if (chunk > PAGE_SIZE)
			chunk = PAGE_SIZE;	/* Just for latency reasons */
		unwritten = clear_user(buf, chunk);
		written += chunk - unwritten;
		if (unwritten)
			break;
		buf += chunk;
		count -= chunk;
		cond_resched();
	}
	return written ? written : -EFAULT;
}

static int mmap_zero(struct file *file, struct vm_area_struct *vma)
{
#ifndef CONFIG_MMU
	return -ENOSYS;
#endif
	if (vma->vm_flags & VM_SHARED)
		return shmem_zero_setup(vma);
	return 0;
}
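/*
 * A shared mapping of /dev/zero behaves like anonymous shared memory,
 * the classic pre-POSIX-shm idiom:
 *
 *	fd = open("/dev/zero", O_RDWR);
 *	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 */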

static ssize_t write_full(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	return -ENOSPC;
}

/*
 * Special lseek() function for /dev/null and /dev/zero. Most notably, you
 * can fopen() both devices with "a" now. This was previously impossible.
 * -- SRB.
 */
static loff_t null_lseek(struct file *file, loff_t offset, int orig)
{
	return file->f_pos = 0;
}

#if defined(CONFIG_DEVMEM) || defined(CONFIG_DEVKMEM) || defined(CONFIG_DEVPORT)

/*
 * The memory devices use the full 32/64 bits of the offset, and so we cannot
 * check against negative addresses: they are ok. The return value is weird,
 * though, in that case (0).
 *
 * Also note that seeking relative to the "end of file" isn't supported:
 * it has no meaning, so it returns -EINVAL.
 */
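/*
 * e.g. lseek(fd, 0x100000, SEEK_SET) positions /dev/mem at physical
 * address 1 MiB; lseek(fd, 0, SEEK_END) fails with -EINVAL.
 */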
static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
{
	loff_t ret;

	mutex_lock(&file->f_path.dentry->d_inode->i_mutex);
	switch (orig) {
	case 0:
		file->f_pos = offset;
		ret = file->f_pos;
		force_successful_syscall_return();
		break;
	case 1:
		file->f_pos += offset;
		ret = file->f_pos;
		force_successful_syscall_return();
		break;
	default:
		ret = -EINVAL;
	}
	mutex_unlock(&file->f_path.dentry->d_inode->i_mutex);
	return ret;
}

#endif

#if defined(CONFIG_DEVMEM) || defined(CONFIG_DEVKMEM) || defined(CONFIG_DEVPORT)
static int open_port(struct inode *inode, struct file *filp)
{
	return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
}
#endif

#define zero_lseek	null_lseek
#define full_lseek	null_lseek
#define write_zero	write_null
#define read_full	read_zero
#define open_mem	open_port
#define open_kmem	open_mem
#define open_oldmem	open_mem

#ifdef CONFIG_DEVMEM
static const struct file_operations mem_fops = {
	.llseek = memory_lseek,
	.read = read_mem,
	.write = write_mem,
	.mmap = mmap_mem,
	.open = open_mem,
	.get_unmapped_area = get_unmapped_area_mem,
};
#endif

#ifdef CONFIG_DEVKMEM
static const struct file_operations kmem_fops = {
	.llseek = memory_lseek,
	.read = read_kmem,
	.write = write_kmem,
	.mmap = mmap_kmem,
	.open = open_kmem,
	.get_unmapped_area = get_unmapped_area_mem,
};
#endif

static const struct file_operations null_fops = {
	.llseek = null_lseek,
	.read = read_null,
	.write = write_null,
	.splice_write = splice_write_null,
};

#ifdef CONFIG_DEVPORT
static const struct file_operations port_fops = {
	.llseek = memory_lseek,
	.read = read_port,
	.write = write_port,
	.open = open_port,
};
#endif

static const struct file_operations zero_fops = {
	.llseek = zero_lseek,
	.read = read_zero,
	.write = write_zero,
	.mmap = mmap_zero,
};

/*
 * capabilities for /dev/zero
 * - permits private mappings, "copies" are taken of the source of zeros
 */
static struct backing_dev_info zero_bdi = {
	.capabilities = BDI_CAP_MAP_COPY,
};

static const struct file_operations full_fops = {
	.llseek = full_lseek,
	.read = read_full,
	.write = write_full,
};

#ifdef CONFIG_CRASH_DUMP
static const struct file_operations oldmem_fops = {
	.read = read_oldmem,
	.open = open_oldmem,
};
#endif

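/*
 * Writes to /dev/kmsg are injected into the kernel log; e.g.
 * "echo hello > /dev/kmsg" ends up in dmesg output.
 */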
static ssize_t kmsg_write(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	char *tmp;
	ssize_t ret;

	tmp = kmalloc(count + 1, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;
	ret = -EFAULT;
	if (!copy_from_user(tmp, buf, count)) {
		tmp[count] = 0;
		ret = printk("%s", tmp);
		if (ret > count)
			/* printk can add a prefix */
			ret = count;
	}
	kfree(tmp);
	return ret;
}

static const struct file_operations kmsg_fops = {
	.write = kmsg_write,
};

static int memory_open(struct inode *inode, struct file *filp)
{
	int ret = 0;

	lock_kernel();
	switch (iminor(inode)) {
#ifdef CONFIG_DEVMEM
	case 1:
		filp->f_op = &mem_fops;
		filp->f_mapping->backing_dev_info =
			&directly_mappable_cdev_bdi;
		break;
#endif
#ifdef CONFIG_DEVKMEM
	case 2:
		filp->f_op = &kmem_fops;
		filp->f_mapping->backing_dev_info =
			&directly_mappable_cdev_bdi;
		break;
#endif
	case 3:
		filp->f_op = &null_fops;
		break;
#ifdef CONFIG_DEVPORT
	case 4:
		filp->f_op = &port_fops;
		break;
#endif
	case 5:
		filp->f_mapping->backing_dev_info = &zero_bdi;
		filp->f_op = &zero_fops;
		break;
	case 7:
		filp->f_op = &full_fops;
		break;
	case 8:
		filp->f_op = &random_fops;
		break;
	case 9:
		filp->f_op = &urandom_fops;
		break;
	case 11:
		filp->f_op = &kmsg_fops;
		break;
#ifdef CONFIG_CRASH_DUMP
	case 12:
		filp->f_op = &oldmem_fops;
		break;
#endif
	default:
		unlock_kernel();
		return -ENXIO;
	}
	if (filp->f_op && filp->f_op->open)
		ret = filp->f_op->open(inode, filp);
	unlock_kernel();
	return ret;
}

static const struct file_operations memory_fops = {
	.open = memory_open,	/* just a selector for the real open */
};

static const struct {
	unsigned int minor;
	char *name;
	umode_t mode;
	const struct file_operations *fops;
} devlist[] = { /* list of minor devices */
#ifdef CONFIG_DEVMEM
	{1, "mem",     S_IRUSR | S_IWUSR | S_IRGRP, &mem_fops},
#endif
#ifdef CONFIG_DEVKMEM
	{2, "kmem",    S_IRUSR | S_IWUSR | S_IRGRP, &kmem_fops},
#endif
	{3, "null",    S_IRUGO | S_IWUGO,           &null_fops},
#ifdef CONFIG_DEVPORT
	{4, "port",    S_IRUSR | S_IWUSR | S_IRGRP, &port_fops},
#endif
	{5, "zero",    S_IRUGO | S_IWUGO,           &zero_fops},
	{7, "full",    S_IRUGO | S_IWUGO,           &full_fops},
	{8, "random",  S_IRUGO | S_IWUSR,           &random_fops},
	{9, "urandom", S_IRUGO | S_IWUSR,           &urandom_fops},
	{11, "kmsg",   S_IRUGO | S_IWUSR,           &kmsg_fops},
#ifdef CONFIG_CRASH_DUMP
	{12, "oldmem", S_IRUSR | S_IWUSR | S_IRGRP, &oldmem_fops},
#endif
};

static struct class *mem_class;

static int __init chr_dev_init(void)
{
	int i;
	int err;

	err = bdi_init(&zero_bdi);
	if (err)
		return err;

	if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
		printk(KERN_ERR "unable to get major %d for memory devs\n",
		       MEM_MAJOR);

	mem_class = class_create(THIS_MODULE, "mem");
	for (i = 0; i < ARRAY_SIZE(devlist); i++)
		device_create(mem_class, NULL,
			      MKDEV(MEM_MAJOR, devlist[i].minor), NULL,
			      devlist[i].name);

	return 0;
}

fs_initcall(chr_dev_init);