Lines matching refs:page (arch/arm/mm/dma-mapping.c)

55 struct page *page; member
64 struct page **ret_page);
107 static void __dma_clear_buffer(struct page *page, size_t size, int coherent_flag) in __dma_clear_buffer() argument
113 if (PageHighMem(page)) { in __dma_clear_buffer()
114 phys_addr_t base = __pfn_to_phys(page_to_pfn(page)); in __dma_clear_buffer()
117 void *ptr = kmap_atomic(page); in __dma_clear_buffer()
122 page++; in __dma_clear_buffer()
128 void *ptr = page_address(page); in __dma_clear_buffer()
141 static struct page *__dma_alloc_buffer(struct device *dev, size_t size, in __dma_alloc_buffer()
145 struct page *page, *p, *e; in __dma_alloc_buffer() local
147 page = alloc_pages(gfp, order); in __dma_alloc_buffer()
148 if (!page) in __dma_alloc_buffer()
154 split_page(page, order); in __dma_alloc_buffer()
155 for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++) in __dma_alloc_buffer()
158 __dma_clear_buffer(page, size, coherent_flag); in __dma_alloc_buffer()
160 return page; in __dma_alloc_buffer()
166 static void __dma_free_buffer(struct page *page, size_t size) in __dma_free_buffer() argument
168 struct page *e = page + (size >> PAGE_SHIFT); in __dma_free_buffer()
170 while (page < e) { in __dma_free_buffer()
171 __free_page(page); in __dma_free_buffer()
172 page++; in __dma_free_buffer()
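
The three fragments above (lines 107-172) implement the classic trim-to-size pattern: the buffer is allocated as a whole power-of-two order, split into individual order-0 pages, and every page beyond the requested size is handed straight back to the page allocator. A minimal sketch of the pattern, assuming a page-aligned size; alloc_trimmed()/free_trimmed() are illustrative names, not kernel APIs:

    #include <linux/gfp.h>
    #include <linux/mm.h>

    static struct page *alloc_trimmed(size_t size, gfp_t gfp)
    {
        unsigned int order = get_order(size);
        struct page *page, *p, *e;

        page = alloc_pages(gfp, order);
        if (!page)
            return NULL;

        /* Break the order-N block into 1 << N independent order-0
         * pages so the unused tail can be freed one page at a time. */
        split_page(page, order);
        for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
            __free_page(p);

        return page;
    }

    /* The matching free walks exactly size >> PAGE_SHIFT pages. */
    static void free_trimmed(struct page *page, size_t size)
    {
        struct page *e = page + (size >> PAGE_SHIFT);

        while (page < e)
            __free_page(page++);
    }
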
177 pgprot_t prot, struct page **ret_page,
182 pgprot_t prot, struct page **ret_page,
204 struct page *page; in atomic_pool_init() local
216 &page, atomic_pool_init, true, NORMAL, in atomic_pool_init()
220 &page, atomic_pool_init, true); in atomic_pool_init()
225 page_to_phys(page), in atomic_pool_init()
312 struct page *page = virt_to_page((void *)addr); in __dma_update_pte() local
315 set_pte_ext(pte, mk_pte(page, prot), 0); in __dma_update_pte()
319 static void __dma_remap(struct page *page, size_t size, pgprot_t prot) in __dma_remap() argument
321 unsigned long start = (unsigned long) page_address(page); in __dma_remap()
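
__dma_remap() (lines 312-321) does not build a second mapping for lowmem pages; it rewrites the protection bits of the kernel linear mapping in place and then flushes the TLB. A sketch of that approach; the three-argument pte callback matches recent kernels (older ones pass an extra page-table token), so treat the signature as an assumption:

    #include <linux/mm.h>
    #include <asm/tlbflush.h>

    /* Called once per PTE covering [start, start + size). */
    static int update_pte(pte_t *pte, unsigned long addr, void *data)
    {
        pgprot_t prot = *(pgprot_t *)data;
        struct page *page = virt_to_page((void *)addr);

        set_pte_ext(pte, mk_pte(page, prot), 0);  /* ARM's PTE setter */
        return 0;
    }

    static void remap_linear(struct page *page, size_t size, pgprot_t prot)
    {
        unsigned long start = (unsigned long)page_address(page);

        apply_to_page_range(&init_mm, start, size, update_pte, &prot);
        flush_tlb_kernel_range(start, start + size);  /* drop stale entries */
    }
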
329 pgprot_t prot, struct page **ret_page, in __alloc_remap_buffer()
332 struct page *page; in __alloc_remap_buffer() local
338 page = __dma_alloc_buffer(dev, size, gfp, NORMAL); in __alloc_remap_buffer()
339 if (!page) in __alloc_remap_buffer()
344 ptr = dma_common_contiguous_remap(page, size, prot, caller); in __alloc_remap_buffer()
346 __dma_free_buffer(page, size); in __alloc_remap_buffer()
351 *ret_page = page; in __alloc_remap_buffer()
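
__alloc_remap_buffer() (lines 329-351) chains the two pieces: grab lowmem pages, then give them a second, DMA-safe mapping in vmalloc space; if the remap fails the pages go straight back. A condensed sketch reusing the illustrative helpers above; the four-argument dma_common_contiguous_remap() form is an assumption that matches recent kernels:

    static void *alloc_remap(struct device *dev, size_t size, gfp_t gfp,
                             pgprot_t prot, struct page **ret_page,
                             const void *caller)
    {
        struct page *page = alloc_trimmed(size, gfp);  /* sketch above */
        void *ptr;

        if (!page)
            return NULL;

        ptr = dma_common_contiguous_remap(page, size, prot, caller);
        if (!ptr) {
            free_trimmed(page, size);
            return NULL;
        }

        *ret_page = page;  /* caller derives the DMA handle from this */
        return ptr;
    }
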
355 static void *__alloc_from_pool(size_t size, struct page **ret_page) in __alloc_from_pool()
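
__alloc_from_pool() exists because none of the paths above may run in atomic context (they can sleep or edit page tables), so GFP_ATOMIC requests are served from a pool carved out at boot by atomic_pool_init() (lines 204-225). A sketch of the consumption side against the genalloc API; atomic_pool stands for the file's static gen_pool:

    #include <linux/genalloc.h>

    static struct gen_pool *atomic_pool;  /* populated by atomic_pool_init() */

    static void *alloc_from_pool(size_t size, struct page **ret_page)
    {
        unsigned long val = gen_pool_alloc(atomic_pool, size);

        if (!val)
            return NULL;

        /* Recover the backing page so the caller can build a DMA handle. */
        *ret_page = phys_to_page(gen_pool_virt_to_phys(atomic_pool, val));
        return (void *)val;
    }
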
392 pgprot_t prot, struct page **ret_page, in __alloc_from_contiguous()
398 struct page *page; in __alloc_from_contiguous() local
401 page = dma_alloc_from_contiguous(dev, count, order, gfp & __GFP_NOWARN); in __alloc_from_contiguous()
402 if (!page) in __alloc_from_contiguous()
405 __dma_clear_buffer(page, size, coherent_flag); in __alloc_from_contiguous()
410 if (PageHighMem(page)) { in __alloc_from_contiguous()
411 ptr = dma_common_contiguous_remap(page, size, prot, caller); in __alloc_from_contiguous()
413 dma_release_from_contiguous(dev, page, count); in __alloc_from_contiguous()
417 __dma_remap(page, size, prot); in __alloc_from_contiguous()
418 ptr = page_address(page); in __alloc_from_contiguous()
422 *ret_page = page; in __alloc_from_contiguous()
426 static void __free_from_contiguous(struct device *dev, struct page *page, in __free_from_contiguous() argument
430 if (PageHighMem(page)) in __free_from_contiguous()
433 __dma_remap(page, size, PAGE_KERNEL); in __free_from_contiguous()
435 dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT); in __free_from_contiguous()
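
The CMA pair (lines 392-435) has to cope with highmem: a highmem page has no linear-map address, so page_address() is useless and a fresh vmalloc-space mapping is required, while a lowmem block can simply have its existing mapping's attributes rewritten. A sketch of that branch, reusing remap_linear() from the earlier sketch:

    static void *alloc_from_cma(struct device *dev, size_t size, gfp_t gfp,
                                pgprot_t prot, struct page **ret_page,
                                const void *caller)
    {
        unsigned long count = size >> PAGE_SHIFT;
        struct page *page;
        void *ptr;

        page = dma_alloc_from_contiguous(dev, count, get_order(size),
                                         gfp & __GFP_NOWARN);
        if (!page)
            return NULL;

        if (PageHighMem(page)) {
            /* No linear mapping exists: create one in vmalloc space. */
            ptr = dma_common_contiguous_remap(page, size, prot, caller);
            if (!ptr) {
                dma_release_from_contiguous(dev, page, count);
                return NULL;
            }
        } else {
            remap_linear(page, size, prot);  /* flip attrs in place */
            ptr = page_address(page);
        }

        *ret_page = page;
        return ptr;
    }

The free side (lines 426-435) mirrors the branch: unmap the vmalloc area for highmem, restore PAGE_KERNEL for lowmem, then release the CMA block.
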
447 struct page **ret_page) in __alloc_simple_buffer()
449 struct page *page; in __alloc_simple_buffer() local
451 page = __dma_alloc_buffer(dev, size, gfp, COHERENT); in __alloc_simple_buffer()
452 if (!page) in __alloc_simple_buffer()
455 *ret_page = page; in __alloc_simple_buffer()
456 return page_address(page); in __alloc_simple_buffer()
460 struct page **ret_page) in simple_allocator_alloc()
468 __dma_free_buffer(args->page, args->size); in simple_allocator_free()
477 struct page **ret_page) in cma_allocator_alloc()
487 __free_from_contiguous(args->dev, args->page, args->cpu_addr, in cma_allocator_free()
497 struct page **ret_page) in pool_allocator_alloc()
513 struct page **ret_page) in remap_allocator_alloc()
525 __dma_free_buffer(args->page, args->size); in remap_allocator_free()
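
Lines 447-525 show all four strategies behind one interface: each path is wrapped in an allocator object so __dma_alloc() can pick one (simple, CMA, pool, or remap) per request, and so the free path can dispatch back to the matching release routine. The shape of that dispatch, roughly; the struct and field names below follow the fragments but are abbreviated:

    struct dma_alloc_args {
        struct device *dev;
        size_t size;
        gfp_t gfp;
        pgprot_t prot;
        const void *caller;
        bool want_vaddr;
    };

    struct dma_free_args {
        struct device *dev;
        size_t size;
        void *cpu_addr;
        struct page *page;
        bool want_vaddr;
    };

    struct dma_allocator {
        void *(*alloc)(struct dma_alloc_args *args, struct page **ret_page);
        void (*free)(struct dma_free_args *args);
    };

The chosen allocator is recorded in the per-buffer metadata at allocation time, which is how __arm_dma_free() later finds the right free callback without re-deriving the decision.
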
538 struct page *page = NULL; in __dma_alloc() local
584 addr = buf->allocator->alloc(&args, &page); in __dma_alloc()
586 if (page) { in __dma_alloc()
589 *handle = phys_to_dma(dev, page_to_phys(page)); in __dma_alloc()
590 buf->virt = args.want_vaddr ? addr : page; in __dma_alloc()
599 return args.want_vaddr ? addr : page; in __dma_alloc()
609 struct page *page = phys_to_page(dma_to_phys(dev, handle)); in __arm_dma_free() local
615 .page = page, in __arm_dma_free()
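
Whichever allocator wins, the handle handed to the driver is derived the same way (line 589): physical address of the first page, translated into the device's bus view; __arm_dma_free() (line 609) inverts both steps to recover the struct page. As a two-helper illustration:

    #include <linux/dma-direct.h>

    static dma_addr_t page_to_handle(struct device *dev, struct page *page)
    {
        return phys_to_dma(dev, page_to_phys(page));   /* CPU phys -> bus */
    }

    static struct page *handle_to_page(struct device *dev, dma_addr_t handle)
    {
        return phys_to_page(dma_to_phys(dev, handle)); /* bus -> CPU phys */
    }
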
627 static void dma_cache_maint_page(struct page *page, unsigned long offset, in dma_cache_maint_page() argument
634 pfn = page_to_pfn(page) + offset / PAGE_SIZE; in dma_cache_maint_page()
647 page = pfn_to_page(pfn); in dma_cache_maint_page()
649 if (PageHighMem(page)) { in dma_cache_maint_page()
654 vaddr = kmap_atomic(page); in dma_cache_maint_page()
658 vaddr = kmap_high_get(page); in dma_cache_maint_page()
661 kunmap_high(page); in dma_cache_maint_page()
665 vaddr = page_address(page) + offset; in dma_cache_maint_page()
679 static void __dma_page_cpu_to_dev(struct page *page, unsigned long off, in __dma_page_cpu_to_dev() argument
684 dma_cache_maint_page(page, off, size, dir, dmac_map_area); in __dma_page_cpu_to_dev()
686 paddr = page_to_phys(page) + off; in __dma_page_cpu_to_dev()
695 static void __dma_page_dev_to_cpu(struct page *page, unsigned long off, in __dma_page_dev_to_cpu() argument
698 phys_addr_t paddr = page_to_phys(page) + off; in __dma_page_dev_to_cpu()
705 dma_cache_maint_page(page, off, size, dir, dmac_unmap_area); in __dma_page_dev_to_cpu()
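
dma_cache_maint_page() (lines 627-665) cannot assume one virtual mapping covers the whole buffer: highmem pages may have no kernel mapping at all, so each page is mapped just long enough to run the cache operation on it. A simplified sketch of that walk; the real code additionally distinguishes VIPT-aliasing caches via kmap_high_get(), which is omitted here:

    #include <linux/highmem.h>
    #include <linux/dma-direction.h>

    static void cache_maint_pages(struct page *page, unsigned long offset,
                                  size_t size, enum dma_data_direction dir,
                                  void (*op)(const void *, size_t, int))
    {
        unsigned long pfn = page_to_pfn(page) + offset / PAGE_SIZE;

        offset %= PAGE_SIZE;
        while (size) {
            size_t len = min_t(size_t, size, PAGE_SIZE - offset);

            page = pfn_to_page(pfn);
            if (PageHighMem(page)) {
                /* Transient mapping, valid only until kunmap_atomic(). */
                void *vaddr = kmap_atomic(page);

                op(vaddr + offset, len, dir);
                kunmap_atomic(vaddr);
            } else {
                op(page_address(page) + offset, len, dir);
            }
            offset = 0;
            pfn++;
            size -= len;
        }
    }

__dma_page_cpu_to_dev() and __dma_page_dev_to_cpu() (lines 679-705) wrap this walk with the matching outer-cache maintenance on the physical range.
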
852 static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, in __iommu_alloc_buffer()
856 struct page **pages; in __iommu_alloc_buffer()
858 int array_size = count * sizeof(struct page *); in __iommu_alloc_buffer()
872 struct page *page; in __iommu_alloc_buffer() local
874 page = dma_alloc_from_contiguous(dev, count, order, in __iommu_alloc_buffer()
876 if (!page) in __iommu_alloc_buffer()
879 __dma_clear_buffer(page, size, coherent_flag); in __iommu_alloc_buffer()
882 pages[i] = page + i; in __iommu_alloc_buffer()
943 static int __iommu_free_buffer(struct device *dev, struct page **pages, in __iommu_free_buffer()
965 __iommu_create_mapping(struct device *dev, struct page **pages, size_t size, in __iommu_create_mapping()
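
The IOMMU path (from line 852 on) does not need CPU-contiguous memory at all: it fills an array with one struct page pointer per PAGE_SIZE chunk and later stitches the chunks into a single contiguous IOVA range with iommu_map(). The fragment at lines 872-882 is the forced-contiguous case, where one CMA block is still taken but exposed through the same array, as sketched below; kvzalloc() stands in for the file's kmalloc-or-vzalloc choice at line 858:

    #include <linux/mm.h>
    #include <linux/slab.h>

    static struct page **iommu_collect_pages(struct device *dev, size_t size)
    {
        int count = size >> PAGE_SHIFT;
        struct page **pages, *page;
        int i;

        pages = kvzalloc(count * sizeof(struct page *), GFP_KERNEL);
        if (!pages)
            return NULL;

        /* Forced-contiguous case: one CMA block, listed page by page. */
        page = dma_alloc_from_contiguous(dev, count, get_order(size), false);
        if (!page) {
            kvfree(pages);
            return NULL;
        }

        for (i = 0; i < count; i++)
            pages[i] = page + i;

        return pages;
    }
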
1021 static struct page **__atomic_get_pages(void *addr) in __atomic_get_pages()
1023 struct page *page; in __atomic_get_pages() local
1027 page = phys_to_page(phys); in __atomic_get_pages()
1029 return (struct page **)page; in __atomic_get_pages()
1032 static struct page **__iommu_get_pages(void *cpu_addr, unsigned long attrs) in __iommu_get_pages()
1047 struct page *page; in __iommu_alloc_simple() local
1051 addr = __alloc_simple_buffer(dev, size, gfp, &page); in __iommu_alloc_simple()
1053 addr = __alloc_from_pool(size, &page); in __iommu_alloc_simple()
1057 *handle = __iommu_create_mapping(dev, &page, size, attrs); in __iommu_alloc_simple()
1082 struct page **pages; in arm_iommu_alloc_attrs()
1122 struct page **pages = __iommu_get_pages(cpu_addr, attrs); in arm_iommu_mmap_attrs()
1150 struct page **pages; in arm_iommu_free_attrs()
1176 struct page **pages = __iommu_get_pages(cpu_addr, attrs); in arm_iommu_get_sgtable()
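
The page array is kept for the buffer's whole lifetime, which is what lets mmap, free, and get_sgtable recover it later through __iommu_get_pages() (lines 1122-1176); for atomic-pool buffers, __atomic_get_pages() (line 1029) instead returns the single backing page cast to struct page **. With the array in hand, rebuilding a scatterlist is essentially one call, roughly:

    #include <linux/scatterlist.h>

    static int build_sgtable(struct sg_table *sgt, struct page **pages,
                             size_t size)
    {
        unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

        /* Coalesces physically adjacent pages into as few entries
         * as possible. */
        return sg_alloc_table_from_pages(sgt, pages, count, 0, size,
                                         GFP_KERNEL);
    }
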
1371 static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page, in arm_iommu_map_page() argument
1380 __dma_page_cpu_to_dev(page, offset, size, dir); in arm_iommu_map_page()
1388 ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, in arm_iommu_map_page()
1413 struct page *page; in arm_iommu_unmap_page() local
1421 page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova)); in arm_iommu_unmap_page()
1422 __dma_page_dev_to_cpu(page, offset, size, dir); in arm_iommu_unmap_page()
1491 struct page *page; in arm_iommu_sync_single_for_cpu() local
1497 page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova)); in arm_iommu_sync_single_for_cpu()
1498 __dma_page_dev_to_cpu(page, offset, size, dir); in arm_iommu_sync_single_for_cpu()
1506 struct page *page; in arm_iommu_sync_single_for_device() local
1512 page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova)); in arm_iommu_sync_single_for_device()
1513 __dma_page_cpu_to_dev(page, offset, size, dir); in arm_iommu_sync_single_for_device()
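
The map/unmap and sync fragments above all follow one ownership rule: clean the CPU caches before the device may read (cpu_to_dev, lines 1380 and 1513) and invalidate them after the device has written (dev_to_cpu, lines 1422 and 1498). Because only the IOVA travels inside the DMA handle, the unmap and sync paths ask the IOMMU itself for the backing page. A sketch of the unmap side; IOVA accounting and the coherent-device shortcut are elided:

    #include <linux/iommu.h>
    #include <linux/dma-mapping.h>

    static void unmap_page_sketch(struct iommu_domain *domain,
                                  dma_addr_t handle, size_t size,
                                  enum dma_data_direction dir)
    {
        dma_addr_t iova = handle & PAGE_MASK;
        unsigned long offset = handle & ~PAGE_MASK;
        /* Only the IOVA is known here: recover the page from the IOMMU. */
        struct page *page =
            phys_to_page(iommu_iova_to_phys(domain, iova));

        /* Device is done writing: throw away stale CPU cache lines. */
        __dma_page_dev_to_cpu(page, offset, size, dir);

        iommu_unmap(domain, iova, PAGE_ALIGN(size + offset));
    }
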