
Lines matching full:mem (uses of the identifier "mem" across the mm_iommu_* helpers)

60 struct mm_iommu_table_group_mem_t *mem, *mem2; in mm_iommu_do_alloc() local
73 mem = kzalloc(sizeof(*mem), GFP_KERNEL); in mm_iommu_do_alloc()
74 if (!mem) { in mm_iommu_do_alloc()
80 mem->pageshift = __ffs(dev_hpa | (entries << PAGE_SHIFT)); in mm_iommu_do_alloc()
81 mem->dev_hpa = dev_hpa; in mm_iommu_do_alloc()
84 mem->dev_hpa = MM_IOMMU_TABLE_INVALID_HPA; in mm_iommu_do_alloc()
91 mem->pageshift = __ffs(ua | (entries << PAGE_SHIFT)); in mm_iommu_do_alloc()
92 mem->hpas = vzalloc(array_size(entries, sizeof(mem->hpas[0]))); in mm_iommu_do_alloc()
93 if (!mem->hpas) { in mm_iommu_do_alloc()
94 kfree(mem); in mm_iommu_do_alloc()
108 mem->hpages + entry, NULL); in mm_iommu_do_alloc()
125 atomic64_set(&mem->mapped, 1); in mm_iommu_do_alloc()
126 mem->used = 1; in mm_iommu_do_alloc()
127 mem->ua = ua; in mm_iommu_do_alloc()
128 mem->entries = entries; in mm_iommu_do_alloc()
143 if (mem->dev_hpa == MM_IOMMU_TABLE_INVALID_HPA) { in mm_iommu_do_alloc()
151 struct page *page = mem->hpages[i]; in mm_iommu_do_alloc()
153 if ((mem->pageshift > PAGE_SHIFT) && PageHuge(page)) in mm_iommu_do_alloc()
155 mem->pageshift = min(mem->pageshift, pageshift); in mm_iommu_do_alloc()
160 mem->hpas[i] = page_to_pfn(page) << PAGE_SHIFT; in mm_iommu_do_alloc()
164 list_add_rcu(&mem->next, &mm->context.iommu_group_mem_list); in mm_iommu_do_alloc()
168 *pmem = mem; in mm_iommu_do_alloc()
174 unpin_user_pages(mem->hpages, pinned); in mm_iommu_do_alloc()
176 vfree(mem->hpas); in mm_iommu_do_alloc()
177 kfree(mem); in mm_iommu_do_alloc()
201 static void mm_iommu_unpin(struct mm_iommu_table_group_mem_t *mem) in mm_iommu_unpin() argument
206 if (!mem->hpas) in mm_iommu_unpin()
209 for (i = 0; i < mem->entries; ++i) { in mm_iommu_unpin()
210 if (!mem->hpas[i]) in mm_iommu_unpin()
213 page = pfn_to_page(mem->hpas[i] >> PAGE_SHIFT); in mm_iommu_unpin()
217 if (mem->hpas[i] & MM_IOMMU_TABLE_GROUP_PAGE_DIRTY) in mm_iommu_unpin()
222 mem->hpas[i] = 0; in mm_iommu_unpin()
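
mm_iommu_unpin() reads each packed hpas word back: the pfn sits above PAGE_SHIFT, and a low flag bit (MM_IOMMU_TABLE_GROUP_PAGE_DIRTY) decides whether the page is marked dirty before it is unpinned. A standalone sketch of that unpacking; the flag's exact bit position is an assumption here.

    #include <stdbool.h>

    #define PAGE_SHIFT 12
    #define DIRTY_BIT  (1UL << 0)  /* assumed stand-in for MM_IOMMU_TABLE_GROUP_PAGE_DIRTY */

    /* pfn_to_page(mem->hpas[i] >> PAGE_SHIFT) in the hits above */
    static inline unsigned long hpa_to_pfn(unsigned long hpa_word)
    {
        return hpa_word >> PAGE_SHIFT;
    }

    /* gates the set-dirty step before the page is unpinned */
    static inline bool hpa_is_dirty(unsigned long hpa_word)
    {
        return hpa_word & DIRTY_BIT;
    }
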
226 static void mm_iommu_do_free(struct mm_iommu_table_group_mem_t *mem) in mm_iommu_do_free() argument
229 mm_iommu_unpin(mem); in mm_iommu_do_free()
230 vfree(mem->hpas); in mm_iommu_do_free()
231 kfree(mem); in mm_iommu_do_free()
236 struct mm_iommu_table_group_mem_t *mem = container_of(head, in mm_iommu_free() local
239 mm_iommu_do_free(mem); in mm_iommu_free()
242 static void mm_iommu_release(struct mm_iommu_table_group_mem_t *mem) in mm_iommu_release() argument
244 list_del_rcu(&mem->next); in mm_iommu_release()
245 call_rcu(&mem->rcu, mm_iommu_free); in mm_iommu_release()
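
The free path is RCU-deferred: mm_iommu_release() unlinks the entry with list_del_rcu() and hands its embedded rcu_head to call_rcu(), and the callback recovers the enclosing descriptor with container_of(). A minimal userspace rendering of that recovery arithmetic, with an illustrative struct layout:

    #include <stddef.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct rcu_head {
        struct rcu_head *next;
        void (*func)(struct rcu_head *);
    };

    struct mem_desc {
        unsigned long entries;
        struct rcu_head rcu;     /* the handle passed to call_rcu() */
    };

    static void mem_free_cb(struct rcu_head *head)
    {
        /* subtract the member offset to get back the enclosing object */
        struct mem_desc *m = container_of(head, struct mem_desc, rcu);

        (void)m;  /* ... free m once no RCU reader can still observe it ... */
    }
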
248 long mm_iommu_put(struct mm_struct *mm, struct mm_iommu_table_group_mem_t *mem) in mm_iommu_put() argument
255 if (mem->used == 0) { in mm_iommu_put()
260 --mem->used; in mm_iommu_put()
262 if (mem->used) in mm_iommu_put()
266 if (atomic_cmpxchg(&mem->mapped, 1, 0) != 1) { in mm_iommu_put()
267 ++mem->used; in mm_iommu_put()
272 if (mem->dev_hpa == MM_IOMMU_TABLE_INVALID_HPA) in mm_iommu_put()
273 unlock_entries = mem->entries; in mm_iommu_put()
276 mm_iommu_release(mem); in mm_iommu_put()
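
mm_iommu_put() pairs the use count with the mapped counter: only when used drops to zero does it try to claim the baseline mapped reference by flipping it from 1 to 0, and a failed cmpxchg (active mappings remain) restores the use count and bails out. A hedged C11 analogue of that hand-off; the names and return convention are illustrative.

    #include <stdatomic.h>
    #include <stdbool.h>

    struct desc {
        unsigned long used;     /* serialized by an external lock in the original */
        atomic_long   mapped;   /* 1 == alive but unmapped, >1 == active maps */
    };

    /* Returns true when the caller may release the descriptor. */
    static bool desc_put(struct desc *d)
    {
        if (d->used == 0)
            return false;            /* unbalanced put */

        if (--d->used)
            return false;            /* other users remain */

        long expect = 1;
        if (!atomic_compare_exchange_strong(&d->mapped, &expect, 0)) {
            ++d->used;               /* still mapped: undo and report busy */
            return false;
        }
        return true;                 /* last user, no mappings: free it */
    }
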
290 struct mm_iommu_table_group_mem_t *mem, *ret = NULL; in mm_iommu_lookup() local
292 list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) { in mm_iommu_lookup()
293 if ((mem->ua <= ua) && in mm_iommu_lookup()
294 (ua + size <= mem->ua + in mm_iommu_lookup()
295 (mem->entries << PAGE_SHIFT))) { in mm_iommu_lookup()
296 ret = mem; in mm_iommu_lookup()
308 struct mm_iommu_table_group_mem_t *mem, *ret = NULL; in mm_iommu_lookup_rm() local
310 list_for_each_entry_lockless(mem, &mm->context.iommu_group_mem_list, in mm_iommu_lookup_rm()
312 if ((mem->ua <= ua) && in mm_iommu_lookup_rm()
313 (ua + size <= mem->ua + in mm_iommu_lookup_rm()
314 (mem->entries << PAGE_SHIFT))) { in mm_iommu_lookup_rm()
315 ret = mem; in mm_iommu_lookup_rm()
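
Both lookup variants apply the same containment test, the lockless _rm one existing because real-mode callers avoid the usual RCU read-side locking. The predicate on its own: the queried [ua, ua + size) window must lie entirely inside the registered region.

    #include <stdbool.h>

    #define PAGE_SHIFT 12

    static bool region_contains(unsigned long region_ua, unsigned long entries,
                                unsigned long ua, unsigned long size)
    {
        return region_ua <= ua &&
               ua + size <= region_ua + (entries << PAGE_SHIFT);
    }
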
326 struct mm_iommu_table_group_mem_t *mem, *ret = NULL; in mm_iommu_get() local
330 list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) { in mm_iommu_get()
331 if ((mem->ua == ua) && (mem->entries == entries)) { in mm_iommu_get()
332 ret = mem; in mm_iommu_get()
333 ++mem->used; in mm_iommu_get()
344 long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem, in mm_iommu_ua_to_hpa() argument
347 const long entry = (ua - mem->ua) >> PAGE_SHIFT; in mm_iommu_ua_to_hpa()
350 if (entry >= mem->entries) in mm_iommu_ua_to_hpa()
353 if (pageshift > mem->pageshift) in mm_iommu_ua_to_hpa()
356 if (!mem->hpas) { in mm_iommu_ua_to_hpa()
357 *hpa = mem->dev_hpa + (ua - mem->ua); in mm_iommu_ua_to_hpa()
361 va = &mem->hpas[entry]; in mm_iommu_ua_to_hpa()
368 long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem, in mm_iommu_ua_to_hpa_rm() argument
371 const long entry = (ua - mem->ua) >> PAGE_SHIFT; in mm_iommu_ua_to_hpa_rm()
374 if (entry >= mem->entries) in mm_iommu_ua_to_hpa_rm()
377 if (pageshift > mem->pageshift) in mm_iommu_ua_to_hpa_rm()
380 if (!mem->hpas) { in mm_iommu_ua_to_hpa_rm()
381 *hpa = mem->dev_hpa + (ua - mem->ua); in mm_iommu_ua_to_hpa_rm()
385 pa = (void *) vmalloc_to_phys(&mem->hpas[entry]); in mm_iommu_ua_to_hpa_rm()
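
The two ua-to-hpa translators compute the entry index from the byte offset, bounds-check it against entries, refuse callers demanding more alignment (pageshift) than the region guarantees, and then either offset linearly into dev_hpa (device-memory regions have no hpas table) or read the packed word. A sketch under those assumptions; combining the page-aligned word with the in-page offset is inferred from convention, since that line is not among the matches above.

    #include <stdbool.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    struct region {
        unsigned long ua, entries, dev_hpa;
        unsigned int pageshift;
        unsigned long *hpas;    /* NULL for device-memory regions */
    };

    static bool region_ua_to_hpa(const struct region *r, unsigned long ua,
                                 unsigned int pageshift, unsigned long *hpa)
    {
        unsigned long entry = (ua - r->ua) >> PAGE_SHIFT;

        if (entry >= r->entries)
            return false;             /* ua falls outside the region */
        if (pageshift > r->pageshift)
            return false;             /* caller needs more alignment than we have */

        if (!r->hpas) {               /* device memory: plain linear offset */
            *hpa = r->dev_hpa + (ua - r->ua);
            return true;
        }

        /* page-aligned hpa from the table, plus the offset within the page */
        *hpa = (r->hpas[entry] & ~(PAGE_SIZE - 1)) | (ua & (PAGE_SIZE - 1));
        return true;
    }
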
396 struct mm_iommu_table_group_mem_t *mem; in mm_iommu_ua_mark_dirty_rm() local
401 mem = mm_iommu_lookup_rm(mm, ua, PAGE_SIZE); in mm_iommu_ua_mark_dirty_rm()
402 if (!mem) in mm_iommu_ua_mark_dirty_rm()
405 if (mem->dev_hpa != MM_IOMMU_TABLE_INVALID_HPA) in mm_iommu_ua_mark_dirty_rm()
408 entry = (ua - mem->ua) >> PAGE_SHIFT; in mm_iommu_ua_mark_dirty_rm()
409 va = &mem->hpas[entry]; in mm_iommu_ua_mark_dirty_rm()
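
mm_iommu_ua_mark_dirty_rm() is the producer for the flag consumed in mm_iommu_unpin(): it looks up the region, rejects device memory, and sets the dirty bit in the packed word for that entry. An illustrative setter, with the same assumed bit position as in the unpin sketch:

    #define DIRTY_BIT (1UL << 0)  /* assumed MM_IOMMU_TABLE_GROUP_PAGE_DIRTY */

    static inline void hpa_mark_dirty(unsigned long *hpa_word)
    {
        *hpa_word |= DIRTY_BIT;   /* consumed later by the unpin loop */
    }
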
421 struct mm_iommu_table_group_mem_t *mem; in mm_iommu_is_devmem() local
424 list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) { in mm_iommu_is_devmem()
425 if (mem->dev_hpa == MM_IOMMU_TABLE_INVALID_HPA) in mm_iommu_is_devmem()
428 end = mem->dev_hpa + (mem->entries << PAGE_SHIFT); in mm_iommu_is_devmem()
429 if ((mem->dev_hpa <= hpa) && (hpa < end)) { in mm_iommu_is_devmem()
445 long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem) in mm_iommu_mapped_inc() argument
447 if (atomic64_inc_not_zero(&mem->mapped)) in mm_iommu_mapped_inc()
455 void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem) in mm_iommu_mapped_dec() argument
457 atomic64_add_unless(&mem->mapped, -1, 1); in mm_iommu_mapped_dec()
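
The mapping counters close the loop: atomic64_inc_not_zero() only takes a reference while the region is still alive (mapped != 0), and atomic64_add_unless(&mem->mapped, -1, 1) never consumes the baseline reference that mm_iommu_put() claims via cmpxchg. A C11 analogue of both operations:

    #include <stdatomic.h>
    #include <stdbool.h>

    static bool mapped_inc(atomic_long *mapped)
    {
        long v = atomic_load(mapped);

        while (v != 0)
            if (atomic_compare_exchange_weak(mapped, &v, v + 1))
                return true;      /* took a mapping reference */

        return false;             /* region already being torn down */
    }

    static void mapped_dec(atomic_long *mapped)
    {
        long v = atomic_load(mapped);

        while (v != 1)            /* never touch the baseline reference */
            if (atomic_compare_exchange_weak(mapped, &v, v - 1))
                return;
    }
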