Lines matching the full identifier mem (all hits fall in the powerpc mm_iommu_* helpers)
132 struct mm_iommu_table_group_mem_t *mem; in mm_iommu_get() local
141 list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, in mm_iommu_get()
143 if ((mem->ua == ua) && (mem->entries == entries)) { in mm_iommu_get()
144 ++mem->used; in mm_iommu_get()
145 *pmem = mem; in mm_iommu_get()
150 if ((mem->ua < (ua + (entries << PAGE_SHIFT))) && in mm_iommu_get()
151 (ua < (mem->ua + in mm_iommu_get()
152 (mem->entries << PAGE_SHIFT)))) { in mm_iommu_get()
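The pair of comparisons above is the standard half-open interval overlap test: a new registration is refused if [ua, ua + (entries << PAGE_SHIFT)) intersects any region already on the list. A minimal sketch of the test in isolation; the function name and the fixed PAGE_SHIFT are illustrative, not kernel API:

#include <stdbool.h>
#include <stdint.h>

#define PAGE_SHIFT 12   /* illustrative; the kernel value is per-arch */

/* Two half-open ranges overlap iff each one starts below the
 * other's end. */
static bool ranges_overlap(uint64_t ua_a, uint64_t entries_a,
                           uint64_t ua_b, uint64_t entries_b)
{
        uint64_t end_a = ua_a + (entries_a << PAGE_SHIFT);
        uint64_t end_b = ua_b + (entries_b << PAGE_SHIFT);

        return ua_a < end_b && ua_b < end_a;
}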
165 mem = kzalloc(sizeof(*mem), GFP_KERNEL); in mm_iommu_get()
166 if (!mem) { in mm_iommu_get()
176 mem->pageshift = __ffs(ua | (entries << PAGE_SHIFT)); in mm_iommu_get()
177 mem->hpas = vzalloc(array_size(entries, sizeof(mem->hpas[0]))); in mm_iommu_get()
178 if (!mem->hpas) { in mm_iommu_get()
179 kfree(mem); in mm_iommu_get()
190 put_page(pfn_to_page(mem->hpas[j] >> in mm_iommu_get()
192 vfree(mem->hpas); in mm_iommu_get()
193 kfree(mem); in mm_iommu_get()
210 put_page(pfn_to_page(mem->hpas[j] >> in mm_iommu_get()
212 vfree(mem->hpas); in mm_iommu_get()
213 kfree(mem); in mm_iommu_get()
219 if (mem->pageshift > PAGE_SHIFT && PageCompound(page)) { in mm_iommu_get()
235 mem->pageshift = min(mem->pageshift, pageshift); in mm_iommu_get()
236 mem->hpas[i] = page_to_pfn(page) << PAGE_SHIFT; in mm_iommu_get()
239 atomic64_set(&mem->mapped, 1); in mm_iommu_get()
240 mem->used = 1; in mm_iommu_get()
241 mem->ua = ua; in mm_iommu_get()
242 mem->entries = entries; in mm_iommu_get()
243 *pmem = mem; in mm_iommu_get()
245 list_add_rcu(&mem->next, &mm->context.iommu_group_mem_list); in mm_iommu_get()
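Two details of mm_iommu_get() are worth spelling out. First, the descriptor is published with list_add_rcu() only after every field is initialized, so concurrent RCU readers never observe a half-built entry. Second, mem->pageshift is seeded with __ffs(ua | (entries << PAGE_SHIFT)), the largest page shift compatible with both the base address and the region size, and is then clamped per page for compound pages. A sketch of that seed computation, assuming a nonzero argument (as the kernel's __ffs() also requires); the name and fixed PAGE_SHIFT are illustrative:

#include <stdint.h>

#define PAGE_SHIFT 12   /* illustrative */

/* Index of the lowest set bit of (ua | size-in-bytes): any larger
 * page size would either misalign the base or overrun the region.
 * Undefined for a zero argument, matching __ffs(). */
static unsigned int seed_pageshift(uint64_t ua, uint64_t entries)
{
        return (unsigned int)__builtin_ctzll(ua | (entries << PAGE_SHIFT));
}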
257 static void mm_iommu_unpin(struct mm_iommu_table_group_mem_t *mem) in mm_iommu_unpin() argument
262 for (i = 0; i < mem->entries; ++i) { in mm_iommu_unpin()
263 if (!mem->hpas[i]) in mm_iommu_unpin()
266 page = pfn_to_page(mem->hpas[i] >> PAGE_SHIFT); in mm_iommu_unpin()
270 if (mem->hpas[i] & MM_IOMMU_TABLE_GROUP_PAGE_DIRTY) in mm_iommu_unpin()
274 mem->hpas[i] = 0; in mm_iommu_unpin()
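mm_iommu_unpin() shows the consuming half of a packing trick: mem->hpas[] stores a page-aligned host physical address with status flags folded into the otherwise-unused low bits, so the PFN is recovered with a shift and the dirty flag with a mask. A self-contained sketch of that encoding; HPA_DIRTY and the fixed PAGE_SHIFT are stand-ins for the kernel's MM_IOMMU_TABLE_GROUP_PAGE_DIRTY and per-arch constant:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12            /* illustrative */
#define HPA_DIRTY  (1ULL << 0)   /* low bits are free: hpa is page aligned */

int main(void)
{
        uint64_t hpa  = 0x40000ULL;        /* page-aligned physical address */
        uint64_t slot = hpa | HPA_DIRTY;   /* mark the page dirty in place */

        uint64_t pfn   = slot >> PAGE_SHIFT;   /* flag bits shift out */
        int      dirty = !!(slot & HPA_DIRTY);

        printf("pfn=0x%llx dirty=%d\n", (unsigned long long)pfn, dirty);
        return 0;
}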
278 static void mm_iommu_do_free(struct mm_iommu_table_group_mem_t *mem) in mm_iommu_do_free() argument
281 mm_iommu_unpin(mem); in mm_iommu_do_free()
282 vfree(mem->hpas); in mm_iommu_do_free()
283 kfree(mem); in mm_iommu_do_free()
288 struct mm_iommu_table_group_mem_t *mem = container_of(head, in mm_iommu_free() local
291 mm_iommu_do_free(mem); in mm_iommu_free()
294 static void mm_iommu_release(struct mm_iommu_table_group_mem_t *mem) in mm_iommu_release() argument
296 list_del_rcu(&mem->next); in mm_iommu_release()
297 call_rcu(&mem->rcu, mm_iommu_free); in mm_iommu_release()
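mm_iommu_release() and mm_iommu_free() are the classic RCU deferred-free pair: list_del_rcu() unlinks the descriptor while readers may still be traversing it, and call_rcu() postpones the actual vfree()/kfree() until every such reader has finished, with container_of() recovering the descriptor from its embedded rcu_head. A userspace sketch of just the container_of() step; the stub types stand in for the kernel's:

#include <stddef.h>
#include <stdio.h>

struct rcu_head_stub { void (*func)(struct rcu_head_stub *); };

struct mem_stub {
        unsigned long entries;
        struct rcu_head_stub rcu;   /* embedded, as in the kernel struct */
};

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

/* What call_rcu() would invoke after the grace period. */
static void free_cb(struct rcu_head_stub *head)
{
        struct mem_stub *mem = container_of(head, struct mem_stub, rcu);

        printf("freeing descriptor with %lu entries\n", mem->entries);
}

int main(void)
{
        struct mem_stub m = { .entries = 16 };

        free_cb(&m.rcu);
        return 0;
}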
300 long mm_iommu_put(struct mm_struct *mm, struct mm_iommu_table_group_mem_t *mem) in mm_iommu_put() argument
306 if (mem->used == 0) { in mm_iommu_put()
311 --mem->used; in mm_iommu_put()
313 if (mem->used) in mm_iommu_put()
317 if (atomic_cmpxchg(&mem->mapped, 1, 0) != 1) { in mm_iommu_put()
318 ++mem->used; in mm_iommu_put()
324 mm_iommu_release(mem); in mm_iommu_put()
326 mm_iommu_adjust_locked_vm(mm, mem->entries, false); in mm_iommu_put()
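The cmpxchg in mm_iommu_put() is the release gate: the mapped counter is created at 1, so it can only move from 1 to 0 when no mm_iommu_mapped_inc() caller currently holds it above that baseline; on failure the just-dropped used reference is restored and the put is refused. A sketch of the gate with C11 atomics standing in for the kernel's atomic64_t:

#include <stdatomic.h>
#include <stdbool.h>

/* Succeeds, and claims the region for teardown, only when the count
 * sits exactly at its baseline of 1; a failed exchange leaves the
 * counter untouched. */
static bool try_release(atomic_long *mapped)
{
        long expected = 1;

        return atomic_compare_exchange_strong(mapped, &expected, 0);
}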
338 struct mm_iommu_table_group_mem_t *mem, *ret = NULL; in mm_iommu_lookup() local
340 list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) { in mm_iommu_lookup()
341 if ((mem->ua <= ua) && in mm_iommu_lookup()
342 (ua + size <= mem->ua + in mm_iommu_lookup()
343 (mem->entries << PAGE_SHIFT))) { in mm_iommu_lookup()
344 ret = mem; in mm_iommu_lookup()
356 struct mm_iommu_table_group_mem_t *mem, *ret = NULL; in mm_iommu_lookup_rm() local
358 list_for_each_entry_lockless(mem, &mm->context.iommu_group_mem_list, in mm_iommu_lookup_rm()
360 if ((mem->ua <= ua) && in mm_iommu_lookup_rm()
361 (ua + size <= mem->ua + in mm_iommu_lookup_rm()
362 (mem->entries << PAGE_SHIFT))) { in mm_iommu_lookup_rm()
363 ret = mem; in mm_iommu_lookup_rm()
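mm_iommu_lookup() and mm_iommu_lookup_rm() apply the same containment test; the difference is the traversal primitive, since the _rm variant runs in real mode, where the ordinary RCU read-side iterator is avoided and list_for_each_entry_lockless() is used instead. A sketch of the shared test with illustrative names:

#include <stdbool.h>
#include <stdint.h>

#define PAGE_SHIFT 12   /* illustrative */

/* A request [ua, ua + size) matches iff it lies entirely inside a
 * registered region [region_ua, region_ua + (entries << PAGE_SHIFT)). */
static bool region_contains(uint64_t region_ua, uint64_t region_entries,
                            uint64_t ua, uint64_t size)
{
        return region_ua <= ua &&
               ua + size <= region_ua + (region_entries << PAGE_SHIFT);
}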
374 struct mm_iommu_table_group_mem_t *mem, *ret = NULL; in mm_iommu_find() local
376 list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) { in mm_iommu_find()
377 if ((mem->ua == ua) && (mem->entries == entries)) { in mm_iommu_find()
378 ret = mem; in mm_iommu_find()
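Note the contrast with mm_iommu_get(): mm_iommu_find() performs the same exact (ua, entries) match, but in the lines shown it only returns the descriptor and never touches mem->used, so it is a pure lookup rather than a reference-taking one.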
387 long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem, in mm_iommu_ua_to_hpa() argument
390 const long entry = (ua - mem->ua) >> PAGE_SHIFT; in mm_iommu_ua_to_hpa()
391 u64 *va = &mem->hpas[entry]; in mm_iommu_ua_to_hpa()
393 if (entry >= mem->entries) in mm_iommu_ua_to_hpa()
396 if (pageshift > mem->pageshift) in mm_iommu_ua_to_hpa()
405 long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem, in mm_iommu_ua_to_hpa_rm() argument
408 const long entry = (ua - mem->ua) >> PAGE_SHIFT; in mm_iommu_ua_to_hpa_rm()
409 void *va = &mem->hpas[entry]; in mm_iommu_ua_to_hpa_rm()
412 if (entry >= mem->entries) in mm_iommu_ua_to_hpa_rm()
415 if (pageshift > mem->pageshift) in mm_iommu_ua_to_hpa_rm()
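Both translation helpers share the same arithmetic: the entry index is the page distance from the start of the region, bounds-checked against mem->entries, and the caller's pageshift must not exceed what the region guarantees. A sketch of the address math; the composition of the final address from the table entry and the in-page offset is an illustrative completion, since the corresponding kernel lines are elided from this listing:

#include <stdbool.h>
#include <stdint.h>

#define PAGE_SHIFT 12   /* illustrative */
#define PAGE_MASK  (~((1ULL << PAGE_SHIFT) - 1))

static bool translate(const uint64_t *hpas, uint64_t region_ua,
                      uint64_t region_entries, uint64_t ua, uint64_t *hpa)
{
        uint64_t entry = (ua - region_ua) >> PAGE_SHIFT;

        if (entry >= region_entries)
                return false;

        /* page frame from the table, byte offset from the request */
        *hpa = (hpas[entry] & PAGE_MASK) | (ua & ~PAGE_MASK);
        return true;
}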
429 struct mm_iommu_table_group_mem_t *mem; in mm_iommu_ua_mark_dirty_rm() local
434 mem = mm_iommu_lookup_rm(mm, ua, PAGE_SIZE); in mm_iommu_ua_mark_dirty_rm()
435 if (!mem) in mm_iommu_ua_mark_dirty_rm()
438 entry = (ua - mem->ua) >> PAGE_SHIFT; in mm_iommu_ua_mark_dirty_rm()
439 va = &mem->hpas[entry]; in mm_iommu_ua_mark_dirty_rm()
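The body elided here sets the dirty flag in the hpas[] slot just located; that is the same flag mm_iommu_unpin() tests before put_page(), as in the packing sketch after the mm_iommu_unpin() lines above.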
448 long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem) in mm_iommu_mapped_inc() argument
450 if (atomic64_inc_not_zero(&mem->mapped)) in mm_iommu_mapped_inc()
458 void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem) in mm_iommu_mapped_dec() argument
460 atomic64_add_unless(&mem->mapped, -1, 1); in mm_iommu_mapped_dec()
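The mapped counter closes the loop: mm_iommu_mapped_inc() uses inc-not-zero semantics so a region that mm_iommu_put() has already moved to 0 can never be revived, and mm_iommu_mapped_dec() uses add-unless so ordinary unmaps never consume the baseline reference of 1 that belongs to the final put. A sketch of both primitives, with C11 atomics standing in for atomic64_t:

#include <stdatomic.h>
#include <stdbool.h>

/* Increment *v unless it is 0; true if the increment happened. */
static bool inc_not_zero(atomic_long *v)
{
        long old = atomic_load(v);

        while (old != 0)
                if (atomic_compare_exchange_weak(v, &old, old + 1))
                        return true;
        return false;
}

/* Add a to *v unless *v == u; true if the add happened.
 * mm_iommu_mapped_dec() corresponds to add_unless(v, -1, 1). */
static bool add_unless(atomic_long *v, long a, long u)
{
        long old = atomic_load(v);

        while (old != u)
                if (atomic_compare_exchange_weak(v, &old, old + a))
                        return true;
        return false;
}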