Lines Matching full:container in drivers/vfio/vfio_iommu_spapr_tce.c. Each match shows the source line number, the matched line, and its enclosing function; "argument" or "local" marks lines where container is declared as a function parameter or a local variable.
48 * A container needs to remember which preregistered region it has
57 * The container descriptor supports only a single group per container.
58 * Required by the API as the container is not supplied with the IOMMU group
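Read together, the container fields dereferenced throughout these matches (lock, enabled, v2, def_window_pending, locked_pages, mm, tables[], group_list, prereg_list) suggest a descriptor along the lines of the sketch below; it is reconstructed from the matched lines for orientation, and the field order and array bound are assumptions rather than the verbatim definition:

    /* Sketch reconstructed from the fields used in the matches below;
     * not a verbatim copy of the definition in vfio_iommu_spapr_tce.c. */
    struct tce_container {
            struct mutex lock;
            bool enabled;                   /* set by VFIO_IOMMU_ENABLE (v1) */
            bool v2;                        /* VFIO_SPAPR_TCE_v2_IOMMU requested */
            bool def_window_pending;        /* default DMA window not created yet */
            unsigned long locked_pages;     /* charged via account_locked_vm() */
            struct mm_struct *mm;           /* owning process, see tce_iommu_mm_set() */
            struct iommu_table *tables[IOMMU_TABLE_GROUP_MAX_TABLES];
            struct list_head group_list;    /* attached IOMMU groups */
            struct list_head prereg_list;   /* preregistered memory regions */
    };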
73 static long tce_iommu_mm_set(struct tce_container *container) in tce_iommu_mm_set() argument
75 if (container->mm) { in tce_iommu_mm_set()
76 if (container->mm == current->mm) in tce_iommu_mm_set()
81 container->mm = current->mm; in tce_iommu_mm_set()
82 mmgrab(container->mm); in tce_iommu_mm_set()
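The tce_iommu_mm_set() fragments above assemble into a small helper that binds the container to the calling process's mm on first use and rejects use from any other mm; in the sketch below the return values and the lines the search did not match are assumptions:

    static long tce_iommu_mm_set(struct tce_container *container)
    {
            if (container->mm) {
                    if (container->mm == current->mm)
                            return 0;       /* already bound to this process */
                    return -EPERM;          /* a different mm owns the container */
            }
            container->mm = current->mm;
            mmgrab(container->mm);          /* reference held until release */

            return 0;
    }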
87 static long tce_iommu_prereg_free(struct tce_container *container, in tce_iommu_prereg_free() argument
92 ret = mm_iommu_put(container->mm, tcemem->mem); in tce_iommu_prereg_free()
102 static long tce_iommu_unregister_pages(struct tce_container *container, in tce_iommu_unregister_pages() argument
113 mem = mm_iommu_get(container->mm, vaddr, size >> PAGE_SHIFT); in tce_iommu_unregister_pages()
117 list_for_each_entry(tcemem, &container->prereg_list, next) { in tce_iommu_unregister_pages()
127 ret = tce_iommu_prereg_free(container, tcemem); in tce_iommu_unregister_pages()
129 mm_iommu_put(container->mm, mem); in tce_iommu_unregister_pages()
134 static long tce_iommu_register_pages(struct tce_container *container, in tce_iommu_register_pages() argument
146 mem = mm_iommu_get(container->mm, vaddr, entries); in tce_iommu_register_pages()
148 list_for_each_entry(tcemem, &container->prereg_list, next) { in tce_iommu_register_pages()
155 ret = mm_iommu_new(container->mm, vaddr, entries, &mem); in tce_iommu_register_pages()
167 list_add(&tcemem->next, &container->prereg_list); in tce_iommu_register_pages()
169 container->enabled = true; in tce_iommu_register_pages()
174 mm_iommu_put(container->mm, mem); in tce_iommu_register_pages()
196 static inline bool tce_groups_attached(struct tce_container *container) in tce_groups_attached() argument
198 return !list_empty(&container->group_list); in tce_groups_attached()
201 static long tce_iommu_find_table(struct tce_container *container, in tce_iommu_find_table() argument
207 struct iommu_table *tbl = container->tables[i]; in tce_iommu_find_table()
224 static int tce_iommu_find_free_table(struct tce_container *container) in tce_iommu_find_free_table() argument
229 if (!container->tables[i]) in tce_iommu_find_free_table()
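Two of the helpers matched above, tce_groups_attached() and tce_iommu_find_free_table(), are almost fully visible; a sketch of how they likely read in full, where the loop bound and the -ENOSPC return are assumptions:

    static inline bool tce_groups_attached(struct tce_container *container)
    {
            return !list_empty(&container->group_list);
    }

    /* Return the first unused slot in container->tables[], i.e. a free
     * DMA-window number, or an error when every slot is taken. */
    static int tce_iommu_find_free_table(struct tce_container *container)
    {
            int i;

            for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i)
                    if (!container->tables[i])
                            return i;

            return -ENOSPC;
    }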
236 static int tce_iommu_enable(struct tce_container *container) in tce_iommu_enable() argument
243 if (container->enabled) in tce_iommu_enable()
271 * So we do not allow enabling a container without a group attached in tce_iommu_enable()
275 if (!tce_groups_attached(container)) in tce_iommu_enable()
278 tcegrp = list_first_entry(&container->group_list, in tce_iommu_enable()
287 ret = tce_iommu_mm_set(container); in tce_iommu_enable()
292 ret = account_locked_vm(container->mm, locked, true); in tce_iommu_enable()
296 container->locked_pages = locked; in tce_iommu_enable()
298 container->enabled = true; in tce_iommu_enable()
303 static void tce_iommu_disable(struct tce_container *container) in tce_iommu_disable() argument
305 if (!container->enabled) in tce_iommu_disable()
308 container->enabled = false; in tce_iommu_disable()
310 BUG_ON(!container->mm); in tce_iommu_disable()
311 account_locked_vm(container->mm, container->locked_pages, false); in tce_iommu_disable()
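The enable/disable matches pair up around locked-memory accounting: tce_iommu_enable() charges `locked` pages against container->mm with account_locked_vm(..., true) and records them in locked_pages, and tce_iommu_disable() returns the same amount. A sketch of the disable side, with the early return assumed for the unmatched line:

    static void tce_iommu_disable(struct tce_container *container)
    {
            if (!container->enabled)
                    return;

            container->enabled = false;

            BUG_ON(!container->mm);
            /* give back the pages charged by tce_iommu_enable() */
            account_locked_vm(container->mm, container->locked_pages, false);
    }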
316 struct tce_container *container; in tce_iommu_open() local
323 container = kzalloc(sizeof(*container), GFP_KERNEL); in tce_iommu_open()
324 if (!container) in tce_iommu_open()
327 mutex_init(&container->lock); in tce_iommu_open()
328 INIT_LIST_HEAD_RCU(&container->group_list); in tce_iommu_open()
329 INIT_LIST_HEAD_RCU(&container->prereg_list); in tce_iommu_open()
331 container->v2 = arg == VFIO_SPAPR_TCE_v2_IOMMU; in tce_iommu_open()
333 return container; in tce_iommu_open()
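A sketch of the open path these matches outline: allocate the container, initialise its lock and the two lists, and record whether the v2 flavour of the sPAPR TCE IOMMU was requested. The argument check, the error message and the ERR_PTR returns are assumptions about the unmatched lines:

    static void *tce_iommu_open(unsigned long arg)
    {
            struct tce_container *container;

            if ((arg != VFIO_SPAPR_TCE_IOMMU) && (arg != VFIO_SPAPR_TCE_v2_IOMMU)) {
                    pr_err("tce_vfio: Wrong IOMMU type\n");   /* message assumed */
                    return ERR_PTR(-EINVAL);
            }

            container = kzalloc(sizeof(*container), GFP_KERNEL);
            if (!container)
                    return ERR_PTR(-ENOMEM);

            mutex_init(&container->lock);
            INIT_LIST_HEAD_RCU(&container->group_list);
            INIT_LIST_HEAD_RCU(&container->prereg_list);

            container->v2 = arg == VFIO_SPAPR_TCE_v2_IOMMU;

            return container;
    }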
336 static int tce_iommu_clear(struct tce_container *container,
339 static void tce_iommu_free_table(struct tce_container *container,
344 struct tce_container *container = iommu_data; in tce_iommu_release() local
349 while (tce_groups_attached(container)) { in tce_iommu_release()
350 tcegrp = list_first_entry(&container->group_list, in tce_iommu_release()
360 struct iommu_table *tbl = container->tables[i]; in tce_iommu_release()
365 tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size); in tce_iommu_release()
366 tce_iommu_free_table(container, tbl); in tce_iommu_release()
369 list_for_each_entry_safe(tcemem, tmtmp, &container->prereg_list, next) in tce_iommu_release()
370 WARN_ON(tce_iommu_prereg_free(container, tcemem)); in tce_iommu_release()
372 tce_iommu_disable(container); in tce_iommu_release()
373 if (container->mm) in tce_iommu_release()
374 mmdrop(container->mm); in tce_iommu_release()
375 mutex_destroy(&container->lock); in tce_iommu_release()
377 kfree(container); in tce_iommu_release()
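The release matches imply the teardown order sketched below: detach any groups still attached, clear and free each remaining TCE table, drop the preregistered regions, undo the locked-vm accounting through tce_iommu_disable(), and finally release the mm reference. The loop bound and the file-local type names (struct tce_iommu_group, struct tce_iommu_prereg) are assumptions:

    static void tce_iommu_release(void *iommu_data)
    {
            struct tce_container *container = iommu_data;
            struct tce_iommu_prereg *tcemem, *tmtmp;
            struct tce_iommu_group *tcegrp;
            long i;

            /* detach whatever groups are still attached */
            while (tce_groups_attached(container)) {
                    tcegrp = list_first_entry(&container->group_list,
                                    struct tce_iommu_group, next);
                    tce_iommu_detach_group(iommu_data, tcegrp->grp);
            }

            /* clear and free every remaining TCE table */
            for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
                    struct iommu_table *tbl = container->tables[i];

                    if (!tbl)
                            continue;

                    tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
                    tce_iommu_free_table(container, tbl);
            }

            /* drop preregistered regions, accounting, and the mm reference */
            list_for_each_entry_safe(tcemem, tmtmp, &container->prereg_list, next)
                    WARN_ON(tce_iommu_prereg_free(container, tcemem));

            tce_iommu_disable(container);
            if (container->mm)
                    mmdrop(container->mm);
            mutex_destroy(&container->lock);

            kfree(container);
    }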
380 static void tce_iommu_unuse_page(struct tce_container *container, in tce_iommu_unuse_page() argument
389 static int tce_iommu_prereg_ua_to_hpa(struct tce_container *container, in tce_iommu_prereg_ua_to_hpa() argument
396 mem = mm_iommu_lookup(container->mm, tce, 1ULL << shift); in tce_iommu_prereg_ua_to_hpa()
409 static void tce_iommu_unuse_page_v2(struct tce_container *container, in tce_iommu_unuse_page_v2() argument
420 ret = tce_iommu_prereg_ua_to_hpa(container, be64_to_cpu(*pua), in tce_iommu_unuse_page_v2()
431 static int tce_iommu_clear(struct tce_container *container, in tce_iommu_clear() argument
463 ret = iommu_tce_xchg_no_kill(container->mm, tbl, entry, &oldhpa, in tce_iommu_clear()
471 if (container->v2) { in tce_iommu_clear()
472 tce_iommu_unuse_page_v2(container, tbl, entry); in tce_iommu_clear()
476 tce_iommu_unuse_page(container, oldhpa); in tce_iommu_clear()
499 static long tce_iommu_build(struct tce_container *container, in tce_iommu_build() argument
515 if (!tce_page_is_contained(container->mm, hpa, in tce_iommu_build()
523 ret = iommu_tce_xchg_no_kill(container->mm, tbl, entry + i, in tce_iommu_build()
526 tce_iommu_unuse_page(container, hpa); in tce_iommu_build()
534 tce_iommu_unuse_page(container, hpa); in tce_iommu_build()
540 tce_iommu_clear(container, tbl, entry, i); in tce_iommu_build()
547 static long tce_iommu_build_v2(struct tce_container *container, in tce_iommu_build_v2() argument
560 ret = tce_iommu_prereg_ua_to_hpa(container, in tce_iommu_build_v2()
565 if (!tce_page_is_contained(container->mm, hpa, in tce_iommu_build_v2()
579 ret = iommu_tce_xchg_no_kill(container->mm, tbl, entry + i, in tce_iommu_build_v2()
583 tce_iommu_unuse_page_v2(container, tbl, entry + i); in tce_iommu_build_v2()
591 tce_iommu_unuse_page_v2(container, tbl, entry + i); in tce_iommu_build_v2()
599 tce_iommu_clear(container, tbl, entry, i); in tce_iommu_build_v2()
606 static long tce_iommu_create_table(struct tce_container *container, in tce_iommu_create_table() argument
621 ret = account_locked_vm(container->mm, table_size >> PAGE_SHIFT, true); in tce_iommu_create_table()
634 static void tce_iommu_free_table(struct tce_container *container, in tce_iommu_free_table() argument
640 account_locked_vm(container->mm, pages, false); in tce_iommu_free_table()
643 static long tce_iommu_create_window(struct tce_container *container, in tce_iommu_create_window() argument
652 num = tce_iommu_find_free_table(container); in tce_iommu_create_window()
657 tcegrp = list_first_entry(&container->group_list, in tce_iommu_create_window()
672 ret = tce_iommu_create_table(container, table_group, num, in tce_iommu_create_window()
683 list_for_each_entry(tcegrp, &container->group_list, next) { in tce_iommu_create_window()
691 container->tables[num] = tbl; in tce_iommu_create_window()
699 list_for_each_entry(tcegrp, &container->group_list, next) { in tce_iommu_create_window()
703 tce_iommu_free_table(container, tbl); in tce_iommu_create_window()
708 static long tce_iommu_remove_window(struct tce_container *container, in tce_iommu_remove_window() argument
716 num = tce_iommu_find_table(container, start_addr, &tbl); in tce_iommu_remove_window()
723 list_for_each_entry(tcegrp, &container->group_list, next) { in tce_iommu_remove_window()
740 tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size); in tce_iommu_remove_window()
741 tce_iommu_free_table(container, tbl); in tce_iommu_remove_window()
742 container->tables[num] = NULL; in tce_iommu_remove_window()
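A sketch connecting the remove-window matches: find the table that backs start_addr, ask each attached group's table_group ops to unset that window, then clear and free the table and vacate the container slot. The ops->unset_window() call and the error codes are assumptions about the unmatched lines:

    static long tce_iommu_remove_window(struct tce_container *container,
                    __u64 start_addr)
    {
            struct iommu_table_group *table_group;
            struct tce_iommu_group *tcegrp;
            struct iommu_table *tbl = NULL;
            int num;

            num = tce_iommu_find_table(container, start_addr, &tbl);
            if (num < 0)
                    return -EINVAL;

            /* detach the window from every attached group */
            list_for_each_entry(tcegrp, &container->group_list, next) {
                    table_group = iommu_group_get_iommudata(tcegrp->grp);
                    if (!table_group->ops || !table_group->ops->unset_window)
                            return -EPERM;
                    table_group->ops->unset_window(table_group, num);
            }

            /* free the table and vacate the slot */
            tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
            tce_iommu_free_table(container, tbl);
            container->tables[num] = NULL;

            return 0;
    }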
747 static long tce_iommu_create_default_window(struct tce_container *container) in tce_iommu_create_default_window() argument
754 if (!container->def_window_pending) in tce_iommu_create_default_window()
757 if (!tce_groups_attached(container)) in tce_iommu_create_default_window()
760 tcegrp = list_first_entry(&container->group_list, in tce_iommu_create_default_window()
766 ret = tce_iommu_create_window(container, IOMMU_PAGE_SHIFT_4K, in tce_iommu_create_default_window()
771 container->def_window_pending = false; in tce_iommu_create_default_window()
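The default-window matches describe a window created lazily: nothing happens unless def_window_pending is set, the window uses 4K IOMMU pages, and its size comes from the first attached group's table_group. In the sketch below the iommu_group_get_iommudata() lookup, the tce32_size field and the error codes are assumptions about the unmatched lines:

    static long tce_iommu_create_default_window(struct tce_container *container)
    {
            long ret;
            __u64 start_addr = 0;
            struct tce_iommu_group *tcegrp;
            struct iommu_table_group *table_group;

            if (!container->def_window_pending)
                    return 0;

            if (!tce_groups_attached(container))
                    return -ENODEV;

            tcegrp = list_first_entry(&container->group_list,
                            struct tce_iommu_group, next);
            table_group = iommu_group_get_iommudata(tcegrp->grp);
            if (!table_group)
                    return -ENODEV;

            ret = tce_iommu_create_window(container, IOMMU_PAGE_SHIFT_4K,
                            table_group->tce32_size, 1, &start_addr);
            if (!ret)
                    container->def_window_pending = false;

            return ret;
    }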
779 struct tce_container *container = iommu_data; in tce_iommu_ioctl() local
802 BUG_ON(!container); in tce_iommu_ioctl()
803 if (container->mm && container->mm != current->mm) in tce_iommu_ioctl()
812 if (!tce_groups_attached(container)) in tce_iommu_ioctl()
815 tcegrp = list_first_entry(&container->group_list, in tce_iommu_ioctl()
837 container->v2) { in tce_iommu_ioctl()
861 if (!container->enabled) in tce_iommu_ioctl()
876 ret = tce_iommu_create_default_window(container); in tce_iommu_ioctl()
880 num = tce_iommu_find_table(container, param.iova, &tbl); in tce_iommu_ioctl()
905 if (container->v2) in tce_iommu_ioctl()
906 ret = tce_iommu_build_v2(container, tbl, in tce_iommu_ioctl()
912 ret = tce_iommu_build(container, tbl, in tce_iommu_ioctl()
927 if (!container->enabled) in tce_iommu_ioctl()
943 ret = tce_iommu_create_default_window(container); in tce_iommu_ioctl()
947 num = tce_iommu_find_table(container, param.iova, &tbl); in tce_iommu_ioctl()
959 ret = tce_iommu_clear(container, tbl, in tce_iommu_ioctl()
969 if (!container->v2) in tce_iommu_ioctl()
975 ret = tce_iommu_mm_set(container); in tce_iommu_ioctl()
989 mutex_lock(&container->lock); in tce_iommu_ioctl()
990 ret = tce_iommu_register_pages(container, param.vaddr, in tce_iommu_ioctl()
992 mutex_unlock(&container->lock); in tce_iommu_ioctl()
999 if (!container->v2) in tce_iommu_ioctl()
1002 if (!container->mm) in tce_iommu_ioctl()
1018 mutex_lock(&container->lock); in tce_iommu_ioctl()
1019 ret = tce_iommu_unregister_pages(container, param.vaddr, in tce_iommu_ioctl()
1021 mutex_unlock(&container->lock); in tce_iommu_ioctl()
1026 if (container->v2) in tce_iommu_ioctl()
1029 mutex_lock(&container->lock); in tce_iommu_ioctl()
1030 ret = tce_iommu_enable(container); in tce_iommu_ioctl()
1031 mutex_unlock(&container->lock); in tce_iommu_ioctl()
1036 if (container->v2) in tce_iommu_ioctl()
1039 mutex_lock(&container->lock); in tce_iommu_ioctl()
1040 tce_iommu_disable(container); in tce_iommu_ioctl()
1041 mutex_unlock(&container->lock); in tce_iommu_ioctl()
1048 list_for_each_entry(tcegrp, &container->group_list, next) { in tce_iommu_ioctl()
1060 if (!container->v2) in tce_iommu_ioctl()
1063 ret = tce_iommu_mm_set(container); in tce_iommu_ioctl()
1067 if (!tce_groups_attached(container)) in tce_iommu_ioctl()
1082 mutex_lock(&container->lock); in tce_iommu_ioctl()
1084 ret = tce_iommu_create_default_window(container); in tce_iommu_ioctl()
1086 ret = tce_iommu_create_window(container, in tce_iommu_ioctl()
1091 mutex_unlock(&container->lock); in tce_iommu_ioctl()
1101 if (!container->v2) in tce_iommu_ioctl()
1104 ret = tce_iommu_mm_set(container); in tce_iommu_ioctl()
1108 if (!tce_groups_attached(container)) in tce_iommu_ioctl()
1123 if (container->def_window_pending && !remove.start_addr) { in tce_iommu_ioctl()
1124 container->def_window_pending = false; in tce_iommu_ioctl()
1128 mutex_lock(&container->lock); in tce_iommu_ioctl()
1130 ret = tce_iommu_remove_window(container, remove.start_addr); in tce_iommu_ioctl()
1132 mutex_unlock(&container->lock); in tce_iommu_ioctl()
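For orientation, a hedged userspace sketch of the order the v2 ioctl cases above expect: preregister the backing memory with VFIO_IOMMU_SPAPR_REGISTER_MEMORY, then install TCEs with VFIO_IOMMU_MAP_DMA. The container fd, buffer and IOVA are illustrative, a suitable DMA window is assumed to exist already (the default one or one made with VFIO_IOMMU_SPAPR_TCE_CREATE), and error handling is reduced to the return code:

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/vfio.h>

    static int spapr_v2_map(int container_fd, void *buf, uint64_t size, uint64_t iova)
    {
            struct vfio_iommu_spapr_register_memory reg = {
                    .argsz = sizeof(reg),
                    .vaddr = (uint64_t)(uintptr_t)buf,
                    .size = size,
            };
            struct vfio_iommu_type1_dma_map map = {
                    .argsz = sizeof(map),
                    .flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
                    .vaddr = (uint64_t)(uintptr_t)buf,
                    .iova = iova,
                    .size = size,
            };

            /* v2 containers require the memory to be preregistered first */
            if (ioctl(container_fd, VFIO_IOMMU_SPAPR_REGISTER_MEMORY, &reg))
                    return -1;

            /* then the TCEs for the registered range can be installed */
            return ioctl(container_fd, VFIO_IOMMU_MAP_DMA, &map);
    }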
1141 static void tce_iommu_release_ownership(struct tce_container *container, in tce_iommu_release_ownership() argument
1147 struct iommu_table *tbl = container->tables[i]; in tce_iommu_release_ownership()
1152 tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size); in tce_iommu_release_ownership()
1156 container->tables[i] = NULL; in tce_iommu_release_ownership()
1160 static int tce_iommu_take_ownership(struct tce_container *container, in tce_iommu_take_ownership() argument
1182 container->tables[i] = table_group->tables[i]; in tce_iommu_take_ownership()
1187 static void tce_iommu_release_ownership_ddw(struct tce_container *container, in tce_iommu_release_ownership_ddw() argument
1198 if (container->tables[i]) in tce_iommu_release_ownership_ddw()
1204 static long tce_iommu_take_ownership_ddw(struct tce_container *container, in tce_iommu_take_ownership_ddw() argument
1219 struct iommu_table *tbl = container->tables[i]; in tce_iommu_take_ownership_ddw()
1244 struct tce_container *container = iommu_data; in tce_iommu_attach_group() local
1248 mutex_lock(&container->lock); in tce_iommu_attach_group()
1258 if (tce_groups_attached(container) && (!table_group->ops || in tce_iommu_attach_group()
1266 list_for_each_entry(tcegrp, &container->group_list, next) { in tce_iommu_attach_group()
1294 if (container->v2) { in tce_iommu_attach_group()
1298 ret = tce_iommu_take_ownership(container, table_group); in tce_iommu_attach_group()
1300 if (!container->v2) { in tce_iommu_attach_group()
1304 ret = tce_iommu_take_ownership_ddw(container, table_group); in tce_iommu_attach_group()
1305 if (!tce_groups_attached(container) && !container->tables[0]) in tce_iommu_attach_group()
1306 container->def_window_pending = true; in tce_iommu_attach_group()
1311 list_add(&tcegrp->next, &container->group_list); in tce_iommu_attach_group()
1319 mutex_unlock(&container->lock); in tce_iommu_attach_group()
1327 struct tce_container *container = iommu_data; in tce_iommu_detach_group() local
1332 mutex_lock(&container->lock); in tce_iommu_detach_group()
1334 list_for_each_entry(tcegrp, &container->group_list, next) { in tce_iommu_detach_group()
1354 tce_iommu_release_ownership(container, table_group); in tce_iommu_detach_group()
1356 tce_iommu_release_ownership_ddw(container, table_group); in tce_iommu_detach_group()
1359 mutex_unlock(&container->lock); in tce_iommu_detach_group()