// SPDX-License-Identifier: GPL-2.0
/*
 *  Page table allocation functions
 *
 *    Copyright IBM Corp. 2016
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

#ifdef CONFIG_PGSTE

static int page_table_allocate_pgste_min = 0;
static int page_table_allocate_pgste_max = 1;
int page_table_allocate_pgste = 0;
EXPORT_SYMBOL(page_table_allocate_pgste);

static struct ctl_table page_table_sysctl[] = {
	{
		.procname	= "allocate_pgste",
		.data		= &page_table_allocate_pgste,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO | S_IWUSR,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &page_table_allocate_pgste_min,
		.extra2		= &page_table_allocate_pgste_max,
	},
	{ }
};

static struct ctl_table page_table_sysctl_dir[] = {
	{
		.procname	= "vm",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= page_table_sysctl,
	},
	{ }
};

static int __init page_table_register_sysctl(void)
{
	return register_sysctl_table(page_table_sysctl_dir) ? 0 : -ENOMEM;
}
__initcall(page_table_register_sysctl);
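
/*
 * The table above shows up as /proc/sys/vm/allocate_pgste. When it is set
 * to 1, page_table_alloc() hands out full 4K page tables with PGSTEs, which
 * KVM needs for its guests. As an illustrative example (not something
 * enforced by this file), a host could enable it system-wide with:
 *
 *	echo 1 > /proc/sys/vm/allocate_pgste
 */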

#endif /* CONFIG_PGSTE */

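/*
 * A region or segment table (CRST) has 2048 eight-byte entries and is
 * therefore 16K in size, which is why crst_table_alloc() below uses an
 * order-2 (four page) allocation.
 */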
unsigned long *crst_table_alloc(struct mm_struct *mm)
{
	struct page *page = alloc_pages(GFP_KERNEL, 2);

	if (!page)
		return NULL;
	arch_set_page_dat(page, 2);
	return (unsigned long *) page_to_phys(page);
}

void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
	free_pages((unsigned long) table, 2);
}

static void __crst_table_upgrade(void *arg)
{
	struct mm_struct *mm = arg;

	/* we must change all active ASCEs to avoid the creation of new TLBs */
	if (current->active_mm == mm) {
		S390_lowcore.user_asce = mm->context.asce;
		if (current->thread.mm_segment == USER_DS) {
			__ctl_load(S390_lowcore.user_asce, 1, 1);
			/* Mark user-ASCE present in CR1 */
			clear_cpu_flag(CIF_ASCE_PRIMARY);
		}
		if (current->thread.mm_segment == USER_DS_SACF) {
			__ctl_load(S390_lowcore.user_asce, 7, 7);
			/* enable_sacf_uaccess does all or nothing */
			WARN_ON(!test_cpu_flag(CIF_ASCE_SECONDARY));
		}
	}
	__tlb_flush_local();
}

int crst_table_upgrade(struct mm_struct *mm, unsigned long end)
{
	unsigned long *table, *pgd;
	int rc, notify;

	/* upgrade should only happen from 3 to 4, 3 to 5, or 4 to 5 levels */
	VM_BUG_ON(mm->context.asce_limit < _REGION2_SIZE);
	rc = 0;
	notify = 0;
	while (mm->context.asce_limit < end) {
		table = crst_table_alloc(mm);
		if (!table) {
			rc = -ENOMEM;
			break;
		}
		spin_lock_bh(&mm->page_table_lock);
		pgd = (unsigned long *) mm->pgd;
		if (mm->context.asce_limit == _REGION2_SIZE) {
			crst_table_init(table, _REGION2_ENTRY_EMPTY);
			p4d_populate(mm, (p4d_t *) table, (pud_t *) pgd);
			mm->pgd = (pgd_t *) table;
			mm->context.asce_limit = _REGION1_SIZE;
			mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
					   _ASCE_USER_BITS | _ASCE_TYPE_REGION2;
			mm_inc_nr_puds(mm);
		} else {
			crst_table_init(table, _REGION1_ENTRY_EMPTY);
			pgd_populate(mm, (pgd_t *) table, (p4d_t *) pgd);
			mm->pgd = (pgd_t *) table;
			mm->context.asce_limit = -PAGE_SIZE;
			mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
					   _ASCE_USER_BITS | _ASCE_TYPE_REGION1;
		}
		notify = 1;
		spin_unlock_bh(&mm->page_table_lock);
	}
	if (notify)
		on_each_cpu(__crst_table_upgrade, mm, 0);
	return rc;
}
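
/*
 * crst_table_upgrade() is meant to be called from the mmap path when a
 * mapping request exceeds the current asce_limit. A minimal sketch of such a
 * caller ("addr" and "len" are just the usual mmap parameters, not names
 * defined in this file):
 *
 *	if (addr + len > mm->context.asce_limit &&
 *	    addr + len <= TASK_SIZE) {
 *		rc = crst_table_upgrade(mm, addr + len);
 *		if (rc)
 *			return (unsigned long) rc;
 *	}
 *
 * The real upgrade site lives in the arch mmap code; the snippet above only
 * illustrates the calling convention.
 */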

void crst_table_downgrade(struct mm_struct *mm)
{
	pgd_t *pgd;

	/* downgrade should only happen from 3 to 2 levels (compat only) */
	VM_BUG_ON(mm->context.asce_limit != _REGION2_SIZE);

	if (current->active_mm == mm) {
		clear_user_asce();
		__tlb_flush_mm(mm);
	}

	pgd = mm->pgd;
	mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
	mm->context.asce_limit = _REGION3_SIZE;
	mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
			   _ASCE_USER_BITS | _ASCE_TYPE_SEGMENT;
	crst_table_free(mm, (unsigned long *) pgd);

	if (current->active_mm == mm)
		set_user_asce(mm);
}

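/*
 * Atomically toggle the given bits in *v and return the resulting value.
 * The page table code below uses this on the upper byte of page->_refcount
 * to track the allocation state of the two 2K halves of a page.
 */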
static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
{
	unsigned int old, new;

	do {
		old = atomic_read(v);
		new = old ^ bits;
	} while (atomic_cmpxchg(v, old, new) != old);
	return new;
}

#ifdef CONFIG_PGSTE

struct page *page_table_alloc_pgste(struct mm_struct *mm)
{
	struct page *page;
	u64 *table;

	page = alloc_page(GFP_KERNEL);
	if (page) {
		table = (u64 *)page_to_phys(page);
		memset64(table, _PAGE_INVALID, PTRS_PER_PTE);
		memset64(table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
	}
	return page;
}

void page_table_free_pgste(struct page *page)
{
	__free_page(page);
}

#endif /* CONFIG_PGSTE */

/*
 * page table entry allocation/free routines.
 */
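/*
 * An s390 page table has only 256 entries (2K), so two of them fit into one
 * 4K page unless PGSTEs are needed. The allocator below tracks the state of
 * the two 2K halves in the upper byte of page->_refcount (the byte that is
 * not used by the normal reference count):
 *
 *	bit 24/25: the lower/upper 2K half is allocated
 *	bit 28/29: the lower/upper 2K half is pending removal via
 *		   page_table_free_rcu()/__tlb_remove_table()
 *
 * Pages with at least one free half are kept on mm->context.pgtable_list so
 * that the second half can be handed out by a later page_table_alloc().
 */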
unsigned long *page_table_alloc(struct mm_struct *mm)
{
	unsigned long *table;
	struct page *page;
	unsigned int mask, bit;

	/* Try to get a fragment of a 4K page as a 2K page table */
	if (!mm_alloc_pgste(mm)) {
		table = NULL;
		spin_lock_bh(&mm->context.lock);
		if (!list_empty(&mm->context.pgtable_list)) {
			page = list_first_entry(&mm->context.pgtable_list,
						struct page, lru);
			mask = atomic_read(&page->_refcount) >> 24;
			mask = (mask | (mask >> 4)) & 3;
			if (mask != 3) {
				table = (unsigned long *) page_to_phys(page);
				bit = mask & 1;		/* =1 -> second 2K */
				if (bit)
					table += PTRS_PER_PTE;
				atomic_xor_bits(&page->_refcount,
						1U << (bit + 24));
				list_del(&page->lru);
			}
		}
		spin_unlock_bh(&mm->context.lock);
		if (table)
			return table;
	}
	/* Allocate a fresh page */
	page = alloc_page(GFP_KERNEL);
	if (!page)
		return NULL;
	if (!pgtable_page_ctor(page)) {
		__free_page(page);
		return NULL;
	}
	arch_set_page_dat(page, 0);
	/* Initialize page table */
	table = (unsigned long *) page_to_phys(page);
	if (mm_alloc_pgste(mm)) {
		/* Return 4K page table with PGSTEs */
		atomic_xor_bits(&page->_refcount, 3 << 24);
		memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE);
		memset64((u64 *)table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
	} else {
		/* Return the first 2K fragment of the page */
		atomic_xor_bits(&page->_refcount, 1 << 24);
		memset64((u64 *)table, _PAGE_INVALID, 2 * PTRS_PER_PTE);
		spin_lock_bh(&mm->context.lock);
		list_add(&page->lru, &mm->context.pgtable_list);
		spin_unlock_bh(&mm->context.lock);
	}
	return table;
}
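
/*
 * page_table_alloc()/page_table_free() are normally reached through the
 * generic pte allocation hooks. Roughly, asm/pgalloc.h wires them up like
 * this (shown for illustration only; see that header for the exact
 * definitions and parameter lists):
 *
 *	#define pte_alloc_one(mm, addr)	((pte_t *)page_table_alloc(mm))
 *	#define pte_free(mm, pte)	page_table_free(mm, (unsigned long *)pte)
 */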

void page_table_free(struct mm_struct *mm, unsigned long *table)
{
	struct page *page;
	unsigned int bit, mask;

	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (!mm_alloc_pgste(mm)) {
		/* Free 2K page table fragment of a 4K page */
		bit = (__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t));
		spin_lock_bh(&mm->context.lock);
		mask = atomic_xor_bits(&page->_refcount, 1U << (bit + 24));
		mask >>= 24;
		if (mask & 3)
			list_add(&page->lru, &mm->context.pgtable_list);
		else
			list_del(&page->lru);
		spin_unlock_bh(&mm->context.lock);
		if (mask != 0)
			return;
	} else {
		atomic_xor_bits(&page->_refcount, 3U << 24);
	}

	pgtable_page_dtor(page);
	__free_page(page);
}

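/*
 * page_table_free_rcu() is the deferred counterpart of page_table_free():
 * the 2K fragment (or 4K PGSTE table) is only marked as pending in
 * page->_refcount and queued on the mmu_gather batch here; the actual free
 * happens in __tlb_remove_table() once it is safe with respect to concurrent
 * lockless page table walkers.
 */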
void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table,
			 unsigned long vmaddr)
{
	struct mm_struct *mm;
	struct page *page;
	unsigned int bit, mask;

	mm = tlb->mm;
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (mm_alloc_pgste(mm)) {
		gmap_unlink(mm, table, vmaddr);
		table = (unsigned long *) (__pa(table) | 3);
		tlb_remove_table(tlb, table);
		return;
	}
	bit = (__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t));
	spin_lock_bh(&mm->context.lock);
	mask = atomic_xor_bits(&page->_refcount, 0x11U << (bit + 24));
	mask >>= 24;
	if (mask & 3)
		list_add_tail(&page->lru, &mm->context.pgtable_list);
	else
		list_del(&page->lru);
	spin_unlock_bh(&mm->context.lock);
	table = (unsigned long *) (__pa(table) | (1U << bit));
	tlb_remove_table(tlb, table);
}

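/*
 * Tables queued via tlb_remove_table() carry a tag in the two low bits of
 * the pointer so that __tlb_remove_table() knows how to free them:
 *
 *	0 - order-2 CRST table (pmd, pud or p4d)
 *	1 - lower 2K half of a 4K page table
 *	2 - upper 2K half of a 4K page table
 *	3 - full 4K page table with PGSTEs
 */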
static void __tlb_remove_table(void *_table)
{
	unsigned int mask = (unsigned long) _table & 3;
	void *table = (void *)((unsigned long) _table ^ mask);
	struct page *page = pfn_to_page(__pa(table) >> PAGE_SHIFT);

	switch (mask) {
	case 0:		/* pmd, pud, or p4d */
		free_pages((unsigned long) table, 2);
		break;
	case 1:		/* lower 2K of a 4K page table */
	case 2:		/* higher 2K of a 4K page table */
		mask = atomic_xor_bits(&page->_refcount, mask << (4 + 24));
		mask >>= 24;
		if (mask != 0)
			break;
		/* fallthrough */
	case 3:		/* 4K page table with pgstes */
		if (mask & 3)
			atomic_xor_bits(&page->_refcount, 3 << 24);
		pgtable_page_dtor(page);
		__free_page(page);
		break;
	}
}

static void tlb_remove_table_smp_sync(void *arg)
{
	/* Simply deliver the interrupt */
}

static void tlb_remove_table_one(void *table)
{
	/*
	 * This isn't an RCU grace period and hence the page-tables cannot be
	 * assumed to be actually RCU-freed.
	 *
	 * It is however sufficient for software page-table walkers that rely
	 * on IRQ disabling. See the comment near struct mmu_table_batch.
	 */
	smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
	__tlb_remove_table(table);
}

static void tlb_remove_table_rcu(struct rcu_head *head)
{
	struct mmu_table_batch *batch;
	int i;

	batch = container_of(head, struct mmu_table_batch, rcu);

	for (i = 0; i < batch->nr; i++)
		__tlb_remove_table(batch->tables[i]);

	free_page((unsigned long)batch);
}

void tlb_table_flush(struct mmu_gather *tlb)
{
	struct mmu_table_batch **batch = &tlb->batch;

	if (*batch) {
		call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
		*batch = NULL;
	}
}

void tlb_remove_table(struct mmu_gather *tlb, void *table)
{
	struct mmu_table_batch **batch = &tlb->batch;

	tlb->mm->context.flush_mm = 1;
	if (*batch == NULL) {
		*batch = (struct mmu_table_batch *)
			__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
		if (*batch == NULL) {
			__tlb_flush_mm_lazy(tlb->mm);
			tlb_remove_table_one(table);
			return;
		}
		(*batch)->nr = 0;
	}
	(*batch)->tables[(*batch)->nr++] = table;
	if ((*batch)->nr == MAX_TABLE_BATCH)
		tlb_flush_mmu(tlb);
}
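
/*
 * The functions above plug into the generic mmu_gather machinery. A rough
 * sketch of the usual teardown flow (simplified; the generic code does more
 * than shown here):
 *
 *	tlb_gather_mmu(&tlb, mm, start, end);
 *	unmap_vmas(&tlb, ...);		// pte level: page_table_free_rcu()
 *	free_pgtables(&tlb, ...);	// upper levels: tlb_remove_table()
 *	tlb_finish_mmu(&tlb, start, end); // eventually tlb_table_flush()
 *
 * tlb_table_flush() lets an RCU grace period (call_rcu_sched()) pass before
 * __tlb_remove_table() really frees the queued tables.
 */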

/*
 * Base infrastructure required to generate basic asces, region, segment,
 * and page tables that do not make use of enhanced features like EDAT1.
 */

static struct kmem_cache *base_pgt_cache;

static unsigned long base_pgt_alloc(void)
{
	u64 *table;

	table = kmem_cache_alloc(base_pgt_cache, GFP_KERNEL);
	if (table)
		memset64(table, _PAGE_INVALID, PTRS_PER_PTE);
	return (unsigned long) table;
}

static void base_pgt_free(unsigned long table)
{
	kmem_cache_free(base_pgt_cache, (void *) table);
}

static unsigned long base_crst_alloc(unsigned long val)
{
	unsigned long table;

	table = __get_free_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
	if (table)
		crst_table_init((unsigned long *)table, val);
	return table;
}

static void base_crst_free(unsigned long table)
{
	free_pages(table, CRST_ALLOC_ORDER);
}

#define BASE_ADDR_END_FUNC(NAME, SIZE)					\
static inline unsigned long base_##NAME##_addr_end(unsigned long addr,	\
						   unsigned long end)	\
{									\
	unsigned long next = (addr + (SIZE)) & ~((SIZE) - 1);		\
									\
	return (next - 1) < (end - 1) ? next : end;			\
}
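
/*
 * Each BASE_ADDR_END_FUNC() instantiation below yields a helper that rounds
 * addr up to the next SIZE-aligned boundary and clamps it to end. For
 * example, base_segment_addr_end(addr, end) behaves roughly like:
 *
 *	next = round_up(addr + 1, _SEGMENT_SIZE);
 *	return min(next, end);
 *
 * except that the comparison is done on (x - 1) so that a value of 0 (the
 * wrapped-around top of the address space) is treated as the largest
 * possible address rather than the smallest.
 */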

BASE_ADDR_END_FUNC(page,    _PAGE_SIZE)
BASE_ADDR_END_FUNC(segment, _SEGMENT_SIZE)
BASE_ADDR_END_FUNC(region3, _REGION3_SIZE)
BASE_ADDR_END_FUNC(region2, _REGION2_SIZE)
BASE_ADDR_END_FUNC(region1, _REGION1_SIZE)

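/*
 * LRA (load real address) translates the given virtual address through the
 * currently attached DAT tables and returns the resulting real address. It
 * is used below to populate the non-EDAT page tables with the translations
 * that the regular kernel mapping already provides.
 */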
static inline unsigned long base_lra(unsigned long address)
{
	unsigned long real;

	asm volatile(
		"	lra	%0,0(%1)\n"
		: "=d" (real) : "a" (address) : "cc");
	return real;
}

static int base_page_walk(unsigned long origin, unsigned long addr,
			  unsigned long end, int alloc)
{
	unsigned long *pte, next;

	if (!alloc)
		return 0;
	pte = (unsigned long *) origin;
	pte += (addr & _PAGE_INDEX) >> _PAGE_SHIFT;
	do {
		next = base_page_addr_end(addr, end);
		*pte = base_lra(addr);
	} while (pte++, addr = next, addr < end);
	return 0;
}

static int base_segment_walk(unsigned long origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *ste, next, table;
	int rc;

	ste = (unsigned long *) origin;
	ste += (addr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
	do {
		next = base_segment_addr_end(addr, end);
		if (*ste & _SEGMENT_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_pgt_alloc();
			if (!table)
				return -ENOMEM;
			*ste = table | _SEGMENT_ENTRY;
		}
		table = *ste & _SEGMENT_ENTRY_ORIGIN;
		rc = base_page_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_pgt_free(table);
		cond_resched();
	} while (ste++, addr = next, addr < end);
	return 0;
}

static int base_region3_walk(unsigned long origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *rtte, next, table;
	int rc;

	rtte = (unsigned long *) origin;
	rtte += (addr & _REGION3_INDEX) >> _REGION3_SHIFT;
	do {
		next = base_region3_addr_end(addr, end);
		if (*rtte & _REGION_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_crst_alloc(_SEGMENT_ENTRY_EMPTY);
			if (!table)
				return -ENOMEM;
			*rtte = table | _REGION3_ENTRY;
		}
		table = *rtte & _REGION_ENTRY_ORIGIN;
		rc = base_segment_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_crst_free(table);
	} while (rtte++, addr = next, addr < end);
	return 0;
}

static int base_region2_walk(unsigned long origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *rste, next, table;
	int rc;

	rste = (unsigned long *) origin;
	rste += (addr & _REGION2_INDEX) >> _REGION2_SHIFT;
	do {
		next = base_region2_addr_end(addr, end);
		if (*rste & _REGION_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_crst_alloc(_REGION3_ENTRY_EMPTY);
			if (!table)
				return -ENOMEM;
			*rste = table | _REGION2_ENTRY;
		}
		table = *rste & _REGION_ENTRY_ORIGIN;
		rc = base_region3_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_crst_free(table);
	} while (rste++, addr = next, addr < end);
	return 0;
}

static int base_region1_walk(unsigned long origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *rfte, next, table;
	int rc;

	rfte = (unsigned long *) origin;
	rfte += (addr & _REGION1_INDEX) >> _REGION1_SHIFT;
	do {
		next = base_region1_addr_end(addr, end);
		if (*rfte & _REGION_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_crst_alloc(_REGION2_ENTRY_EMPTY);
			if (!table)
				return -ENOMEM;
			*rfte = table | _REGION1_ENTRY;
		}
		table = *rfte & _REGION_ENTRY_ORIGIN;
		rc = base_region2_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_crst_free(table);
	} while (rfte++, addr = next, addr < end);
	return 0;
}

/**
 * base_asce_free - free asce and tables returned from base_asce_alloc()
 * @asce: asce to be freed
 *
 * Frees all region, segment, and page tables that were allocated with a
 * corresponding base_asce_alloc() call.
 */
void base_asce_free(unsigned long asce)
{
	unsigned long table = asce & _ASCE_ORIGIN;

	if (!asce)
		return;
	switch (asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_SEGMENT:
		base_segment_walk(table, 0, _REGION3_SIZE, 0);
		break;
	case _ASCE_TYPE_REGION3:
		base_region3_walk(table, 0, _REGION2_SIZE, 0);
		break;
	case _ASCE_TYPE_REGION2:
		base_region2_walk(table, 0, _REGION1_SIZE, 0);
		break;
	case _ASCE_TYPE_REGION1:
		base_region1_walk(table, 0, -_PAGE_SIZE, 0);
		break;
	}
	base_crst_free(table);
}

static int base_pgt_cache_init(void)
{
	static DEFINE_MUTEX(base_pgt_cache_mutex);
	unsigned long sz = _PAGE_TABLE_SIZE;

	if (base_pgt_cache)
		return 0;
	mutex_lock(&base_pgt_cache_mutex);
	if (!base_pgt_cache)
		base_pgt_cache = kmem_cache_create("base_pgt", sz, sz, 0, NULL);
	mutex_unlock(&base_pgt_cache_mutex);
	return base_pgt_cache ? 0 : -ENOMEM;
}

/**
 * base_asce_alloc - create kernel mapping without enhanced DAT features
 * @addr: virtual start address of kernel mapping
 * @num_pages: number of consecutive pages
 *
 * Generate an asce, including all required region, segment and page tables,
 * that can be used to access the virtual kernel mapping. The difference is
 * that the returned asce does not make use of any enhanced DAT features like
 * e.g. large pages. This is required for some I/O functions that pass an
 * asce, like e.g. some service call requests.
 *
 * Note: the returned asce may NEVER be attached to any cpu. It may only be
 *	 used for I/O requests. tlb entries that might result because the
 *	 asce was attached to a cpu won't be cleared.
 */
unsigned long base_asce_alloc(unsigned long addr, unsigned long num_pages)
{
	unsigned long asce, table, end;
	int rc;

	if (base_pgt_cache_init())
		return 0;
	end = addr + num_pages * PAGE_SIZE;
	if (end <= _REGION3_SIZE) {
		table = base_crst_alloc(_SEGMENT_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_segment_walk(table, addr, end, 1);
		asce = table | _ASCE_TYPE_SEGMENT | _ASCE_TABLE_LENGTH;
	} else if (end <= _REGION2_SIZE) {
		table = base_crst_alloc(_REGION3_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_region3_walk(table, addr, end, 1);
		asce = table | _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
	} else if (end <= _REGION1_SIZE) {
		table = base_crst_alloc(_REGION2_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_region2_walk(table, addr, end, 1);
		asce = table | _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
	} else {
		table = base_crst_alloc(_REGION1_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_region1_walk(table, addr, end, 1);
		asce = table | _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH;
	}
	if (rc) {
		base_asce_free(asce);
		asce = 0;
	}
	return asce;
}
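
/*
 * A caller that needs to hand a DAT translation of a kernel buffer to some
 * I/O or service call interface would pair the two functions above roughly
 * like this (the buffer and request names are made up for the example):
 *
 *	asce = base_asce_alloc((unsigned long) buf, num_pages);
 *	if (!asce)
 *		return -ENOMEM;
 *	rc = issue_request(asce, ...);	// hypothetical I/O helper
 *	base_asce_free(asce);
 */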