/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/kprobes.h>
#include <linux/sched.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/smp.h>
#include <linux/cdev.h>
#include <linux/compat.h>
#include <asm/hardwall.h>
#include <asm/traps.h>
#include <asm/siginfo.h>
#include <asm/irq_regs.h>

#include <arch/interrupts.h>
#include <arch/spr_def.h>


/*
 * Implement a per-cpu "hardwall" resource class such as UDN or IPI.
 * We use "hardwall" nomenclature throughout for historical reasons.
 * The lock here controls access to the list data structure as well as
 * to the items on the list.
 */
struct hardwall_type {
	int index;
	int is_xdn;
	int is_idn;
	int disabled;
	const char *name;
	struct list_head list;
	spinlock_t lock;
	struct proc_dir_entry *proc_dir;
};

enum hardwall_index {
	HARDWALL_UDN = 0,
#ifndef __tilepro__
	HARDWALL_IDN = 1,
	HARDWALL_IPI = 2,
#endif
	_HARDWALL_TYPES
};
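
/*
 * _HARDWALL_TYPES counts the enum entries above and must stay in sync
 * with the arch-wide HARDWALL_TYPES constant; the BUILD_BUG_ON()s in
 * hardwall_ioctl() below enforce this at compile time.
 */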

static struct hardwall_type hardwall_types[] = {
	[HARDWALL_UDN] = {  /* user-space access to UDN */
		.index = HARDWALL_UDN,
		.is_xdn = 1,
		.is_idn = 0,
		.disabled = 0,
		.name = "udn",
		.list = LIST_HEAD_INIT(hardwall_types[HARDWALL_UDN].list),
		.lock = __SPIN_LOCK_UNLOCKED(hardwall_types[HARDWALL_UDN].lock),
		.proc_dir = NULL,
	},
#ifndef __tilepro__
	[HARDWALL_IDN] = {  /* user-space access to IDN */
		.index = HARDWALL_IDN,
		.is_xdn = 1,
		.is_idn = 1,
		.disabled = 1,  /* disabled pending hypervisor support */
		.name = "idn",
		.list = LIST_HEAD_INIT(hardwall_types[HARDWALL_IDN].list),
		.lock = __SPIN_LOCK_UNLOCKED(hardwall_types[HARDWALL_IDN].lock),
		.proc_dir = NULL,
	},
	[HARDWALL_IPI] = {  /* access to user-space IPI */
		.index = HARDWALL_IPI,
		.is_xdn = 0,
		.is_idn = 0,
		.disabled = 0,
		.name = "ipi",
		.list = LIST_HEAD_INIT(hardwall_types[HARDWALL_IPI].list),
		.lock = __SPIN_LOCK_UNLOCKED(hardwall_types[HARDWALL_IPI].lock),
		.proc_dir = NULL,
	},
#endif
};

/*
 * This data structure tracks the cpu data, etc., associated
 * one-to-one with a "struct file *" from opening a hardwall device file.
 * Note that the file's private data points back to this structure.
 */
struct hardwall_info {
	struct list_head list;          /* for hardwall_types.list */
	struct list_head task_head;     /* head of tasks in this hardwall */
	struct hardwall_type *type;     /* type of this resource */
	struct cpumask cpumask;         /* cpus reserved */
	int id;                         /* integer id for this hardwall */
	int teardown_in_progress;       /* are we tearing this one down? */

	/* Remaining fields only valid for user-network resources. */
	int ulhc_x;                     /* upper left hand corner x coord */
	int ulhc_y;                     /* upper left hand corner y coord */
	int width;                      /* rectangle width */
	int height;                     /* rectangle height */
#if CHIP_HAS_REV1_XDN()
	atomic_t xdn_pending_count;     /* cores in phase 1 of drain */
#endif
};


/* /proc/tile/hardwall */
static struct proc_dir_entry *hardwall_proc_dir;

/* Functions to manage files in /proc/tile/hardwall. */
static void hardwall_add_proc(struct hardwall_info *);
static void hardwall_remove_proc(struct hardwall_info *);

/* Allow disabling UDN access. */
static int __init noudn(char *str)
{
	pr_info("User-space UDN access is disabled\n");
	hardwall_types[HARDWALL_UDN].disabled = 1;
	return 0;
}
early_param("noudn", noudn);

#ifndef __tilepro__
/* Allow disabling IDN access. */
static int __init noidn(char *str)
{
	pr_info("User-space IDN access is disabled\n");
	hardwall_types[HARDWALL_IDN].disabled = 1;
	return 0;
}
early_param("noidn", noidn);

/* Allow disabling IPI access. */
static int __init noipi(char *str)
{
	pr_info("User-space IPI access is disabled\n");
	hardwall_types[HARDWALL_IPI].disabled = 1;
	return 0;
}
early_param("noipi", noipi);
#endif


/*
 * Low-level primitives for UDN/IDN
 */

#ifdef __tilepro__
#define mtspr_XDN(hwt, name, val) \
	do { (void)(hwt); __insn_mtspr(SPR_UDN_##name, (val)); } while (0)
#define mtspr_MPL_XDN(hwt, name, val) \
	do { (void)(hwt); __insn_mtspr(SPR_MPL_UDN_##name, (val)); } while (0)
#define mfspr_XDN(hwt, name) \
	((void)(hwt), __insn_mfspr(SPR_UDN_##name))
#else
#define mtspr_XDN(hwt, name, val) \
	do { \
		if ((hwt)->is_idn) \
			__insn_mtspr(SPR_IDN_##name, (val)); \
		else \
			__insn_mtspr(SPR_UDN_##name, (val)); \
	} while (0)
#define mtspr_MPL_XDN(hwt, name, val) \
	do { \
		if ((hwt)->is_idn) \
			__insn_mtspr(SPR_MPL_IDN_##name, (val)); \
		else \
			__insn_mtspr(SPR_MPL_UDN_##name, (val)); \
	} while (0)
#define mfspr_XDN(hwt, name) \
	((hwt)->is_idn ? __insn_mfspr(SPR_IDN_##name) : __insn_mfspr(SPR_UDN_##name))
#endif

/* Set a CPU bit if the CPU is online. */
#define cpu_online_set(cpu, dst) do { \
	if (cpu_online(cpu)) \
		cpumask_set_cpu(cpu, dst); \
} while (0)



/* Does the given rectangle contain the given x,y coordinate? */
static int contains(struct hardwall_info *r, int x, int y)
{
	return (x >= r->ulhc_x && x < r->ulhc_x + r->width) &&
		(y >= r->ulhc_y && y < r->ulhc_y + r->height);
}

/* Compute the rectangle parameters and validate the cpumask. */
static int check_rectangle(struct hardwall_info *r, struct cpumask *mask)
{
	int x, y, cpu, ulhc, lrhc;

	/* The first cpu is the ULHC, the last the LRHC. */
	ulhc = find_first_bit(cpumask_bits(mask), nr_cpumask_bits);
	lrhc = find_last_bit(cpumask_bits(mask), nr_cpumask_bits);

	/* Compute the rectangle attributes from the cpus. */
	r->ulhc_x = cpu_x(ulhc);
	r->ulhc_y = cpu_y(ulhc);
	r->width = cpu_x(lrhc) - r->ulhc_x + 1;
	r->height = cpu_y(lrhc) - r->ulhc_y + 1;

	/* Width and height must be positive. */
	if (r->width <= 0 || r->height <= 0)
		return -EINVAL;

	/* Confirm that the cpumask is exactly the rectangle. */
	for (y = 0, cpu = 0; y < smp_height; ++y)
		for (x = 0; x < smp_width; ++x, ++cpu)
			if (cpumask_test_cpu(cpu, mask) != contains(r, x, y))
				return -EINVAL;

	/*
	 * Note that offline cpus can't be drained when this user network
	 * rectangle eventually closes.  We used to detect this
	 * situation and print a warning, but it annoyed users and
	 * they ignored it anyway, so now we just return without a
	 * warning.
	 */
	return 0;
}
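
/*
 * Worked example (hypothetical 8x8 grid, so cpu == y * 8 + x):
 * a cpumask of cpus {9, 10, 17, 18} gives ulhc = 9 -> (1,1) and
 * lrhc = 18 -> (2,2), so width == height == 2, and the scan above
 * confirms the mask covers exactly that 2x2 rectangle.  Drop cpu 10
 * from the mask and the scan fails with -EINVAL.
 */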

/*
 * Hardware management of hardwall setup, teardown, trapping,
 * and enabling/disabling PL0 access to the networks.
 */

/* Bit field values to mask together for writes to SPR_XDN_DIRECTION_PROTECT */
enum direction_protect {
	N_PROTECT = (1 << 0),
	E_PROTECT = (1 << 1),
	S_PROTECT = (1 << 2),
	W_PROTECT = (1 << 3),
	C_PROTECT = (1 << 4),
};
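
/*
 * Example: the tile at the upper-left corner of a rectangle programs
 * W_PROTECT | N_PROTECT (0x9) so that traffic trying to cross its west
 * or north links traps; see hardwall_setup_func() below.
 */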

static inline int xdn_which_interrupt(struct hardwall_type *hwt)
{
#ifndef __tilepro__
	if (hwt->is_idn)
		return INT_IDN_FIREWALL;
#endif
	return INT_UDN_FIREWALL;
}

static void enable_firewall_interrupts(struct hardwall_type *hwt)
{
	arch_local_irq_unmask_now(xdn_which_interrupt(hwt));
}

static void disable_firewall_interrupts(struct hardwall_type *hwt)
{
	arch_local_irq_mask_now(xdn_which_interrupt(hwt));
}

/* Set up hardwall on this cpu based on the passed hardwall_info. */
static void hardwall_setup_func(void *info)
{
	struct hardwall_info *r = info;
	struct hardwall_type *hwt = r->type;

	int cpu = smp_processor_id();  /* on_each_cpu disables preemption */
	int x = cpu_x(cpu);
	int y = cpu_y(cpu);
	int bits = 0;
	if (x == r->ulhc_x)
		bits |= W_PROTECT;
	if (x == r->ulhc_x + r->width - 1)
		bits |= E_PROTECT;
	if (y == r->ulhc_y)
		bits |= N_PROTECT;
	if (y == r->ulhc_y + r->height - 1)
		bits |= S_PROTECT;
	BUG_ON(bits == 0);
	mtspr_XDN(hwt, DIRECTION_PROTECT, bits);
	enable_firewall_interrupts(hwt);
}

/* Set up all cpus on edge of rectangle to enable/disable hardwall SPRs. */
static void hardwall_protect_rectangle(struct hardwall_info *r)
{
	int x, y, cpu, delta;
	struct cpumask rect_cpus;

	cpumask_clear(&rect_cpus);

	/* First include the top and bottom edges */
	cpu = r->ulhc_y * smp_width + r->ulhc_x;
	delta = (r->height - 1) * smp_width;
	for (x = 0; x < r->width; ++x, ++cpu) {
		cpu_online_set(cpu, &rect_cpus);
		cpu_online_set(cpu + delta, &rect_cpus);
	}

	/* Then the left and right edges */
	cpu -= r->width;
	delta = r->width - 1;
	for (y = 0; y < r->height; ++y, cpu += smp_width) {
		cpu_online_set(cpu, &rect_cpus);
		cpu_online_set(cpu + delta, &rect_cpus);
	}

	/* Then tell all the cpus to set up their protection SPR */
	on_each_cpu_mask(&rect_cpus, hardwall_setup_func, r, 1);
}

/* Entered from INT_xDN_FIREWALL interrupt vector with irqs disabled. */
void __kprobes do_hardwall_trap(struct pt_regs *regs, int fault_num)
{
	struct hardwall_info *rect;
	struct hardwall_type *hwt;
	struct task_struct *p;
	struct siginfo info;
	int cpu = smp_processor_id();
	int found_processes;
	struct pt_regs *old_regs = set_irq_regs(regs);

	irq_enter();

	/* Figure out which network trapped. */
	switch (fault_num) {
#ifndef __tilepro__
	case INT_IDN_FIREWALL:
		hwt = &hardwall_types[HARDWALL_IDN];
		break;
#endif
	case INT_UDN_FIREWALL:
		hwt = &hardwall_types[HARDWALL_UDN];
		break;
	default:
		BUG();
	}
	BUG_ON(hwt->disabled);

	/* This tile trapped a network access; find the rectangle. */
	spin_lock(&hwt->lock);
	list_for_each_entry(rect, &hwt->list, list) {
		if (cpumask_test_cpu(cpu, &rect->cpumask))
			break;
	}

	/*
	 * It shouldn't be possible not to find this cpu on the
	 * rectangle list, since only cpus in rectangles get hardwalled.
	 * The hardwall is only removed after the user network is drained.
	 */
	BUG_ON(&rect->list == &hwt->list);

	/*
	 * If we already started teardown on this hardwall, don't worry;
	 * the abort signal has been sent and we are just waiting for things
	 * to quiesce.
	 */
	if (rect->teardown_in_progress) {
		pr_notice("cpu %d: detected %s hardwall violation %#lx while teardown already in progress\n",
			  cpu, hwt->name,
			  (long)mfspr_XDN(hwt, DIRECTION_PROTECT));
		goto done;
	}

	/*
	 * Kill off any process that is activated in this rectangle.
	 * We bypass security to deliver the signal, since it must be
	 * one of the activated processes that generated the user network
	 * message that caused this trap, and all the activated
	 * processes shared a single open file so are pretty tightly
	 * bound together from a security point of view to begin with.
	 */
	rect->teardown_in_progress = 1;
	wmb(); /* Ensure visibility of rectangle before notifying processes. */
	pr_notice("cpu %d: detected %s hardwall violation %#lx...\n",
		  cpu, hwt->name, (long)mfspr_XDN(hwt, DIRECTION_PROTECT));
	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_HARDWALL;
	found_processes = 0;
	list_for_each_entry(p, &rect->task_head,
			    thread.hardwall[hwt->index].list) {
		BUG_ON(p->thread.hardwall[hwt->index].info != rect);
		if (!(p->flags & PF_EXITING)) {
			found_processes = 1;
			pr_notice("hardwall: killing %d\n", p->pid);
			do_send_sig_info(info.si_signo, &info, p, false);
		}
	}
	if (!found_processes)
		pr_notice("hardwall: no associated processes!\n");

 done:
	spin_unlock(&hwt->lock);

	/*
	 * We have to disable firewall interrupts now, or else when we
	 * return from this handler, we will simply re-interrupt back to
	 * it.  However, we can't clear the protection bits, since we
	 * haven't yet drained the network, and that would allow packets
	 * to cross out of the hardwall region.
	 */
	disable_firewall_interrupts(hwt);

	irq_exit();
	set_irq_regs(old_regs);
}
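
/*
 * Note on mechanism (standard TILE MPL behavior, not specific to this
 * file): writing 1 to an MPL_*_SET_0 SPR routes the corresponding
 * resource to protection level 0 so user space can use it; writing 1
 * to the matching MPL_*_SET_1 SPR routes it back to the kernel at PL1.
 * The grant/restrict pair below relies on this.
 */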

/* Allow access from user space to the user network. */
void grant_hardwall_mpls(struct hardwall_type *hwt)
{
#ifndef __tilepro__
	if (!hwt->is_xdn) {
		__insn_mtspr(SPR_MPL_IPI_0_SET_0, 1);
		return;
	}
#endif
	mtspr_MPL_XDN(hwt, ACCESS_SET_0, 1);
	mtspr_MPL_XDN(hwt, AVAIL_SET_0, 1);
	mtspr_MPL_XDN(hwt, COMPLETE_SET_0, 1);
	mtspr_MPL_XDN(hwt, TIMER_SET_0, 1);
#if !CHIP_HAS_REV1_XDN()
	mtspr_MPL_XDN(hwt, REFILL_SET_0, 1);
	mtspr_MPL_XDN(hwt, CA_SET_0, 1);
#endif
}

/* Deny access from user space to the user network. */
void restrict_hardwall_mpls(struct hardwall_type *hwt)
{
#ifndef __tilepro__
	if (!hwt->is_xdn) {
		__insn_mtspr(SPR_MPL_IPI_0_SET_1, 1);
		return;
	}
#endif
	mtspr_MPL_XDN(hwt, ACCESS_SET_1, 1);
	mtspr_MPL_XDN(hwt, AVAIL_SET_1, 1);
	mtspr_MPL_XDN(hwt, COMPLETE_SET_1, 1);
	mtspr_MPL_XDN(hwt, TIMER_SET_1, 1);
#if !CHIP_HAS_REV1_XDN()
	mtspr_MPL_XDN(hwt, REFILL_SET_1, 1);
	mtspr_MPL_XDN(hwt, CA_SET_1, 1);
#endif
}
/* Grant or restrict access as necessary for the task we're switching to. */
void hardwall_switch_tasks(struct task_struct *prev,
			   struct task_struct *next)
{
	int i;
	for (i = 0; i < HARDWALL_TYPES; ++i) {
		if (prev->thread.hardwall[i].info != NULL) {
			if (next->thread.hardwall[i].info == NULL)
				restrict_hardwall_mpls(&hardwall_types[i]);
		} else if (next->thread.hardwall[i].info != NULL) {
			grant_hardwall_mpls(&hardwall_types[i]);
		}
	}
}

/* Does this task have the right to IPI the given cpu? */
int hardwall_ipi_valid(int cpu)
{
#ifdef __tilegx__
	struct hardwall_info *info =
		current->thread.hardwall[HARDWALL_IPI].info;
	return info && cpumask_test_cpu(cpu, &info->cpumask);
#else
	return 0;
#endif
}

/*
 * Code to create, activate, deactivate, and destroy hardwall resources.
 */

/* Create a hardwall for the given resource */
static struct hardwall_info *hardwall_create(struct hardwall_type *hwt,
					     size_t size,
					     const unsigned char __user *bits)
{
	struct hardwall_info *iter, *info;
	struct cpumask mask;
	unsigned long flags;
	int rc;

	/* Reject crazy sizes out of hand, a la sys_mbind(). */
	if (size > PAGE_SIZE)
		return ERR_PTR(-EINVAL);

	/* Copy whatever fits into a cpumask. */
	if (copy_from_user(&mask, bits, min(sizeof(struct cpumask), size)))
		return ERR_PTR(-EFAULT);

	/*
	 * If the size was short, clear the rest of the mask;
	 * otherwise validate that the rest of the user mask was zero
	 * (we don't try hard to be efficient when validating huge masks).
	 */
	if (size < sizeof(struct cpumask)) {
		memset((char *)&mask + size, 0, sizeof(struct cpumask) - size);
	} else if (size > sizeof(struct cpumask)) {
		size_t i;
		for (i = sizeof(struct cpumask); i < size; ++i) {
			char c;
			if (get_user(c, &bits[i]))
				return ERR_PTR(-EFAULT);
			if (c)
				return ERR_PTR(-EINVAL);
		}
	}

	/* Allocate a new hardwall_info optimistically. */
	info = kzalloc(sizeof(struct hardwall_info), GFP_KERNEL);
	if (info == NULL)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&info->task_head);
	info->type = hwt;

	/* Compute the rectangle size and validate that it's plausible. */
	cpumask_copy(&info->cpumask, &mask);
	info->id = find_first_bit(cpumask_bits(&mask), nr_cpumask_bits);
	if (hwt->is_xdn) {
		rc = check_rectangle(info, &mask);
		if (rc != 0) {
			kfree(info);
			return ERR_PTR(rc);
		}
	}

	/*
	 * Eliminate cpus that are not part of this Linux client.
	 * Note that this allows for configurations that we might not want to
	 * support, such as one client on every even cpu, another client on
	 * every odd cpu.
	 */
	cpumask_and(&info->cpumask, &info->cpumask, cpu_online_mask);

	/* Confirm it doesn't overlap and add it to the list. */
	spin_lock_irqsave(&hwt->lock, flags);
	list_for_each_entry(iter, &hwt->list, list) {
		if (cpumask_intersects(&iter->cpumask, &info->cpumask)) {
			spin_unlock_irqrestore(&hwt->lock, flags);
			kfree(info);
			return ERR_PTR(-EBUSY);
		}
	}
	list_add_tail(&info->list, &hwt->list);
	spin_unlock_irqrestore(&hwt->lock, flags);

	/* Set up appropriate hardwalling on all affected cpus. */
	if (hwt->is_xdn)
		hardwall_protect_rectangle(info);

	/* Create a /proc/tile/hardwall entry. */
	hardwall_add_proc(info);

	return info;
}

/* Activate a given hardwall on this cpu for this process. */
static int hardwall_activate(struct hardwall_info *info)
{
	int cpu;
	unsigned long flags;
	struct task_struct *p = current;
	struct thread_struct *ts = &p->thread;
	struct hardwall_type *hwt;

	/* Require a hardwall. */
	if (info == NULL)
		return -ENODATA;

	/* Not allowed to activate a hardwall that is being torn down. */
	if (info->teardown_in_progress)
		return -EINVAL;

	/*
	 * Get our affinity; if we're not bound to this tile uniquely,
	 * we can't access the network registers.
	 */
	if (cpumask_weight(&p->cpus_allowed) != 1)
		return -EPERM;

	/* Make sure we are bound to a cpu assigned to this resource. */
	cpu = smp_processor_id();
	BUG_ON(cpumask_first(&p->cpus_allowed) != cpu);
	if (!cpumask_test_cpu(cpu, &info->cpumask))
		return -EINVAL;

	/* If we are already bound to this hardwall, it's a no-op. */
	hwt = info->type;
	if (ts->hardwall[hwt->index].info) {
		BUG_ON(ts->hardwall[hwt->index].info != info);
		return 0;
	}

	/* Success!  This process gets to use the resource on this cpu. */
	ts->hardwall[hwt->index].info = info;
	spin_lock_irqsave(&hwt->lock, flags);
	list_add(&ts->hardwall[hwt->index].list, &info->task_head);
	spin_unlock_irqrestore(&hwt->lock, flags);
	grant_hardwall_mpls(hwt);
	printk(KERN_DEBUG "Pid %d (%s) activated for %s hardwall: cpu %d\n",
	       p->pid, p->comm, hwt->name, cpu);
	return 0;
}

/*
 * Deactivate a task's hardwall.  Must hold lock for hardwall_type.
 * This method may be called from exit_thread(), so we don't want to
 * rely on too many fields of struct task_struct still being valid.
 * We assume the cpus_allowed, pid, and comm fields are still valid.
 */
static void _hardwall_deactivate(struct hardwall_type *hwt,
				 struct task_struct *task)
{
	struct thread_struct *ts = &task->thread;

	if (cpumask_weight(&task->cpus_allowed) != 1) {
		pr_err("pid %d (%s) releasing %s hardwall with an affinity mask containing %d cpus!\n",
		       task->pid, task->comm, hwt->name,
		       cpumask_weight(&task->cpus_allowed));
		BUG();
	}

	BUG_ON(ts->hardwall[hwt->index].info == NULL);
	ts->hardwall[hwt->index].info = NULL;
	list_del(&ts->hardwall[hwt->index].list);
	if (task == current)
		restrict_hardwall_mpls(hwt);
}

/* Deactivate a task's hardwall. */
static int hardwall_deactivate(struct hardwall_type *hwt,
			       struct task_struct *task)
{
	unsigned long flags;
	int activated;

	spin_lock_irqsave(&hwt->lock, flags);
	activated = (task->thread.hardwall[hwt->index].info != NULL);
	if (activated)
		_hardwall_deactivate(hwt, task);
	spin_unlock_irqrestore(&hwt->lock, flags);

	if (!activated)
		return -EINVAL;

	printk(KERN_DEBUG "Pid %d (%s) deactivated for %s hardwall: cpu %d\n",
	       task->pid, task->comm, hwt->name, raw_smp_processor_id());
	return 0;
}

void hardwall_deactivate_all(struct task_struct *task)
{
	int i;
	for (i = 0; i < HARDWALL_TYPES; ++i)
		if (task->thread.hardwall[i].info)
			hardwall_deactivate(&hardwall_types[i], task);
}

/* Stop the switch before draining the network. */
static void stop_xdn_switch(void *arg)
{
#if !CHIP_HAS_REV1_XDN()
	/* Freeze the switch and the demux. */
	__insn_mtspr(SPR_UDN_SP_FREEZE,
		     SPR_UDN_SP_FREEZE__SP_FRZ_MASK |
		     SPR_UDN_SP_FREEZE__DEMUX_FRZ_MASK |
		     SPR_UDN_SP_FREEZE__NON_DEST_EXT_MASK);
#else
	/*
	 * Drop all packets bound for the core or off the edge.
	 * We rely on the normal hardwall protection setup code
	 * to have set the low four bits to trigger firewall interrupts,
	 * and shift those bits up to trigger "drop on send" semantics,
	 * plus adding "drop on send to core" for all switches.
	 * In practice it seems the switches latch the DIRECTION_PROTECT
	 * SPR so they won't start dropping if they're already
	 * delivering the last message to the core, but it doesn't
	 * hurt to enable it here.
	 */
	struct hardwall_type *hwt = arg;
	unsigned long protect = mfspr_XDN(hwt, DIRECTION_PROTECT);
	mtspr_XDN(hwt, DIRECTION_PROTECT, (protect | C_PROTECT) << 5);
#endif
}
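
/*
 * Concrete instance of the shift above: a west-edge tile that had
 * W_PROTECT (0x8) latched now writes (0x8 | C_PROTECT) << 5 == 0x300,
 * i.e. the same direction moved into the "drop on send" bit position,
 * plus drop-on-send-to-core.
 */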

static void empty_xdn_demuxes(struct hardwall_type *hwt)
{
#ifndef __tilepro__
	if (hwt->is_idn) {
		while (__insn_mfspr(SPR_IDN_DATA_AVAIL) & (1 << 0))
			(void) __tile_idn0_receive();
		while (__insn_mfspr(SPR_IDN_DATA_AVAIL) & (1 << 1))
			(void) __tile_idn1_receive();
		return;
	}
#endif
	while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 0))
		(void) __tile_udn0_receive();
	while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 1))
		(void) __tile_udn1_receive();
	while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 2))
		(void) __tile_udn2_receive();
	while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 3))
		(void) __tile_udn3_receive();
}

/* Drain all the state from a stopped switch. */
static void drain_xdn_switch(void *arg)
{
	struct hardwall_info *info = arg;
	struct hardwall_type *hwt = info->type;

#if CHIP_HAS_REV1_XDN()
	/*
	 * The switches have been configured to drop any messages
	 * destined for cores (or off the edge of the rectangle).
	 * But the current message may continue to be delivered,
	 * so we wait until all the cores have finished any pending
	 * messages before we stop draining.
	 */
	int pending = mfspr_XDN(hwt, PENDING);
	while (pending--) {
		empty_xdn_demuxes(hwt);
		if (hwt->is_idn)
			__tile_idn_send(0);
		else
			__tile_udn_send(0);
	}
	atomic_dec(&info->xdn_pending_count);
	while (atomic_read(&info->xdn_pending_count))
		empty_xdn_demuxes(hwt);
#else
	int i;
	int from_tile_words, ca_count;

	/* Empty out the 5 switch point fifos. */
	for (i = 0; i < 5; i++) {
		int words, j;
		__insn_mtspr(SPR_UDN_SP_FIFO_SEL, i);
		words = __insn_mfspr(SPR_UDN_SP_STATE) & 0xF;
		for (j = 0; j < words; j++)
			(void) __insn_mfspr(SPR_UDN_SP_FIFO_DATA);
		BUG_ON((__insn_mfspr(SPR_UDN_SP_STATE) & 0xF) != 0);
	}

	/* Dump out the 3 word fifo at top. */
	from_tile_words = (__insn_mfspr(SPR_UDN_DEMUX_STATUS) >> 10) & 0x3;
	for (i = 0; i < from_tile_words; i++)
		(void) __insn_mfspr(SPR_UDN_DEMUX_WRITE_FIFO);

	/* Empty out demuxes. */
	empty_xdn_demuxes(hwt);

	/* Empty out catch all. */
	ca_count = __insn_mfspr(SPR_UDN_DEMUX_CA_COUNT);
	for (i = 0; i < ca_count; i++)
		(void) __insn_mfspr(SPR_UDN_CA_DATA);
	BUG_ON(__insn_mfspr(SPR_UDN_DEMUX_CA_COUNT) != 0);

	/* Clear demux logic. */
	__insn_mtspr(SPR_UDN_DEMUX_CTL, 1);

	/*
	 * Write switch state; experimentation indicates that 0xc3000
	 * is an idle switch point.
	 */
	for (i = 0; i < 5; i++) {
		__insn_mtspr(SPR_UDN_SP_FIFO_SEL, i);
		__insn_mtspr(SPR_UDN_SP_STATE, 0xc3000);
	}
#endif
}

/* Reset random XDN state registers at boot up and during hardwall teardown. */
static void reset_xdn_network_state(struct hardwall_type *hwt)
{
	if (hwt->disabled)
		return;

	/* Clear out other random registers so we have a clean slate. */
	mtspr_XDN(hwt, DIRECTION_PROTECT, 0);
	mtspr_XDN(hwt, AVAIL_EN, 0);
	mtspr_XDN(hwt, DEADLOCK_TIMEOUT, 0);

#if !CHIP_HAS_REV1_XDN()
	/* Reset UDN coordinates to their standard value */
	{
		unsigned int cpu = smp_processor_id();
		unsigned int x = cpu_x(cpu);
		unsigned int y = cpu_y(cpu);
		__insn_mtspr(SPR_UDN_TILE_COORD, (x << 18) | (y << 7));
	}

	/* Set demux tags to predefined values and enable them. */
	__insn_mtspr(SPR_UDN_TAG_VALID, 0xf);
	__insn_mtspr(SPR_UDN_TAG_0, (1 << 0));
	__insn_mtspr(SPR_UDN_TAG_1, (1 << 1));
	__insn_mtspr(SPR_UDN_TAG_2, (1 << 2));
	__insn_mtspr(SPR_UDN_TAG_3, (1 << 3));

	/* Set other rev0 random registers to a clean state. */
	__insn_mtspr(SPR_UDN_REFILL_EN, 0);
	__insn_mtspr(SPR_UDN_DEMUX_QUEUE_SEL, 0);
	__insn_mtspr(SPR_UDN_SP_FIFO_SEL, 0);

	/* Start the switch and demux. */
	__insn_mtspr(SPR_UDN_SP_FREEZE, 0);
#endif
}

void reset_network_state(void)
{
	reset_xdn_network_state(&hardwall_types[HARDWALL_UDN]);
#ifndef __tilepro__
	reset_xdn_network_state(&hardwall_types[HARDWALL_IDN]);
#endif
}

/* Restart an XDN switch after draining. */
static void restart_xdn_switch(void *arg)
{
	struct hardwall_type *hwt = arg;

#if CHIP_HAS_REV1_XDN()
	/* One last drain step to avoid races with injection and draining. */
	empty_xdn_demuxes(hwt);
#endif

	reset_xdn_network_state(hwt);

	/* Disable firewall interrupts. */
	disable_firewall_interrupts(hwt);
}

/* Last reference to a hardwall is gone, so clear the network. */
static void hardwall_destroy(struct hardwall_info *info)
{
	struct task_struct *task;
	struct hardwall_type *hwt;
	unsigned long flags;

	/* Make sure this file actually represents a hardwall. */
	if (info == NULL)
		return;

	/*
	 * Deactivate any remaining tasks.  It's possible to race with
	 * some other thread that is exiting and hasn't yet called
	 * deactivate (when freeing its thread_info), so we carefully
	 * deactivate any remaining tasks before freeing the
	 * hardwall_info object itself.
	 */
	hwt = info->type;
	info->teardown_in_progress = 1;
	spin_lock_irqsave(&hwt->lock, flags);
	list_for_each_entry(task, &info->task_head,
			    thread.hardwall[hwt->index].list)
		_hardwall_deactivate(hwt, task);
	spin_unlock_irqrestore(&hwt->lock, flags);

	if (hwt->is_xdn) {
		/* Configure the switches for draining the user network. */
		printk(KERN_DEBUG
		       "Clearing %s hardwall rectangle %dx%d %d,%d\n",
		       hwt->name, info->width, info->height,
		       info->ulhc_x, info->ulhc_y);
		on_each_cpu_mask(&info->cpumask, stop_xdn_switch, hwt, 1);

		/* Drain the network. */
#if CHIP_HAS_REV1_XDN()
		atomic_set(&info->xdn_pending_count,
			   cpumask_weight(&info->cpumask));
		on_each_cpu_mask(&info->cpumask, drain_xdn_switch, info, 0);
#else
		on_each_cpu_mask(&info->cpumask, drain_xdn_switch, info, 1);
#endif

		/* Restart switch and disable firewall. */
		on_each_cpu_mask(&info->cpumask, restart_xdn_switch, hwt, 1);
	}

	/* Remove the /proc/tile/hardwall entry. */
	hardwall_remove_proc(info);

	/* Now free the hardwall from the list. */
	spin_lock_irqsave(&hwt->lock, flags);
	BUG_ON(!list_empty(&info->task_head));
	list_del(&info->list);
	spin_unlock_irqrestore(&hwt->lock, flags);
	kfree(info);
}


static int hardwall_proc_show(struct seq_file *sf, void *v)
{
	struct hardwall_info *info = sf->private;

	seq_printf(sf, "%*pbl\n", cpumask_pr_args(&info->cpumask));
	return 0;
}

static int hardwall_proc_open(struct inode *inode,
			      struct file *file)
{
	return single_open(file, hardwall_proc_show, PDE_DATA(inode));
}

static const struct file_operations hardwall_proc_fops = {
	.open		= hardwall_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void hardwall_add_proc(struct hardwall_info *info)
{
	char buf[64];
	snprintf(buf, sizeof(buf), "%d", info->id);
	proc_create_data(buf, 0444, info->type->proc_dir,
			 &hardwall_proc_fops, info);
}

static void hardwall_remove_proc(struct hardwall_info *info)
{
	char buf[64];
	snprintf(buf, sizeof(buf), "%d", info->id);
	remove_proc_entry(buf, info->type->proc_dir);
}

int proc_pid_hardwall(struct seq_file *m, struct pid_namespace *ns,
		      struct pid *pid, struct task_struct *task)
{
	int i;
	for (i = 0; i < HARDWALL_TYPES; ++i) {
		struct hardwall_info *info = task->thread.hardwall[i].info;
		if (info)
			seq_printf(m, "%s: %d\n", info->type->name, info->id);
	}
	return 0;
}

void proc_tile_hardwall_init(struct proc_dir_entry *root)
{
	int i;
	for (i = 0; i < HARDWALL_TYPES; ++i) {
		struct hardwall_type *hwt = &hardwall_types[i];
		if (hwt->disabled)
			continue;
		if (hardwall_proc_dir == NULL)
			hardwall_proc_dir = proc_mkdir("hardwall", root);
		hwt->proc_dir = proc_mkdir(hwt->name, hardwall_proc_dir);
	}
}


/*
 * Character device support via ioctl/close.
 */

static long hardwall_ioctl(struct file *file, unsigned int a, unsigned long b)
{
	struct hardwall_info *info = file->private_data;
	int minor = iminor(file->f_mapping->host);
	struct hardwall_type *hwt;

	if (_IOC_TYPE(a) != HARDWALL_IOCTL_BASE)
		return -EINVAL;

	BUILD_BUG_ON(HARDWALL_TYPES != _HARDWALL_TYPES);
	BUILD_BUG_ON(HARDWALL_TYPES != ARRAY_SIZE(hardwall_types));

	if (minor < 0 || minor >= HARDWALL_TYPES)
		return -EINVAL;
	hwt = &hardwall_types[minor];
	WARN_ON(info && hwt != info->type);

	switch (_IOC_NR(a)) {
	case _HARDWALL_CREATE:
		if (hwt->disabled)
			return -ENOSYS;
		if (info != NULL)
			return -EALREADY;
		info = hardwall_create(hwt, _IOC_SIZE(a),
				       (const unsigned char __user *)b);
		if (IS_ERR(info))
			return PTR_ERR(info);
		file->private_data = info;
		return 0;

	case _HARDWALL_ACTIVATE:
		return hardwall_activate(info);

	case _HARDWALL_DEACTIVATE:
		if (current->thread.hardwall[hwt->index].info != info)
			return -EINVAL;
		return hardwall_deactivate(hwt, current);

	case _HARDWALL_GET_ID:
		return info ? info->id : -EINVAL;

	default:
		return -EINVAL;
	}
}
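
/*
 * A minimal user-space sketch of the ioctl flow above.  The
 * HARDWALL_CREATE()/HARDWALL_ACTIVATE macros come from the uapi
 * <asm/hardwall.h>, and the device path depends on how udev names the
 * "hardwall" minors, so both are assumptions here, not guarantees:
 *
 *	// Pin this thread to one cpu inside the rectangle first, since
 *	// hardwall_activate() rejects tasks not bound to a single cpu.
 *	sched_setaffinity(0, sizeof(one_cpu), &one_cpu);
 *	int fd = open("/dev/hardwall/udn", O_RDONLY);
 *	ioctl(fd, HARDWALL_CREATE(sizeof(mask)), &mask);  // reserve cpus
 *	ioctl(fd, HARDWALL_ACTIVATE);	// grant this task PL0 access
 *	... use the network ...
 *	close(fd);	// flush() deactivates, release() destroys
 */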

#ifdef CONFIG_COMPAT
static long hardwall_compat_ioctl(struct file *file,
				  unsigned int a, unsigned long b)
{
	/* Sign-extend the argument so it can be used as a pointer. */
	return hardwall_ioctl(file, a, (unsigned long)compat_ptr(b));
}
#endif

/* The user process closed the file; revoke access to user networks. */
static int hardwall_flush(struct file *file, fl_owner_t owner)
{
	struct hardwall_info *info = file->private_data;
	struct task_struct *task, *tmp;
	unsigned long flags;

	if (info) {
		/*
		 * NOTE: if multiple threads are activated on this hardwall
		 * file, the other threads will continue having access to the
		 * user network until they are context-switched out and back
		 * in again.
		 *
		 * NOTE: A NULL files pointer means the task is being torn
		 * down, so in that case we also deactivate it.
		 */
		struct hardwall_type *hwt = info->type;
		spin_lock_irqsave(&hwt->lock, flags);
		list_for_each_entry_safe(task, tmp, &info->task_head,
					 thread.hardwall[hwt->index].list) {
			if (task->files == owner || task->files == NULL)
				_hardwall_deactivate(hwt, task);
		}
		spin_unlock_irqrestore(&hwt->lock, flags);
	}

	return 0;
}

/* This hardwall is gone, so destroy it. */
static int hardwall_release(struct inode *inode, struct file *file)
{
	hardwall_destroy(file->private_data);
	return 0;
}

static const struct file_operations dev_hardwall_fops = {
	.open		= nonseekable_open,
	.unlocked_ioctl = hardwall_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= hardwall_compat_ioctl,
#endif
	.flush		= hardwall_flush,
	.release	= hardwall_release,
};

static struct cdev hardwall_dev;

static int __init dev_hardwall_init(void)
{
	int rc;
	dev_t dev;

	rc = alloc_chrdev_region(&dev, 0, HARDWALL_TYPES, "hardwall");
	if (rc < 0)
		return rc;
	cdev_init(&hardwall_dev, &dev_hardwall_fops);
	rc = cdev_add(&hardwall_dev, dev, HARDWALL_TYPES);
	if (rc < 0) {
		/* Don't leak the device numbers if cdev_add() fails. */
		unregister_chrdev_region(dev, HARDWALL_TYPES);
		return rc;
	}

	return 0;
}
late_initcall(dev_hardwall_init);