1 /*
2 * IBM PowerPC Virtual I/O Infrastructure Support.
3 *
4 * Copyright (c) 2003,2008 IBM Corp.
5 * Dave Engebretsen engebret@us.ibm.com
6 * Santiago Leon santil@us.ibm.com
7 * Hollis Blanchard <hollisb@us.ibm.com>
8 * Stephen Rothwell
9 * Robert Jennings <rcjenn@us.ibm.com>
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License
13 * as published by the Free Software Foundation; either version
14 * 2 of the License, or (at your option) any later version.
15 */
16
17 #include <linux/cpu.h>
18 #include <linux/types.h>
19 #include <linux/delay.h>
20 #include <linux/stat.h>
21 #include <linux/device.h>
22 #include <linux/init.h>
23 #include <linux/slab.h>
24 #include <linux/console.h>
25 #include <linux/export.h>
26 #include <linux/mm.h>
27 #include <linux/dma-mapping.h>
28 #include <linux/kobject.h>
29
30 #include <asm/iommu.h>
31 #include <asm/dma.h>
32 #include <asm/vio.h>
33 #include <asm/prom.h>
34 #include <asm/firmware.h>
35 #include <asm/tce.h>
36 #include <asm/page.h>
37 #include <asm/hvcall.h>
38
39 static struct vio_dev vio_bus_device = { /* fake "parent" device */
40 .name = "vio",
41 .type = "",
42 .dev.init_name = "vio",
43 .dev.bus = &vio_bus_type,
44 };
45
46 #ifdef CONFIG_PPC_SMLPAR
47 /**
48 * vio_cmo_pool - A pool of IO memory for CMO use
49 *
50 * @size: The size of the pool in bytes
51 * @free: The amount of free memory in the pool
52 */
53 struct vio_cmo_pool {
54 size_t size;
55 size_t free;
56 };
57
58 /* How many ms to delay queued balance work */
59 #define VIO_CMO_BALANCE_DELAY 100
60
61 /* Portion out IO memory to CMO devices by this chunk size */
62 #define VIO_CMO_BALANCE_CHUNK 131072
63
64 /**
65 * vio_cmo_dev_entry - A device that is CMO-enabled and requires entitlement
66 *
67 * @viodev: struct vio_dev pointer
68 * @list: pointer to other devices on bus that are being tracked
69 */
70 struct vio_cmo_dev_entry {
71 struct vio_dev *viodev;
72 struct list_head list;
73 };
74
75 /**
76 * vio_cmo - VIO bus accounting structure for CMO entitlement
77 *
78 * @lock: spinlock for entire structure
79 * @balance_q: work queue for balancing system entitlement
80 * @device_list: list of CMO-enabled devices requiring entitlement
81 * @entitled: total system entitlement in bytes
82 * @reserve: pool of memory from which devices reserve entitlement, incl. spare
83 * @excess: pool of excess entitlement not needed for device reserves or spare
84 * @spare: IO memory for device hotplug functionality
85 * @min: minimum necessary for system operation
86 * @desired: desired memory for system operation
87 * @curr: bytes currently allocated
88 * @high: high water mark for IO data usage
89 */
90 struct vio_cmo {
91 spinlock_t lock;
92 struct delayed_work balance_q;
93 struct list_head device_list;
94 size_t entitled;
95 struct vio_cmo_pool reserve;
96 struct vio_cmo_pool excess;
97 size_t spare;
98 size_t min;
99 size_t desired;
100 size_t curr;
101 size_t high;
102 } vio_cmo;
103
104 /**
105 * vio_cmo_num_OF_devs - Count the number of OF devices that have DMA windows
106 */
107 static int vio_cmo_num_OF_devs(void)
108 {
109 struct device_node *node_vroot;
110 int count = 0;
111
112 /*
113 * Count the number of vdevice entries with an
114 * ibm,my-dma-window OF property
115 */
116 node_vroot = of_find_node_by_name(NULL, "vdevice");
117 if (node_vroot) {
118 struct device_node *of_node;
119 struct property *prop;
120
121 for_each_child_of_node(node_vroot, of_node) {
122 prop = of_find_property(of_node, "ibm,my-dma-window",
123 NULL);
124 if (prop)
125 count++;
126 }
127 }
128 of_node_put(node_vroot);
129 return count;
130 }
131
132 /**
133 * vio_cmo_alloc - allocate IO memory for CMO-enabled devices
134 *
135 * @viodev: VIO device requesting IO memory
136 * @size: size of allocation requested
137 *
138 * Allocations come from memory reserved for the devices and any excess
139 * IO memory available to all devices. The spare pool used to service
140 * hotplug must be equal to %VIO_CMO_MIN_ENT for the excess pool to be
141 * made available.
142 *
143 * Return codes:
144 * 0 for successful allocation and -ENOMEM for a failure
145 */
146 static inline int vio_cmo_alloc(struct vio_dev *viodev, size_t size)
147 {
148 unsigned long flags;
149 size_t reserve_free = 0;
150 size_t excess_free = 0;
151 int ret = -ENOMEM;
152
153 spin_lock_irqsave(&vio_cmo.lock, flags);
154
155 /* Determine the amount of free entitlement available in reserve */
156 if (viodev->cmo.entitled > viodev->cmo.allocated)
157 reserve_free = viodev->cmo.entitled - viodev->cmo.allocated;
158
159 /* If spare is not fulfilled, the excess pool can not be used. */
160 if (vio_cmo.spare >= VIO_CMO_MIN_ENT)
161 excess_free = vio_cmo.excess.free;
162
163 /* The request can be satisfied */
164 if ((reserve_free + excess_free) >= size) {
165 vio_cmo.curr += size;
166 if (vio_cmo.curr > vio_cmo.high)
167 vio_cmo.high = vio_cmo.curr;
168 viodev->cmo.allocated += size;
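/* Satisfy the request from the device's reserve first; any
 * remainder is drawn from the excess pool. */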
169 size -= min(reserve_free, size);
170 vio_cmo.excess.free -= size;
171 ret = 0;
172 }
173
174 spin_unlock_irqrestore(&vio_cmo.lock, flags);
175 return ret;
176 }
177
178 /**
179 * vio_cmo_dealloc - deallocate IO memory from CMO-enabled devices
180 * @viodev: VIO device freeing IO memory
181 * @size: size of deallocation
182 *
183 * IO memory is freed by the device back to the correct memory pools.
184 * The spare pool is replenished first from either memory pool, then
185 * the reserve pool is used to reduce device entitlement, the excess
186 * pool is used to increase the reserve pool toward the desired entitlement
187 * target, and then the remaining memory is returned to the pools.
188 *
189 */
190 static inline void vio_cmo_dealloc(struct vio_dev *viodev, size_t size)
191 {
192 unsigned long flags;
193 size_t spare_needed = 0;
194 size_t excess_freed = 0;
195 size_t reserve_freed = size;
196 size_t tmp;
197 int balance = 0;
198
199 spin_lock_irqsave(&vio_cmo.lock, flags);
200 vio_cmo.curr -= size;
201
202 /* Amount of memory freed from the excess pool */
203 if (viodev->cmo.allocated > viodev->cmo.entitled) {
204 excess_freed = min(reserve_freed, (viodev->cmo.allocated -
205 viodev->cmo.entitled));
206 reserve_freed -= excess_freed;
207 }
208
209 /* Remove allocation from device */
210 viodev->cmo.allocated -= (reserve_freed + excess_freed);
211
212 /* Spare is a subset of the reserve pool, replenish it first. */
213 spare_needed = VIO_CMO_MIN_ENT - vio_cmo.spare;
214
215 /*
216 * Replenish the spare in the reserve pool from the excess pool.
217 * This moves entitlement into the reserve pool.
218 */
219 if (spare_needed && excess_freed) {
220 tmp = min(excess_freed, spare_needed);
221 vio_cmo.excess.size -= tmp;
222 vio_cmo.reserve.size += tmp;
223 vio_cmo.spare += tmp;
224 excess_freed -= tmp;
225 spare_needed -= tmp;
226 balance = 1;
227 }
228
229 /*
230 * Replenish the spare in the reserve pool from the reserve pool.
231 * This removes entitlement from the device down to VIO_CMO_MIN_ENT,
232 * if needed, and gives it to the spare pool. The amount of used
233 * memory in this pool does not change.
234 */
235 if (spare_needed && reserve_freed) {
236 tmp = min3(spare_needed, reserve_freed, (viodev->cmo.entitled - VIO_CMO_MIN_ENT));
237
238 vio_cmo.spare += tmp;
239 viodev->cmo.entitled -= tmp;
240 reserve_freed -= tmp;
241 spare_needed -= tmp;
242 balance = 1;
243 }
244
245 /*
246 * Increase the reserve pool until the desired allocation is met.
247 * Move an allocation freed from the excess pool into the reserve
248 * pool and schedule a balance operation.
249 */
250 if (excess_freed && (vio_cmo.desired > vio_cmo.reserve.size)) {
251 tmp = min(excess_freed, (vio_cmo.desired - vio_cmo.reserve.size));
252
253 vio_cmo.excess.size -= tmp;
254 vio_cmo.reserve.size += tmp;
255 excess_freed -= tmp;
256 balance = 1;
257 }
258
259 /* Return any remaining freed excess memory to the excess pool */
260 if (excess_freed)
261 vio_cmo.excess.free += excess_freed;
262
263 if (balance)
264 schedule_delayed_work(&vio_cmo.balance_q, VIO_CMO_BALANCE_DELAY);
265 spin_unlock_irqrestore(&vio_cmo.lock, flags);
266 }
267
268 /**
269 * vio_cmo_entitlement_update - Manage system entitlement changes
270 *
271 * @new_entitlement: new system entitlement to attempt to accommodate
272 *
273 * Increases in entitlement will be used to fulfill the spare entitlement
274 * and the rest is given to the excess pool. Decreases, if they are
275 * possible, come from the excess pool and from unused device entitlement
276 *
277 * Returns: 0 on success, -ENOMEM when change can not be made
278 */
279 int vio_cmo_entitlement_update(size_t new_entitlement)
280 {
281 struct vio_dev *viodev;
282 struct vio_cmo_dev_entry *dev_ent;
283 unsigned long flags;
284 size_t avail, delta, tmp;
285
286 spin_lock_irqsave(&vio_cmo.lock, flags);
287
288 /* Entitlement increases */
289 if (new_entitlement > vio_cmo.entitled) {
290 delta = new_entitlement - vio_cmo.entitled;
291
292 /* Fulfill spare allocation */
293 if (vio_cmo.spare < VIO_CMO_MIN_ENT) {
294 tmp = min(delta, (VIO_CMO_MIN_ENT - vio_cmo.spare));
295 vio_cmo.spare += tmp;
296 vio_cmo.reserve.size += tmp;
297 delta -= tmp;
298 }
299
300 /* Remaining new allocation goes to the excess pool */
301 vio_cmo.entitled += delta;
302 vio_cmo.excess.size += delta;
303 vio_cmo.excess.free += delta;
304
305 goto out;
306 }
307
308 /* Entitlement decreases */
309 delta = vio_cmo.entitled - new_entitlement;
310 avail = vio_cmo.excess.free;
311
312 /*
313 * Need to check how much unused entitlement each device can
314 * sacrifice to fulfill entitlement change.
315 */
316 list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
317 if (avail >= delta)
318 break;
319
320 viodev = dev_ent->viodev;
321 if ((viodev->cmo.entitled > viodev->cmo.allocated) &&
322 (viodev->cmo.entitled > VIO_CMO_MIN_ENT))
323 avail += viodev->cmo.entitled -
324 max_t(size_t, viodev->cmo.allocated,
325 VIO_CMO_MIN_ENT);
326 }
327
328 if (delta <= avail) {
329 vio_cmo.entitled -= delta;
330
331 /* Take entitlement from the excess pool first */
332 tmp = min(vio_cmo.excess.free, delta);
333 vio_cmo.excess.size -= tmp;
334 vio_cmo.excess.free -= tmp;
335 delta -= tmp;
336
337 /*
338 * Remove all but VIO_CMO_MIN_ENT bytes from devices
339 * until entitlement change is served
340 */
341 list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
342 if (!delta)
343 break;
344
345 viodev = dev_ent->viodev;
346 tmp = 0;
347 if ((viodev->cmo.entitled > viodev->cmo.allocated) &&
348 (viodev->cmo.entitled > VIO_CMO_MIN_ENT))
349 tmp = viodev->cmo.entitled -
350 max_t(size_t, viodev->cmo.allocated,
351 VIO_CMO_MIN_ENT);
352 viodev->cmo.entitled -= min(tmp, delta);
353 delta -= min(tmp, delta);
354 }
355 } else {
356 spin_unlock_irqrestore(&vio_cmo.lock, flags);
357 return -ENOMEM;
358 }
359
360 out:
361 schedule_delayed_work(&vio_cmo.balance_q, 0);
362 spin_unlock_irqrestore(&vio_cmo.lock, flags);
363 return 0;
364 }
365
366 /**
367 * vio_cmo_balance - Balance entitlement among devices
368 *
369 * @work: work queue structure for this operation
370 *
371 * Any system entitlement above the minimum needed for devices, or
372 * already allocated to devices, can be distributed to the devices.
373 * The list of devices is iterated through to recalculate the desired
374 * entitlement level and to determine how much entitlement above the
375 * minimum entitlement is allocated to devices.
376 *
377 * Small chunks of the available entitlement are given to devices until
378 * their requirements are fulfilled or there is no entitlement left to give.
379 * Upon completion sizes of the reserve and excess pools are calculated.
380 *
381 * The system minimum entitlement level is also recalculated here.
382 * Entitlement will be reserved for devices even after vio_bus_remove to
383 * accommodate reloading the driver. The OF tree is walked to count the
384 * number of devices present and this will remove entitlement for devices
385 * that have actually left the system after having vio_bus_remove called.
386 */
387 static void vio_cmo_balance(struct work_struct *work)
388 {
389 struct vio_cmo *cmo;
390 struct vio_dev *viodev;
391 struct vio_cmo_dev_entry *dev_ent;
392 unsigned long flags;
393 size_t avail = 0, level, chunk, need;
394 int devcount = 0, fulfilled;
395
396 cmo = container_of(work, struct vio_cmo, balance_q.work);
397
398 spin_lock_irqsave(&vio_cmo.lock, flags);
399
400 /* Calculate minimum entitlement and fulfill spare */
401 cmo->min = vio_cmo_num_OF_devs() * VIO_CMO_MIN_ENT;
402 BUG_ON(cmo->min > cmo->entitled);
403 cmo->spare = min_t(size_t, VIO_CMO_MIN_ENT, (cmo->entitled - cmo->min));
404 cmo->min += cmo->spare;
405 cmo->desired = cmo->min;
406
407 /*
408 * Determine how much entitlement is available and reset device
409 * entitlements
410 */
411 avail = cmo->entitled - cmo->spare;
412 list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
413 viodev = dev_ent->viodev;
414 devcount++;
415 viodev->cmo.entitled = VIO_CMO_MIN_ENT;
416 cmo->desired += (viodev->cmo.desired - VIO_CMO_MIN_ENT);
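/* Memory already allocated to a device (or its minimum
 * entitlement) is not available for redistribution. */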
417 avail -= max_t(size_t, viodev->cmo.allocated, VIO_CMO_MIN_ENT);
418 }
419
420 /*
421 * Having provided each device with the minimum entitlement, loop
422 * over the devices portioning out the remaining entitlement
423 * until there is nothing left.
424 */
425 level = VIO_CMO_MIN_ENT;
426 while (avail) {
427 fulfilled = 0;
428 list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
429 viodev = dev_ent->viodev;
430
431 if (viodev->cmo.desired <= level) {
432 fulfilled++;
433 continue;
434 }
435
436 /*
437 * Give the device up to VIO_CMO_BALANCE_CHUNK
438 * bytes of entitlement, but do not exceed the
439 * desired level of entitlement for the device.
440 */
441 chunk = min_t(size_t, avail, VIO_CMO_BALANCE_CHUNK);
442 chunk = min(chunk, (viodev->cmo.desired -
443 viodev->cmo.entitled));
444 viodev->cmo.entitled += chunk;
445
446 /*
447 * If the memory for this entitlement increase was
448 * already allocated to the device it does not come
449 * from the available pool being portioned out.
450 */
451 need = max(viodev->cmo.allocated, viodev->cmo.entitled)-
452 max(viodev->cmo.allocated, level);
453 avail -= need;
454
455 }
456 if (fulfilled == devcount)
457 break;
458 level += VIO_CMO_BALANCE_CHUNK;
459 }
460
461 /* Calculate new reserve and excess pool sizes */
462 cmo->reserve.size = cmo->min;
463 cmo->excess.free = 0;
464 cmo->excess.size = 0;
465 need = 0;
466 list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
467 viodev = dev_ent->viodev;
468 /* Calculated reserve size above the minimum entitlement */
469 if (viodev->cmo.entitled)
470 cmo->reserve.size += (viodev->cmo.entitled -
471 VIO_CMO_MIN_ENT);
472 /* Calculated used excess entitlement */
473 if (viodev->cmo.allocated > viodev->cmo.entitled)
474 need += viodev->cmo.allocated - viodev->cmo.entitled;
475 }
476 cmo->excess.size = cmo->entitled - cmo->reserve.size;
477 cmo->excess.free = cmo->excess.size - need;
478
479 cancel_delayed_work(to_delayed_work(work));
480 spin_unlock_irqrestore(&vio_cmo.lock, flags);
481 }
482
483 static void *vio_dma_iommu_alloc_coherent(struct device *dev, size_t size,
484 dma_addr_t *dma_handle, gfp_t flag,
485 struct dma_attrs *attrs)
486 {
487 struct vio_dev *viodev = to_vio_dev(dev);
488 void *ret;
489
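/* Coherent allocations are accounted against CMO entitlement in
 * whole system pages. */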
490 if (vio_cmo_alloc(viodev, roundup(size, PAGE_SIZE))) {
491 atomic_inc(&viodev->cmo.allocs_failed);
492 return NULL;
493 }
494
495 ret = dma_iommu_ops.alloc(dev, size, dma_handle, flag, attrs);
496 if (unlikely(ret == NULL)) {
497 vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE));
498 atomic_inc(&viodev->cmo.allocs_failed);
499 }
500
501 return ret;
502 }
503
504 static void vio_dma_iommu_free_coherent(struct device *dev, size_t size,
505 void *vaddr, dma_addr_t dma_handle,
506 struct dma_attrs *attrs)
507 {
508 struct vio_dev *viodev = to_vio_dev(dev);
509
510 dma_iommu_ops.free(dev, size, vaddr, dma_handle, attrs);
511
512 vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE));
513 }
514
515 static dma_addr_t vio_dma_iommu_map_page(struct device *dev, struct page *page,
516 unsigned long offset, size_t size,
517 enum dma_data_direction direction,
518 struct dma_attrs *attrs)
519 {
520 struct vio_dev *viodev = to_vio_dev(dev);
521 struct iommu_table *tbl;
522 dma_addr_t ret = DMA_ERROR_CODE;
523
524 tbl = get_iommu_table_base(dev);
525 if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)))) {
526 atomic_inc(&viodev->cmo.allocs_failed);
527 return ret;
528 }
529
530 ret = dma_iommu_ops.map_page(dev, page, offset, size, direction, attrs);
531 if (unlikely(dma_mapping_error(dev, ret))) {
532 vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)));
533 atomic_inc(&viodev->cmo.allocs_failed);
534 }
535
536 return ret;
537 }
538
539 static void vio_dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,
540 size_t size,
541 enum dma_data_direction direction,
542 struct dma_attrs *attrs)
543 {
544 struct vio_dev *viodev = to_vio_dev(dev);
545 struct iommu_table *tbl;
546
547 tbl = get_iommu_table_base(dev);
548 dma_iommu_ops.unmap_page(dev, dma_handle, size, direction, attrs);
549
550 vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)));
551 }
552
553 static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
554 int nelems, enum dma_data_direction direction,
555 struct dma_attrs *attrs)
556 {
557 struct vio_dev *viodev = to_vio_dev(dev);
558 struct iommu_table *tbl;
559 struct scatterlist *sgl;
560 int ret, count;
561 size_t alloc_size = 0;
562
563 tbl = get_iommu_table_base(dev);
564 for_each_sg(sglist, sgl, nelems, count)
565 alloc_size += roundup(sgl->length, IOMMU_PAGE_SIZE(tbl));
566
567 if (vio_cmo_alloc(viodev, alloc_size)) {
568 atomic_inc(&viodev->cmo.allocs_failed);
569 return 0;
570 }
571
572 ret = dma_iommu_ops.map_sg(dev, sglist, nelems, direction, attrs);
573
574 if (unlikely(!ret)) {
575 vio_cmo_dealloc(viodev, alloc_size);
576 atomic_inc(&viodev->cmo.allocs_failed);
577 return ret;
578 }
579
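/*
 * The IOMMU may coalesce scatterlist entries, so return any entitlement
 * that was reserved but not actually used by the mapping.
 */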
580 for_each_sg(sglist, sgl, ret, count)
581 alloc_size -= roundup(sgl->dma_length, IOMMU_PAGE_SIZE(tbl));
582 if (alloc_size)
583 vio_cmo_dealloc(viodev, alloc_size);
584
585 return ret;
586 }
587
588 static void vio_dma_iommu_unmap_sg(struct device *dev,
589 struct scatterlist *sglist, int nelems,
590 enum dma_data_direction direction,
591 struct dma_attrs *attrs)
592 {
593 struct vio_dev *viodev = to_vio_dev(dev);
594 struct iommu_table *tbl;
595 struct scatterlist *sgl;
596 size_t alloc_size = 0;
597 int count;
598
599 tbl = get_iommu_table_base(dev);
600 for_each_sg(sglist, sgl, nelems, count)
601 alloc_size += roundup(sgl->dma_length, IOMMU_PAGE_SIZE(tbl));
602
603 dma_iommu_ops.unmap_sg(dev, sglist, nelems, direction, attrs);
604
605 vio_cmo_dealloc(viodev, alloc_size);
606 }
607
608 static int vio_dma_iommu_dma_supported(struct device *dev, u64 mask)
609 {
610 return dma_iommu_ops.dma_supported(dev, mask);
611 }
612
613 static u64 vio_dma_get_required_mask(struct device *dev)
614 {
615 return dma_iommu_ops.get_required_mask(dev);
616 }
617
618 struct dma_map_ops vio_dma_mapping_ops = {
619 .alloc = vio_dma_iommu_alloc_coherent,
620 .free = vio_dma_iommu_free_coherent,
621 .mmap = dma_direct_mmap_coherent,
622 .map_sg = vio_dma_iommu_map_sg,
623 .unmap_sg = vio_dma_iommu_unmap_sg,
624 .map_page = vio_dma_iommu_map_page,
625 .unmap_page = vio_dma_iommu_unmap_page,
626 .dma_supported = vio_dma_iommu_dma_supported,
627 .get_required_mask = vio_dma_get_required_mask,
628 };
629
630 /**
631 * vio_cmo_set_dev_desired - Set desired entitlement for a device
632 *
633 * @viodev: struct vio_dev for device to alter
634 * @desired: new desired entitlement level in bytes
635 *
636 * For use by devices to request a change to their entitlement at runtime or
637 * through sysfs. The desired entitlement level is changed and a balancing
638 * of system resources is scheduled to run in the future.
639 */
640 void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired)
641 {
642 unsigned long flags;
643 struct vio_cmo_dev_entry *dev_ent;
644 int found = 0;
645
646 if (!firmware_has_feature(FW_FEATURE_CMO))
647 return;
648
649 spin_lock_irqsave(&vio_cmo.lock, flags);
650 if (desired < VIO_CMO_MIN_ENT)
651 desired = VIO_CMO_MIN_ENT;
652
653 /*
654 * Changes will not be made for devices not in the device list.
655 * If it is not in the device list, then no driver is loaded
656 * for the device and it can not receive entitlement.
657 */
658 list_for_each_entry(dev_ent, &vio_cmo.device_list, list)
659 if (viodev == dev_ent->viodev) {
660 found = 1;
661 break;
662 }
663 if (!found) {
664 spin_unlock_irqrestore(&vio_cmo.lock, flags);
665 return;
666 }
667
668 /* Increase/decrease in desired device entitlement */
669 if (desired >= viodev->cmo.desired) {
670 /* Just bump the bus and device values prior to a balance */
671 vio_cmo.desired += desired - viodev->cmo.desired;
672 viodev->cmo.desired = desired;
673 } else {
674 /* Decrease bus and device values for desired entitlement */
675 vio_cmo.desired -= viodev->cmo.desired - desired;
676 viodev->cmo.desired = desired;
677 /*
678 * If less entitlement is desired than current entitlement, move
679 * any reserve memory in the change region to the excess pool.
680 */
681 if (viodev->cmo.entitled > desired) {
682 vio_cmo.reserve.size -= viodev->cmo.entitled - desired;
683 vio_cmo.excess.size += viodev->cmo.entitled - desired;
684 /*
685 * If entitlement moving from the reserve pool to the
686 * excess pool is currently unused, add to the excess
687 * free counter.
688 */
689 if (viodev->cmo.allocated < viodev->cmo.entitled)
690 vio_cmo.excess.free += viodev->cmo.entitled -
691 max(viodev->cmo.allocated, desired);
692 viodev->cmo.entitled = desired;
693 }
694 }
695 schedule_delayed_work(&vio_cmo.balance_q, 0);
696 spin_unlock_irqrestore(&vio_cmo.lock, flags);
697 }
698
699 /**
700 * vio_cmo_bus_probe - Handle CMO specific bus probe activities
701 *
702 * @viodev - Pointer to struct vio_dev for device
703 *
704 * Determine the device's IO memory entitlement needs, attempting
705 * to satisfy the system minimum entitlement at first and scheduling
706 * a balance operation to take care of the rest at a later time.
707 *
708 * Returns: 0 on success, -EINVAL when device doesn't support CMO, and
709 * -ENOMEM when entitlement is not available for device or
710 * device entry.
711 *
712 */
713 static int vio_cmo_bus_probe(struct vio_dev *viodev)
714 {
715 struct vio_cmo_dev_entry *dev_ent;
716 struct device *dev = &viodev->dev;
717 struct iommu_table *tbl;
718 struct vio_driver *viodrv = to_vio_driver(dev->driver);
719 unsigned long flags;
720 size_t size;
721 bool dma_capable = false;
722
723 tbl = get_iommu_table_base(dev);
724
725 /* A device requires entitlement if it has a DMA window property */
726 switch (viodev->family) {
727 case VDEVICE:
728 if (of_get_property(viodev->dev.of_node,
729 "ibm,my-dma-window", NULL))
730 dma_capable = true;
731 break;
732 case PFO:
733 dma_capable = false;
734 break;
735 default:
736 dev_warn(dev, "unknown device family: %d\n", viodev->family);
737 BUG();
738 break;
739 }
740
741 /* Configure entitlement for the device. */
742 if (dma_capable) {
743 /* Check that the driver is CMO enabled and get desired DMA */
744 if (!viodrv->get_desired_dma) {
745 dev_err(dev, "%s: device driver does not support CMO\n",
746 __func__);
747 return -EINVAL;
748 }
749
750 viodev->cmo.desired =
751 IOMMU_PAGE_ALIGN(viodrv->get_desired_dma(viodev), tbl);
752 if (viodev->cmo.desired < VIO_CMO_MIN_ENT)
753 viodev->cmo.desired = VIO_CMO_MIN_ENT;
754 size = VIO_CMO_MIN_ENT;
755
756 dev_ent = kmalloc(sizeof(struct vio_cmo_dev_entry),
757 GFP_KERNEL);
758 if (!dev_ent)
759 return -ENOMEM;
760
761 dev_ent->viodev = viodev;
762 spin_lock_irqsave(&vio_cmo.lock, flags);
763 list_add(&dev_ent->list, &vio_cmo.device_list);
764 } else {
765 viodev->cmo.desired = 0;
766 size = 0;
767 spin_lock_irqsave(&vio_cmo.lock, flags);
768 }
769
770 /*
771 * If the needs for vio_cmo.min have not changed since they
772 * were last set, the number of devices in the OF tree has
773 * been constant and the IO memory for this is already in
774 * the reserve pool.
775 */
776 if (vio_cmo.min == ((vio_cmo_num_OF_devs() + 1) *
777 VIO_CMO_MIN_ENT)) {
778 /* Update desired entitlement if device requires it */
779 if (size)
780 vio_cmo.desired += (viodev->cmo.desired -
781 VIO_CMO_MIN_ENT);
782 } else {
783 size_t tmp;
784
785 tmp = vio_cmo.spare + vio_cmo.excess.free;
786 if (tmp < size) {
787 dev_err(dev, "%s: insufficient free "
788 "entitlement to add device. "
789 "Need %lu, have %lu\n", __func__,
790 size, tmp);
791 spin_unlock_irqrestore(&vio_cmo.lock, flags);
792 return -ENOMEM;
793 }
794
795 /* Use excess pool first to fulfill request */
796 tmp = min(size, vio_cmo.excess.free);
797 vio_cmo.excess.free -= tmp;
798 vio_cmo.excess.size -= tmp;
799 vio_cmo.reserve.size += tmp;
800
801 /* Use spare if excess pool was insufficient */
802 vio_cmo.spare -= size - tmp;
803
804 /* Update bus accounting */
805 vio_cmo.min += size;
806 vio_cmo.desired += viodev->cmo.desired;
807 }
808 spin_unlock_irqrestore(&vio_cmo.lock, flags);
809 return 0;
810 }
811
812 /**
813 * vio_cmo_bus_remove - Handle CMO specific bus removal activities
814 *
815 * @viodev - Pointer to struct vio_dev for device
816 *
817 * Remove the device from the cmo device list. The minimum entitlement
818 * will be reserved for the device as long as it is in the system. The
819 * rest of the entitlement the device had been allocated will be returned
820 * to the system.
821 */
822 static void vio_cmo_bus_remove(struct vio_dev *viodev)
823 {
824 struct vio_cmo_dev_entry *dev_ent;
825 unsigned long flags;
826 size_t tmp;
827
828 spin_lock_irqsave(&vio_cmo.lock, flags);
829 if (viodev->cmo.allocated) {
830 dev_err(&viodev->dev, "%s: device had %lu bytes of IO "
831 "allocated after remove operation.\n",
832 __func__, viodev->cmo.allocated);
833 BUG();
834 }
835
836 /*
837 * Remove the device from the device list being maintained for
838 * CMO enabled devices.
839 */
840 list_for_each_entry(dev_ent, &vio_cmo.device_list, list)
841 if (viodev == dev_ent->viodev) {
842 list_del(&dev_ent->list);
843 kfree(dev_ent);
844 break;
845 }
846
847 /*
848 * Devices may not require any entitlement and they do not need
849 * to be processed. Otherwise, return the device's entitlement
850 * back to the pools.
851 */
852 if (viodev->cmo.entitled) {
853 /*
854 * This device has not yet left the OF tree; its
855 * minimum entitlement remains in vio_cmo.min and
856 * vio_cmo.desired
857 */
858 vio_cmo.desired -= (viodev->cmo.desired - VIO_CMO_MIN_ENT);
859
860 /*
861 * Save min allocation for device in reserve as long
862 * as it exists in OF tree as determined by later
863 * balance operation
864 */
865 viodev->cmo.entitled -= VIO_CMO_MIN_ENT;
866
867 /* Replenish spare from freed reserve pool */
868 if (viodev->cmo.entitled && (vio_cmo.spare < VIO_CMO_MIN_ENT)) {
869 tmp = min(viodev->cmo.entitled, (VIO_CMO_MIN_ENT -
870 vio_cmo.spare));
871 vio_cmo.spare += tmp;
872 viodev->cmo.entitled -= tmp;
873 }
874
875 /* Remaining reserve goes to excess pool */
876 vio_cmo.excess.size += viodev->cmo.entitled;
877 vio_cmo.excess.free += viodev->cmo.entitled;
878 vio_cmo.reserve.size -= viodev->cmo.entitled;
879
880 /*
881 * Until the device is removed it will keep a
882 * minimum entitlement; this will guarantee that
883 * a module unload/load will result in a success.
884 */
885 viodev->cmo.entitled = VIO_CMO_MIN_ENT;
886 viodev->cmo.desired = VIO_CMO_MIN_ENT;
887 atomic_set(&viodev->cmo.allocs_failed, 0);
888 }
889
890 spin_unlock_irqrestore(&vio_cmo.lock, flags);
891 }
892
893 static void vio_cmo_set_dma_ops(struct vio_dev *viodev)
894 {
895 set_dma_ops(&viodev->dev, &vio_dma_mapping_ops);
896 }
897
898 /**
899 * vio_cmo_bus_init - CMO entitlement initialization at bus init time
900 *
901 * Set up the reserve and excess entitlement pools based on available
902 * system entitlement and the number of devices in the OF tree that
903 * require entitlement in the reserve pool.
904 */
905 static void vio_cmo_bus_init(void)
906 {
907 struct hvcall_mpp_data mpp_data;
908 int err;
909
910 memset(&vio_cmo, 0, sizeof(struct vio_cmo));
911 spin_lock_init(&vio_cmo.lock);
912 INIT_LIST_HEAD(&vio_cmo.device_list);
913 INIT_DELAYED_WORK(&vio_cmo.balance_q, vio_cmo_balance);
914
915 /* Get current system entitlement */
916 err = h_get_mpp(&mpp_data);
917
918 /*
919 * On failure, continue with entitlement set to 0, will panic()
920 * later when spare is reserved.
921 */
922 if (err != H_SUCCESS) {
923 printk(KERN_ERR "%s: unable to determine system IO "
924 "entitlement. (%d)\n", __func__, err);
925 vio_cmo.entitled = 0;
926 } else {
927 vio_cmo.entitled = mpp_data.entitled_mem;
928 }
929
930 /* Set reservation and check against entitlement */
931 vio_cmo.spare = VIO_CMO_MIN_ENT;
932 vio_cmo.reserve.size = vio_cmo.spare;
933 vio_cmo.reserve.size += (vio_cmo_num_OF_devs() *
934 VIO_CMO_MIN_ENT);
935 if (vio_cmo.reserve.size > vio_cmo.entitled) {
936 printk(KERN_ERR "%s: insufficient system entitlement\n",
937 __func__);
938 panic("%s: Insufficient system entitlement", __func__);
939 }
940
941 /* Set the remaining accounting variables */
942 vio_cmo.excess.size = vio_cmo.entitled - vio_cmo.reserve.size;
943 vio_cmo.excess.free = vio_cmo.excess.size;
944 vio_cmo.min = vio_cmo.reserve.size;
945 vio_cmo.desired = vio_cmo.reserve.size;
946 }
947
948 /* sysfs device functions and data structures for CMO */
949
950 #define viodev_cmo_rd_attr(name) \
951 static ssize_t viodev_cmo_##name##_show(struct device *dev, \
952 struct device_attribute *attr, \
953 char *buf) \
954 { \
955 return sprintf(buf, "%lu\n", to_vio_dev(dev)->cmo.name); \
956 }
957
958 static ssize_t viodev_cmo_allocs_failed_show(struct device *dev,
959 struct device_attribute *attr, char *buf)
960 {
961 struct vio_dev *viodev = to_vio_dev(dev);
962 return sprintf(buf, "%d\n", atomic_read(&viodev->cmo.allocs_failed));
963 }
964
965 static ssize_t viodev_cmo_allocs_failed_reset(struct device *dev,
966 struct device_attribute *attr, const char *buf, size_t count)
967 {
968 struct vio_dev *viodev = to_vio_dev(dev);
969 atomic_set(&viodev->cmo.allocs_failed, 0);
970 return count;
971 }
972
973 static ssize_t viodev_cmo_desired_set(struct device *dev,
974 struct device_attribute *attr, const char *buf, size_t count)
975 {
976 struct vio_dev *viodev = to_vio_dev(dev);
977 size_t new_desired;
978 int ret;
979
980 ret = kstrtoul(buf, 10, &new_desired);
981 if (ret)
982 return ret;
983
984 vio_cmo_set_dev_desired(viodev, new_desired);
985 return count;
986 }
987
988 viodev_cmo_rd_attr(desired);
989 viodev_cmo_rd_attr(entitled);
990 viodev_cmo_rd_attr(allocated);
991
992 static ssize_t name_show(struct device *, struct device_attribute *, char *);
993 static ssize_t devspec_show(struct device *, struct device_attribute *, char *);
994 static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
995 char *buf);
996 static struct device_attribute vio_cmo_dev_attrs[] = {
997 __ATTR_RO(name),
998 __ATTR_RO(devspec),
999 __ATTR_RO(modalias),
1000 __ATTR(cmo_desired, S_IWUSR|S_IRUSR|S_IWGRP|S_IRGRP|S_IROTH,
1001 viodev_cmo_desired_show, viodev_cmo_desired_set),
1002 __ATTR(cmo_entitled, S_IRUGO, viodev_cmo_entitled_show, NULL),
1003 __ATTR(cmo_allocated, S_IRUGO, viodev_cmo_allocated_show, NULL),
1004 __ATTR(cmo_allocs_failed, S_IWUSR|S_IRUSR|S_IWGRP|S_IRGRP|S_IROTH,
1005 viodev_cmo_allocs_failed_show, viodev_cmo_allocs_failed_reset),
1006 __ATTR_NULL
1007 };
1008
1009 /* sysfs bus functions and data structures for CMO */
1010
1011 #define viobus_cmo_rd_attr(name) \
1012 static ssize_t cmo_##name##_show(struct bus_type *bt, char *buf) \
1013 { \
1014 return sprintf(buf, "%lu\n", vio_cmo.name); \
1015 } \
1016 static BUS_ATTR_RO(cmo_##name)
1017
1018 #define viobus_cmo_pool_rd_attr(name, var) \
1019 static ssize_t \
1020 cmo_##name##_##var##_show(struct bus_type *bt, char *buf) \
1021 { \
1022 return sprintf(buf, "%lu\n", vio_cmo.name.var); \
1023 } \
1024 static BUS_ATTR_RO(cmo_##name##_##var)
1025
1026 viobus_cmo_rd_attr(entitled);
1027 viobus_cmo_rd_attr(spare);
1028 viobus_cmo_rd_attr(min);
1029 viobus_cmo_rd_attr(desired);
1030 viobus_cmo_rd_attr(curr);
1031 viobus_cmo_pool_rd_attr(reserve, size);
1032 viobus_cmo_pool_rd_attr(excess, size);
1033 viobus_cmo_pool_rd_attr(excess, free);
1034
1035 static ssize_t cmo_high_show(struct bus_type *bt, char *buf)
1036 {
1037 return sprintf(buf, "%lu\n", vio_cmo.high);
1038 }
1039
1040 static ssize_t cmo_high_store(struct bus_type *bt, const char *buf,
1041 size_t count)
1042 {
1043 unsigned long flags;
1044
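/* Writing any value resets the high water mark to current usage. */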
1045 spin_lock_irqsave(&vio_cmo.lock, flags);
1046 vio_cmo.high = vio_cmo.curr;
1047 spin_unlock_irqrestore(&vio_cmo.lock, flags);
1048
1049 return count;
1050 }
1051 static BUS_ATTR_RW(cmo_high);
1052
1053 static struct attribute *vio_bus_attrs[] = {
1054 &bus_attr_cmo_entitled.attr,
1055 &bus_attr_cmo_spare.attr,
1056 &bus_attr_cmo_min.attr,
1057 &bus_attr_cmo_desired.attr,
1058 &bus_attr_cmo_curr.attr,
1059 &bus_attr_cmo_high.attr,
1060 &bus_attr_cmo_reserve_size.attr,
1061 &bus_attr_cmo_excess_size.attr,
1062 &bus_attr_cmo_excess_free.attr,
1063 NULL,
1064 };
1065 ATTRIBUTE_GROUPS(vio_bus);
1066
1067 static void vio_cmo_sysfs_init(void)
1068 {
1069 vio_bus_type.dev_attrs = vio_cmo_dev_attrs;
1070 vio_bus_type.bus_groups = vio_bus_groups;
1071 }
1072 #else /* CONFIG_PPC_SMLPAR */
1073 int vio_cmo_entitlement_update(size_t new_entitlement) { return 0; }
1074 void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired) {}
1075 static int vio_cmo_bus_probe(struct vio_dev *viodev) { return 0; }
1076 static void vio_cmo_bus_remove(struct vio_dev *viodev) {}
1077 static void vio_cmo_set_dma_ops(struct vio_dev *viodev) {}
1078 static void vio_cmo_bus_init(void) {}
1079 static void vio_cmo_sysfs_init(void) { }
1080 #endif /* CONFIG_PPC_SMLPAR */
1081 EXPORT_SYMBOL(vio_cmo_entitlement_update);
1082 EXPORT_SYMBOL(vio_cmo_set_dev_desired);
1083
1084
1085 /*
1086 * Platform Facilities Option (PFO) support
1087 */
1088
1089 /**
1090 * vio_h_cop_sync - Perform a synchronous PFO co-processor operation
1091 *
1092 * @vdev - Pointer to a struct vio_dev for device
1093 * @op - Pointer to a struct vio_pfo_op for the operation parameters
1094 *
1095 * Calls the hypervisor to synchronously perform the PFO operation
1096 * described in @op. In the case of a busy response from the hypervisor,
1097 * the operation will be re-submitted indefinitely unless a non-zero timeout
1098 * is specified or an error occurs. The timeout places a limit on when to
1099 * stop re-submitting an operation; the total time can be exceeded if an
1100 * operation is in progress.
1101 *
1102 * op->hcall_err will be set to the return from the
1103 * last h_cop_op call or it will be 0 if an error not involving the h_call
1104 * was encountered.
1105 *
1106 * Returns:
1107 * 0 on success,
1108 * -EINVAL if the h_call fails due to an invalid parameter,
1109 * -E2BIG if the h_call can not be performed synchronously,
1110 * -EBUSY if a timeout is specified and has elapsed,
1111 * -EACCES if the memory area for data/status has been rescinded, or
1112 * -EPERM if a hardware fault has been indicated
1113 */
1114 int vio_h_cop_sync(struct vio_dev *vdev, struct vio_pfo_op *op)
1115 {
1116 struct device *dev = &vdev->dev;
1117 unsigned long deadline = 0;
1118 long hret = 0;
1119 int ret = 0;
1120
1121 if (op->timeout)
1122 deadline = jiffies + msecs_to_jiffies(op->timeout);
1123
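/*
 * Retry while the hypervisor reports a transient busy condition;
 * stop on success, on a hard failure, or once the deadline (if any)
 * has passed.
 */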
1124 while (true) {
1125 hret = plpar_hcall_norets(H_COP, op->flags,
1126 vdev->resource_id,
1127 op->in, op->inlen, op->out,
1128 op->outlen, op->csbcpb);
1129
1130 if (hret == H_SUCCESS ||
1131 (hret != H_NOT_ENOUGH_RESOURCES &&
1132 hret != H_BUSY && hret != H_RESOURCE) ||
1133 (op->timeout && time_after(jiffies, deadline)))
1134 break;
1135
1136 dev_dbg(dev, "%s: hcall ret(%ld), retrying.\n", __func__, hret);
1137 }
1138
1139 switch (hret) {
1140 case H_SUCCESS:
1141 ret = 0;
1142 break;
1143 case H_OP_MODE:
1144 case H_TOO_BIG:
1145 ret = -E2BIG;
1146 break;
1147 case H_RESCINDED:
1148 ret = -EACCES;
1149 break;
1150 case H_HARDWARE:
1151 ret = -EPERM;
1152 break;
1153 case H_NOT_ENOUGH_RESOURCES:
1154 case H_RESOURCE:
1155 case H_BUSY:
1156 ret = -EBUSY;
1157 break;
1158 default:
1159 ret = -EINVAL;
1160 break;
1161 }
1162
1163 if (ret)
1164 dev_dbg(dev, "%s: Sync h_cop_op failure (ret:%d) (hret:%ld)\n",
1165 __func__, ret, hret);
1166
1167 op->hcall_err = hret;
1168 return ret;
1169 }
1170 EXPORT_SYMBOL(vio_h_cop_sync);
1171
1172 static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev)
1173 {
1174 const __be32 *dma_window;
1175 struct iommu_table *tbl;
1176 unsigned long offset, size;
1177
1178 dma_window = of_get_property(dev->dev.of_node,
1179 "ibm,my-dma-window", NULL);
1180 if (!dma_window)
1181 return NULL;
1182
1183 tbl = kzalloc(sizeof(*tbl), GFP_KERNEL);
1184 if (tbl == NULL)
1185 return NULL;
1186
1187 of_parse_dma_window(dev->dev.of_node, dma_window,
1188 &tbl->it_index, &offset, &size);
1189
1190 /* TCE table size - measured in tce entries */
1191 tbl->it_page_shift = IOMMU_PAGE_SHIFT_4K;
1192 tbl->it_size = size >> tbl->it_page_shift;
1193 /* offset for VIO should always be 0 */
1194 tbl->it_offset = offset >> tbl->it_page_shift;
1195 tbl->it_busno = 0;
1196 tbl->it_type = TCE_VB;
1197 tbl->it_blocksize = 16;
1198
1199 if (firmware_has_feature(FW_FEATURE_LPAR))
1200 tbl->it_ops = &iommu_table_lpar_multi_ops;
1201 else
1202 tbl->it_ops = &iommu_table_pseries_ops;
1203
1204 return iommu_init_table(tbl, -1);
1205 }
1206
1207 /**
1208 * vio_match_device: - Tell if a VIO device has a matching
1209 * VIO device id structure.
1210 * @ids: array of VIO device id structures to search in
1211 * @dev: the VIO device structure to match against
1212 *
1213 * Used by a driver to check whether a VIO device present in the
1214 * system is in its list of supported devices. Returns the matching
1215 * vio_device_id structure or NULL if there is no match.
1216 */
1217 static const struct vio_device_id *vio_match_device(
1218 const struct vio_device_id *ids, const struct vio_dev *dev)
1219 {
1220 while (ids->type[0] != '\0') {
1221 if ((strncmp(dev->type, ids->type, strlen(ids->type)) == 0) &&
1222 of_device_is_compatible(dev->dev.of_node,
1223 ids->compat))
1224 return ids;
1225 ids++;
1226 }
1227 return NULL;
1228 }
1229
1230 /*
1231 * Convert from struct device to struct vio_dev and pass to driver.
1232 * dev->driver has already been set by generic code because vio_bus_match
1233 * succeeded.
1234 */
1235 static int vio_bus_probe(struct device *dev)
1236 {
1237 struct vio_dev *viodev = to_vio_dev(dev);
1238 struct vio_driver *viodrv = to_vio_driver(dev->driver);
1239 const struct vio_device_id *id;
1240 int error = -ENODEV;
1241
1242 if (!viodrv->probe)
1243 return error;
1244
1245 id = vio_match_device(viodrv->id_table, viodev);
1246 if (id) {
1247 memset(&viodev->cmo, 0, sizeof(viodev->cmo));
1248 if (firmware_has_feature(FW_FEATURE_CMO)) {
1249 error = vio_cmo_bus_probe(viodev);
1250 if (error)
1251 return error;
1252 }
1253 error = viodrv->probe(viodev, id);
1254 if (error && firmware_has_feature(FW_FEATURE_CMO))
1255 vio_cmo_bus_remove(viodev);
1256 }
1257
1258 return error;
1259 }
1260
1261 /* convert from struct device to struct vio_dev and pass to driver. */
1262 static int vio_bus_remove(struct device *dev)
1263 {
1264 struct vio_dev *viodev = to_vio_dev(dev);
1265 struct vio_driver *viodrv = to_vio_driver(dev->driver);
1266 struct device *devptr;
1267 int ret = 1;
1268
1269 /*
1270 * Hold a reference to the device after the remove function is called
1271 * to allow for CMO accounting cleanup for the device.
1272 */
1273 devptr = get_device(dev);
1274
1275 if (viodrv->remove)
1276 ret = viodrv->remove(viodev);
1277
1278 if (!ret && firmware_has_feature(FW_FEATURE_CMO))
1279 vio_cmo_bus_remove(viodev);
1280
1281 put_device(devptr);
1282 return ret;
1283 }
1284
1285 /**
1286 * vio_register_driver: - Register a new vio driver
1287 * @viodrv: The vio_driver structure to be registered.
1288 */
1289 int __vio_register_driver(struct vio_driver *viodrv, struct module *owner,
1290 const char *mod_name)
1291 {
1292 pr_debug("%s: driver %s registering\n", __func__, viodrv->name);
1293
1294 /* fill in 'struct driver' fields */
1295 viodrv->driver.name = viodrv->name;
1296 viodrv->driver.pm = viodrv->pm;
1297 viodrv->driver.bus = &vio_bus_type;
1298 viodrv->driver.owner = owner;
1299 viodrv->driver.mod_name = mod_name;
1300
1301 return driver_register(&viodrv->driver);
1302 }
1303 EXPORT_SYMBOL(__vio_register_driver);
1304
1305 /**
1306 * vio_unregister_driver - Remove registration of vio driver.
1307 * @viodrv: The vio_driver struct to be removed from registration
1308 */
1309 void vio_unregister_driver(struct vio_driver *viodrv)
1310 {
1311 driver_unregister(&viodrv->driver);
1312 }
1313 EXPORT_SYMBOL(vio_unregister_driver);
1314
1315 /* vio_dev refcount hit 0 */
1316 static void vio_dev_release(struct device *dev)
1317 {
1318 struct iommu_table *tbl = get_iommu_table_base(dev);
1319
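/* Free the TCE table set up by vio_build_iommu_table(), if any. */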
1320 if (tbl)
1321 iommu_free_table(tbl, of_node_full_name(dev->of_node));
1322 of_node_put(dev->of_node);
1323 kfree(to_vio_dev(dev));
1324 }
1325
1326 /**
1327 * vio_register_device_node: - Register a new vio device.
1328 * @of_node: The OF node for this device.
1329 *
1330 * Creates and initializes a vio_dev structure from the data in
1331 * of_node and adds it to the list of virtual devices.
1332 * Returns a pointer to the created vio_dev or NULL if node has
1333 * NULL device_type or compatible fields.
1334 */
1335 struct vio_dev *vio_register_device_node(struct device_node *of_node)
1336 {
1337 struct vio_dev *viodev;
1338 struct device_node *parent_node;
1339 const __be32 *prop;
1340 enum vio_dev_family family;
1341 const char *of_node_name = of_node->name ? of_node->name : "<unknown>";
1342
1343 /*
1344 * Determine if this node is under the /vdevice node or under the
1345 * /ibm,platform-facilities node. This decides the device's family.
1346 */
1347 parent_node = of_get_parent(of_node);
1348 if (parent_node) {
1349 if (!strcmp(parent_node->full_name, "/ibm,platform-facilities"))
1350 family = PFO;
1351 else if (!strcmp(parent_node->full_name, "/vdevice"))
1352 family = VDEVICE;
1353 else {
1354 pr_warn("%s: parent(%s) of %s not recognized.\n",
1355 __func__,
1356 parent_node->full_name,
1357 of_node_name);
1358 of_node_put(parent_node);
1359 return NULL;
1360 }
1361 of_node_put(parent_node);
1362 } else {
1363 pr_warn("%s: could not determine the parent of node %s.\n",
1364 __func__, of_node_name);
1365 return NULL;
1366 }
1367
1368 if (family == PFO) {
1369 if (of_get_property(of_node, "interrupt-controller", NULL)) {
1370 pr_debug("%s: Skipping the interrupt controller %s.\n",
1371 __func__, of_node_name);
1372 return NULL;
1373 }
1374 }
1375
1376 /* allocate a vio_dev for this node */
1377 viodev = kzalloc(sizeof(struct vio_dev), GFP_KERNEL);
1378 if (viodev == NULL) {
1379 pr_warn("%s: allocation failure for VIO device.\n", __func__);
1380 return NULL;
1381 }
1382
1383 /* we need the 'device_type' property, in order to match with drivers */
1384 viodev->family = family;
1385 if (viodev->family == VDEVICE) {
1386 unsigned int unit_address;
1387
1388 if (of_node->type != NULL)
1389 viodev->type = of_node->type;
1390 else {
1391 pr_warn("%s: node %s is missing the 'device_type' "
1392 "property.\n", __func__, of_node_name);
1393 goto out;
1394 }
1395
1396 prop = of_get_property(of_node, "reg", NULL);
1397 if (prop == NULL) {
1398 pr_warn("%s: node %s missing 'reg'\n",
1399 __func__, of_node_name);
1400 goto out;
1401 }
1402 unit_address = of_read_number(prop, 1);
1403 dev_set_name(&viodev->dev, "%x", unit_address);
1404 viodev->irq = irq_of_parse_and_map(of_node, 0);
1405 viodev->unit_address = unit_address;
1406 } else {
1407 /* PFO devices need their resource_id for submitting COP_OPs
1408 * This is an optional field for devices, but is required when
1409 * performing synchronous ops */
1410 prop = of_get_property(of_node, "ibm,resource-id", NULL);
1411 if (prop != NULL)
1412 viodev->resource_id = of_read_number(prop, 1);
1413
1414 dev_set_name(&viodev->dev, "%s", of_node_name);
1415 viodev->type = of_node_name;
1416 viodev->irq = 0;
1417 }
1418
1419 viodev->name = of_node->name;
1420 viodev->dev.of_node = of_node_get(of_node);
1421
1422 set_dev_node(&viodev->dev, of_node_to_nid(of_node));
1423
1424 /* init generic 'struct device' fields: */
1425 viodev->dev.parent = &vio_bus_device.dev;
1426 viodev->dev.bus = &vio_bus_type;
1427 viodev->dev.release = vio_dev_release;
1428
1429 if (of_get_property(viodev->dev.of_node, "ibm,my-dma-window", NULL)) {
1430 if (firmware_has_feature(FW_FEATURE_CMO))
1431 vio_cmo_set_dma_ops(viodev);
1432 else
1433 set_dma_ops(&viodev->dev, &dma_iommu_ops);
1434
1435 set_iommu_table_base(&viodev->dev,
1436 vio_build_iommu_table(viodev));
1437
1438 /* needed to ensure proper operation of coherent allocations
1439 * later, in case driver doesn't set it explicitly */
1440 viodev->dev.coherent_dma_mask = DMA_BIT_MASK(64);
1441 viodev->dev.dma_mask = &viodev->dev.coherent_dma_mask;
1442 }
1443
1444 /* register with generic device framework */
1445 if (device_register(&viodev->dev)) {
1446 printk(KERN_ERR "%s: failed to register device %s\n",
1447 __func__, dev_name(&viodev->dev));
1448 put_device(&viodev->dev);
1449 return NULL;
1450 }
1451
1452 return viodev;
1453
1454 out: /* Use this exit point for any return prior to device_register */
1455 kfree(viodev);
1456
1457 return NULL;
1458 }
1459 EXPORT_SYMBOL(vio_register_device_node);
1460
1461 /*
1462 * vio_bus_scan_register_devices - Scan OF and register each child device
1463 * @root_name - OF node name for the root of the subtree to search.
1464 * This must be non-NULL
1465 *
1466 * Starting from the root node provided, register the device node for
1467 * each child beneath the root.
1468 */
1469 static void vio_bus_scan_register_devices(char *root_name)
1470 {
1471 struct device_node *node_root, *node_child;
1472
1473 if (!root_name)
1474 return;
1475
1476 node_root = of_find_node_by_name(NULL, root_name);
1477 if (node_root) {
1478
1479 /*
1480 * Create struct vio_devices for each virtual device in
1481 * the device tree. Drivers will associate with them later.
1482 */
1483 node_child = of_get_next_child(node_root, NULL);
1484 while (node_child) {
1485 vio_register_device_node(node_child);
1486 node_child = of_get_next_child(node_root, node_child);
1487 }
1488 of_node_put(node_root);
1489 }
1490 }
1491
1492 /**
1493 * vio_bus_init: - Initialize the virtual IO bus
1494 */
1495 static int __init vio_bus_init(void)
1496 {
1497 int err;
1498
1499 if (firmware_has_feature(FW_FEATURE_CMO))
1500 vio_cmo_sysfs_init();
1501
1502 err = bus_register(&vio_bus_type);
1503 if (err) {
1504 printk(KERN_ERR "failed to register VIO bus\n");
1505 return err;
1506 }
1507
1508 /*
1509 * The fake parent of all vio devices, just to give us
1510 * a nice directory
1511 */
1512 err = device_register(&vio_bus_device.dev);
1513 if (err) {
1514 printk(KERN_WARNING "%s: device_register returned %i\n",
1515 __func__, err);
1516 return err;
1517 }
1518
1519 if (firmware_has_feature(FW_FEATURE_CMO))
1520 vio_cmo_bus_init();
1521
1522 return 0;
1523 }
1524 postcore_initcall(vio_bus_init);
1525
1526 static int __init vio_device_init(void)
1527 {
1528 vio_bus_scan_register_devices("vdevice");
1529 vio_bus_scan_register_devices("ibm,platform-facilities");
1530
1531 return 0;
1532 }
1533 device_initcall(vio_device_init);
1534
1535 static ssize_t name_show(struct device *dev,
1536 struct device_attribute *attr, char *buf)
1537 {
1538 return sprintf(buf, "%s\n", to_vio_dev(dev)->name);
1539 }
1540
1541 static ssize_t devspec_show(struct device *dev,
1542 struct device_attribute *attr, char *buf)
1543 {
1544 struct device_node *of_node = dev->of_node;
1545
1546 return sprintf(buf, "%s\n", of_node_full_name(of_node));
1547 }
1548
1549 static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
1550 char *buf)
1551 {
1552 const struct vio_dev *vio_dev = to_vio_dev(dev);
1553 struct device_node *dn;
1554 const char *cp;
1555
1556 dn = dev->of_node;
1557 if (!dn) {
1558 strcpy(buf, "\n");
1559 return strlen(buf);
1560 }
1561 cp = of_get_property(dn, "compatible", NULL);
1562 if (!cp) {
1563 strcpy(buf, "\n");
1564 return strlen(buf);
1565 }
1566
1567 return sprintf(buf, "vio:T%sS%s\n", vio_dev->type, cp);
1568 }
1569
1570 static struct device_attribute vio_dev_attrs[] = {
1571 __ATTR_RO(name),
1572 __ATTR_RO(devspec),
1573 __ATTR_RO(modalias),
1574 __ATTR_NULL
1575 };
1576
1577 void vio_unregister_device(struct vio_dev *viodev)
1578 {
1579 device_unregister(&viodev->dev);
1580 }
1581 EXPORT_SYMBOL(vio_unregister_device);
1582
1583 static int vio_bus_match(struct device *dev, struct device_driver *drv)
1584 {
1585 const struct vio_dev *vio_dev = to_vio_dev(dev);
1586 struct vio_driver *vio_drv = to_vio_driver(drv);
1587 const struct vio_device_id *ids = vio_drv->id_table;
1588
1589 return (ids != NULL) && (vio_match_device(ids, vio_dev) != NULL);
1590 }
1591
1592 static int vio_hotplug(struct device *dev, struct kobj_uevent_env *env)
1593 {
1594 const struct vio_dev *vio_dev = to_vio_dev(dev);
1595 struct device_node *dn;
1596 const char *cp;
1597
1598 dn = dev->of_node;
1599 if (!dn)
1600 return -ENODEV;
1601 cp = of_get_property(dn, "compatible", NULL);
1602 if (!cp)
1603 return -ENODEV;
1604
1605 add_uevent_var(env, "MODALIAS=vio:T%sS%s", vio_dev->type, cp);
1606 return 0;
1607 }
1608
1609 struct bus_type vio_bus_type = {
1610 .name = "vio",
1611 .dev_attrs = vio_dev_attrs,
1612 .uevent = vio_hotplug,
1613 .match = vio_bus_match,
1614 .probe = vio_bus_probe,
1615 .remove = vio_bus_remove,
1616 };
1617
1618 /**
1619 * vio_get_attribute: - get attribute for virtual device
1620 * @vdev: The vio device to get property.
1621 * @which: The property/attribute to be extracted.
1622 * @length: Pointer to length of returned data size (unused if NULL).
1623 *
1624 * Calls prom.c's of_get_property() to return the value of the
1625 * attribute specified by @which
1626 */
1627 const void *vio_get_attribute(struct vio_dev *vdev, char *which, int *length)
1628 {
1629 return of_get_property(vdev->dev.of_node, which, length);
1630 }
1631 EXPORT_SYMBOL(vio_get_attribute);
1632
1633 #ifdef CONFIG_PPC_PSERIES
1634 /* vio_find_name() - internal because only vio.c knows how we formatted the
1635 * kobject name
1636 */
1637 static struct vio_dev *vio_find_name(const char *name)
1638 {
1639 struct device *found;
1640
1641 found = bus_find_device_by_name(&vio_bus_type, NULL, name);
1642 if (!found)
1643 return NULL;
1644
1645 return to_vio_dev(found);
1646 }
1647
1648 /**
1649 * vio_find_node - find an already-registered vio_dev
1650 * @vnode: device_node of the virtual device we're looking for
1651 */
1652 struct vio_dev *vio_find_node(struct device_node *vnode)
1653 {
1654 char kobj_name[20];
1655 struct device_node *vnode_parent;
1656 const char *dev_type;
1657
1658 vnode_parent = of_get_parent(vnode);
1659 if (!vnode_parent)
1660 return NULL;
1661
1662 dev_type = of_get_property(vnode_parent, "device_type", NULL);
1663 of_node_put(vnode_parent);
1664 if (!dev_type)
1665 return NULL;
1666
1667 /* construct the kobject name from the device node */
1668 if (!strcmp(dev_type, "vdevice")) {
1669 const __be32 *prop;
1670
1671 prop = of_get_property(vnode, "reg", NULL);
1672 if (!prop)
1673 return NULL;
1674 snprintf(kobj_name, sizeof(kobj_name), "%x",
1675 (uint32_t)of_read_number(prop, 1));
1676 } else if (!strcmp(dev_type, "ibm,platform-facilities"))
1677 snprintf(kobj_name, sizeof(kobj_name), "%s", vnode->name);
1678 else
1679 return NULL;
1680
1681 return vio_find_name(kobj_name);
1682 }
1683 EXPORT_SYMBOL(vio_find_node);
1684
1685 int vio_enable_interrupts(struct vio_dev *dev)
1686 {
1687 int rc = h_vio_signal(dev->unit_address, VIO_IRQ_ENABLE);
1688 if (rc != H_SUCCESS)
1689 printk(KERN_ERR "vio: Error 0x%x enabling interrupts\n", rc);
1690 return rc;
1691 }
1692 EXPORT_SYMBOL(vio_enable_interrupts);
1693
1694 int vio_disable_interrupts(struct vio_dev *dev)
1695 {
1696 int rc = h_vio_signal(dev->unit_address, VIO_IRQ_DISABLE);
1697 if (rc != H_SUCCESS)
1698 printk(KERN_ERR "vio: Error 0x%x disabling interrupts\n", rc);
1699 return rc;
1700 }
1701 EXPORT_SYMBOL(vio_disable_interrupts);
1702 #endif /* CONFIG_PPC_PSERIES */
1703