 * Client-facing interface for the Xenbus driver. In other words, the
 * interface between the Xenbus and the device-specific code, be it the
 * xenbus_watch_path - register a watch
 * Return 0 on success, or -errno on error. On success, the given @path will
 * be saved as @watch->node, and remains the caller's to free. On error,
 * @watch->node will be NULL, so the caller has nothing to free.
	/* Save the path and callbacks on the watch before registering it. */
	watch->node = path;
	watch->will_handle = will_handle;
	watch->callback = callback;

	/* Registration failed: clear the watch so it holds no stale state. */
	watch->node = NULL;
	watch->will_handle = NULL;
	watch->callback = NULL;
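/*
 * Usage sketch (not from this file): registering a watch on the other end's
 * state node. The helper and callback names are hypothetical. On success the
 * path is retained as watch->node and remains the caller's to kfree() after
 * unregistering; on error the caller must free it.
 */
static void otherend_changed(struct xenbus_watch *watch,
			     const char *path, const char *token)
{
	/* React to the watched node changing. */
}

static int watch_otherend_state(struct xenbus_device *dev,
				struct xenbus_watch *watch)
{
	char *path = kasprintf(GFP_KERNEL, "%s/state", dev->otherend);
	int err;

	if (!path)
		return -ENOMEM;
	/* A NULL will_handle queues every event for the callback. */
	err = xenbus_watch_path(dev, path, watch, NULL, otherend_changed);
	if (err)
		kfree(path);	/* the path stays ours on error */
	return err;
}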
 * xenbus_watch_pathfmt - register a watch on a sprintf-formatted path
 * Return 0 on success, or -errno on error. On success, the watched path
 * (@path/@path2) will be saved as @watch->node, and becomes the caller's to
 * kfree(). On error, @watch->node will be NULL, so the caller has nothing to
 * free.
		xenbus_dev_fatal(dev, -ENOMEM, "allocating path for watch");
		return -ENOMEM;
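/*
 * Usage sketch (not from this file): building the watched path from a format
 * string. The callback and the "physical-device" key are illustrative.
 * Unlike xenbus_watch_path(), the formatted path is freed internally on
 * error, so the caller has no cleanup to do.
 */
static void backend_changed(struct xenbus_watch *watch,
			    const char *path, const char *token)
{
	/* React to the formatted path changing. */
}

static int watch_physical_device(struct xenbus_device *dev,
				 struct xenbus_watch *watch)
{
	return xenbus_watch_pathfmt(dev, watch, NULL, backend_changed,
				    "%s/%s", dev->nodename,
				    "physical-device");
}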
	 * (something it was trying to do in the past) because dev->state

	if (state == dev->state)

	err = xenbus_scanf(xbt, dev->nodename, "state", "%d", &current_state);

	err = xenbus_printf(xbt, dev->nodename, "state", "%d", state);

	if (err == -EAGAIN && !abort)

	dev->state = state;
 * Return 0 on success, or -errno on error. On error, the device will switch
 * to XenbusStateClosing, and the error will be saved in the store.
	complete(&dev->down);
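/*
 * Usage sketch (not from this file): a frontend advancing to Connected once
 * its rings are set up. setup_rings() is a hypothetical helper; the switch
 * is a no-op when dev->state already equals the requested state.
 */
static int frontend_connect(struct xenbus_device *dev)
{
	int err = setup_rings(dev);	/* hypothetical ring/evtchn setup */

	if (err) {
		xenbus_dev_fatal(dev, err, "setting up rings");
		return err;
	}
	return xenbus_switch_state(dev, XenbusStateConnected);
}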
	len = sprintf(printf_buffer, "%i ", -err);
	vsnprintf(printf_buffer + len, PRINTF_BUFFER_SIZE - len, fmt, ap);

	dev_err(&dev->dev, "%s\n", printf_buffer);

	path_buffer = kasprintf(GFP_KERNEL, "error/%s", dev->nodename);
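/*
 * Usage sketch (not from this file): xenbus_dev_error() and
 * xenbus_dev_fatal() both funnel into the formatting above, which logs via
 * dev_err() and mirrors the message under the device's error/ node in the
 * store. The "ring-ref" key is illustrative.
 */
static int read_ring_ref(struct xenbus_device *dev, unsigned int *ring_ref)
{
	int err = xenbus_scanf(XBT_NIL, dev->otherend, "ring-ref",
			       "%u", ring_ref);

	if (err < 0) {
		/* Non-fatal report: no state change, just log and record. */
		xenbus_dev_error(dev, err, "reading %s/ring-ref",
				 dev->otherend);
		return err;
	}
	return 0;
}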
 * -errno on error. On error, the device will switch to
 * XenbusStateClosing, and the error will be saved in the store.
	err = gnttab_grant_foreign_access(dev->otherend_id, gfn, 0);
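/*
 * Usage sketch (not from this file): allocating and granting one shared
 * page. The helper name is hypothetical; on failure xenbus_grant_ring() has
 * already reported the error and moved the device towards Closing.
 */
static int alloc_and_grant_ring(struct xenbus_device *dev, void **sring,
				grant_ref_t *gref)
{
	void *page = (void *)get_zeroed_page(GFP_KERNEL);
	int err;

	if (!page)
		return -ENOMEM;
	err = xenbus_grant_ring(dev, page, 1, gref);	/* one page, one gref */
	if (err) {
		free_page((unsigned long)page);
		return err;
	}
	*sring = page;
	return 0;
}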
 * created local port to *port. Return 0 on success, or -errno on error. On
 * error, the device will switch to XenbusStateClosing, and the error will be
 * saved in the store.

	alloc_unbound.remote_dom = dev->otherend_id;

 * Free an existing event channel. Returns 0 on success or -errno on error.
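/*
 * Usage sketch (not from this file): pairing the two event-channel helpers.
 * The "event-channel" key is the conventional store node; the helper name is
 * hypothetical, and teardown mirrors setup.
 */
static int setup_evtchn(struct xenbus_device *dev, evtchn_port_t *port)
{
	int err = xenbus_alloc_evtchn(dev, port);

	if (err)
		return err;
	err = xenbus_printf(XBT_NIL, dev->nodename, "event-channel",
			    "%u", *port);
	if (err) {
		xenbus_free_evtchn(dev, *port);
		return err;
	}
	return 0;
}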
 * sets *vaddr to that address. Returns 0 on success, and -errno on
 * failure.

		return -EINVAL;

		return -ENOMEM;

	info->node = kzalloc(sizeof(*info->node), GFP_KERNEL);
	if (!info->node)
		err = -ENOMEM;
	else
		err = ring_ops->map(dev, info, gnt_refs, nr_grefs, vaddr);

	kfree(info->node);
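/*
 * Usage sketch (not from this file): a backend mapping a single-page
 * frontend ring by grant reference, with the matching teardown. ring_ref is
 * assumed to have been read from the store; the helper names are
 * hypothetical.
 */
static int backend_map_ring(struct xenbus_device *dev, grant_ref_t ring_ref,
			    void **addr)
{
	/* Allocates the virtual area and maps the grant in one step. */
	return xenbus_map_ring_valloc(dev, &ring_ref, 1, addr);
}

static void backend_unmap_ring(struct xenbus_device *dev, void *addr)
{
	/* Must be paired with xenbus_map_ring_valloc() on the same vaddr. */
	xenbus_unmap_ring_vfree(dev, addr);
}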
/* N.B. sizeof(phys_addr_t) doesn't always equal sizeof(unsigned
 * long), e.g. 32-on-64. Caller is responsible for preparing the
 * right array to feed into this function.
 */
		return -EINVAL;

		gnttab_set_map_op(&info->map[i], info->phys_addrs[i], flags,
				  gnt_refs[i], dev->otherend_id);

	gnttab_batch_map(info->map, i);

		if (info->map[i].status != GNTST_okay) {
			xenbus_dev_fatal(dev, info->map[i].status,
					 "mapping in shared page %d from domain %d",
					 gnt_refs[i], dev->otherend_id);

			handles[i] = info->map[i].handle;
			gnttab_set_unmap_op(&info->unmap[j],
					    info->phys_addrs[i],

	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, info->unmap, j))

		if (info->unmap[i].status != GNTST_okay) {

	return -ENOENT;
		return -EINVAL;
	info->phys_addrs[info->idx] = vaddr;
	info->addrs[info->idx] = vaddr;

	info->idx++;
	struct xenbus_map_node *node = info->node;

	err = xen_alloc_unpopulated_pages(nr_pages, node->hvm.pages);

	gnttab_foreach_grant(node->hvm.pages, nr_grefs,

	err = __xenbus_map_ring(dev, gnt_ref, nr_grefs, node->handles,

	node->nr_handles = nr_grefs;

	addr = vmap(node->hvm.pages, nr_pages, VM_MAP | VM_IOREMAP,

		err = -ENOMEM;

	node->hvm.addr = addr;

	list_add(&node->next, &xenbus_valloc_pages);

	info->node = NULL;

	xenbus_unmap_ring(dev, node->handles, nr_grefs, info->addrs);

	xen_free_unpopulated_pages(nr_pages, node->hvm.pages);
	return ring_ops->unmap(dev, vaddr);
	info->phys_addrs[info->idx++] = arbitrary_virt_to_machine(pte).maddr;
	struct xenbus_map_node *node = info->node;

	int err = -ENOMEM;

		return -ENOMEM;
	if (apply_to_page_range(&init_mm, (unsigned long)area->addr,

	err = __xenbus_map_ring(dev, gnt_refs, nr_grefs, node->handles,

	node->nr_handles = nr_grefs;
	node->pv.area = area;

	list_add(&node->next, &xenbus_valloc_pages);

	*vaddr = area->addr;
	info->node = NULL;
		if (node->pv.area->addr == vaddr) {
			list_del(&node->next);

		xenbus_dev_error(dev, -ENOENT,

	for (i = 0; i < node->nr_handles; i++) {

		unmap[i].handle = node->handles[i];

	for (i = 0; i < node->nr_handles; i++) {

				node->handles[i], unmap[i].status);

		free_vm_area(node->pv.area);

			node->pv.area, node->nr_handles);
	info->addrs[info->idx] = (unsigned long)gfn_to_virt(gfn);

	info->idx++;
		addr = node->hvm.addr;

			list_del(&node->next);

		xenbus_dev_error(dev, -ENOENT,

	nr_pages = XENBUS_PAGES(node->nr_handles);

	gnttab_foreach_grant(node->hvm.pages, node->nr_handles,

	rv = xenbus_unmap_ring(dev, node->handles, node->nr_handles,

	xen_free_unpopulated_pages(nr_pages, node->hvm.pages);