
Search results for references to "group" (results 1 – 25 of 504), sorted by relevance


/drivers/vfio/
group.c
26 static struct vfio_device *vfio_device_get_from_name(struct vfio_group *group, in vfio_device_get_from_name() argument
31 mutex_lock(&group->device_lock); in vfio_device_get_from_name()
32 list_for_each_entry(it, &group->device_list, group_next) { in vfio_device_get_from_name()
50 mutex_unlock(&group->device_lock); in vfio_device_get_from_name()
58 static bool vfio_group_has_iommu(struct vfio_group *group) in vfio_group_has_iommu() argument
60 lockdep_assert_held(&group->group_lock); in vfio_group_has_iommu()
65 WARN_ON(!group->container != !group->container_users); in vfio_group_has_iommu()
67 return group->container || group->iommufd; in vfio_group_has_iommu()
76 static int vfio_group_ioctl_unset_container(struct vfio_group *group) in vfio_group_ioctl_unset_container() argument
80 mutex_lock(&group->group_lock); in vfio_group_ioctl_unset_container()
[all …]
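
The vfio group.c lines above walk the group's device_list under group->device_lock to find a device by name. Below is a minimal, hedged sketch of that lock-protected list lookup; struct my_group, struct my_device and my_device_get_from_name() are illustrative names, not the kernel's own types.

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/string.h>

struct my_device {
	const char *name;
	struct list_head group_next;	/* linked into my_group.device_list */
};

struct my_group {
	struct mutex device_lock;
	struct list_head device_list;
};

static struct my_device *my_device_get_from_name(struct my_group *group,
						 const char *name)
{
	struct my_device *it, *found = NULL;

	mutex_lock(&group->device_lock);
	list_for_each_entry(it, &group->device_list, group_next) {
		if (!strcmp(it->name, name)) {
			found = it;	/* real code would also take a reference here */
			break;
		}
	}
	mutex_unlock(&group->device_lock);

	return found;
}
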
container.c
167 device->group->container->iommu_driver; in vfio_device_container_register()
171 device->group->container->iommu_data, device); in vfio_device_container_register()
177 device->group->container->iommu_driver; in vfio_device_container_unregister()
181 device->group->container->iommu_data, device); in vfio_device_container_unregister()
239 struct vfio_group *group; in __vfio_container_attach_groups() local
242 list_for_each_entry(group, &container->group_list, container_next) { in __vfio_container_attach_groups()
243 ret = driver->ops->attach_group(data, group->iommu_group, in __vfio_container_attach_groups()
244 group->type); in __vfio_container_attach_groups()
252 list_for_each_entry_continue_reverse(group, &container->group_list, in __vfio_container_attach_groups()
254 driver->ops->detach_group(data, group->iommu_group); in __vfio_container_attach_groups()
[all …]
/drivers/infiniband/hw/mlx4/
mcg.c
50 #define mcg_warn_group(group, format, arg...) \ argument
52 (group)->name, group->demux->port, ## arg)
54 #define mcg_debug_group(group, format, arg...) \ argument
56 (group)->name, (group)->demux->port, ## arg)
58 #define mcg_error_group(group, format, arg...) \ argument
59 pr_err(" %16s: " format, (group)->name, ## arg)
136 struct mcast_group *group; member
144 mcg_warn_group(group, "did not expect to reach zero\n"); \
166 struct mcast_group *group; in mcast_find() local
170 group = rb_entry(node, struct mcast_group, node); in mcast_find()
[all …]
/drivers/iommu/
iommu.c
75 #define for_each_group_device(group, pos) \ argument
76 list_for_each_entry(pos, &(group)->devices, list)
80 ssize_t (*show)(struct iommu_group *group, char *buf);
81 ssize_t (*store)(struct iommu_group *group,
104 struct iommu_group *group);
110 static int __iommu_device_set_domain(struct iommu_group *group,
114 static int __iommu_group_set_domain_internal(struct iommu_group *group,
117 static int __iommu_group_set_domain(struct iommu_group *group, in __iommu_group_set_domain() argument
120 return __iommu_group_set_domain_internal(group, new_domain, 0); in __iommu_group_set_domain()
122 static void __iommu_group_set_domain_nofail(struct iommu_group *group, in __iommu_group_set_domain_nofail() argument
[all …]
io-pgfault.c
74 struct iopf_group *group; in iopf_handler() local
79 group = container_of(work, struct iopf_group, work); in iopf_handler()
80 domain = iommu_get_domain_for_dev_pasid(group->dev, in iopf_handler()
81 group->last_fault.fault.prm.pasid, 0); in iopf_handler()
85 list_for_each_entry_safe(iopf, next, &group->faults, list) { in iopf_handler()
99 iopf_complete_group(group->dev, &group->last_fault, status); in iopf_handler()
100 kfree(group); in iopf_handler()
146 struct iopf_group *group; in iommu_queue_iopf() local
180 group = kzalloc(sizeof(*group), GFP_KERNEL); in iommu_queue_iopf()
181 if (!group) { in iommu_queue_iopf()
[all …]
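
The io-pgfault.c excerpt above uses the common pattern of embedding a work_struct in a fault group, recovering the group with container_of() in the handler, and freeing it there. A hedged sketch of that idiom follows; struct my_fault_group, my_handler() and my_queue_fault() are illustrative, not kernel APIs.

#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/errno.h>

/* Illustrative stand-in for a fault group; not the kernel's iopf_group. */
struct my_fault_group {
	struct work_struct work;	/* embedded so the handler can find us */
	int fault_id;
};

static void my_handler(struct work_struct *work)
{
	/* Recover the containing group from the embedded work item. */
	struct my_fault_group *group =
		container_of(work, struct my_fault_group, work);

	/* ... process group->fault_id ... */
	kfree(group);			/* the handler owns and frees the group */
}

static int my_queue_fault(int fault_id)
{
	struct my_fault_group *group = kzalloc(sizeof(*group), GFP_KERNEL);

	if (!group)
		return -ENOMEM;
	group->fault_id = fault_id;
	INIT_WORK(&group->work, my_handler);
	schedule_work(&group->work);	/* my_handler() runs later in a workqueue */
	return 0;
}
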
/drivers/infiniband/core/
multicast.c
117 struct mcast_group *group; member
133 struct mcast_group *group; in mcast_find() local
137 group = rb_entry(node, struct mcast_group, node); in mcast_find()
138 ret = memcmp(mgid->raw, group->rec.mgid.raw, sizeof *mgid); in mcast_find()
140 return group; in mcast_find()
151 struct mcast_group *group, in mcast_insert() argument
163 ret = memcmp(group->rec.mgid.raw, cur_group->rec.mgid.raw, in mcast_insert()
164 sizeof group->rec.mgid); in mcast_insert()
174 rb_link_node(&group->node, parent, link); in mcast_insert()
175 rb_insert_color(&group->node, &port->table); in mcast_insert()
[all …]
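
The multicast.c lines above (and the similar mlx4 mcg.c match earlier) follow the standard Linux rbtree lookup pattern keyed by MGID: walk from the root, compare keys with memcmp(), and descend left or right. A hedged sketch of the lookup half, with illustrative names (struct my_group, my_group_find) that are not taken from the kernel:

#include <linux/rbtree.h>
#include <linux/string.h>
#include <linux/types.h>

struct my_group {
	struct rb_node node;	/* linked into an rb_root owned by the port */
	u8 key[16];		/* e.g. a multicast GID */
};

static struct my_group *my_group_find(struct rb_root *root, const u8 *key)
{
	struct rb_node *n = root->rb_node;

	while (n) {
		struct my_group *group = rb_entry(n, struct my_group, node);
		int ret = memcmp(key, group->key, sizeof(group->key));

		if (!ret)
			return group;	/* exact match */
		n = ret < 0 ? n->rb_left : n->rb_right;
	}
	return NULL;			/* not in the tree */
}
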
cma_configfs.c
47 struct config_group group; member
59 struct config_group *group; in to_dev_port_group() local
64 group = container_of(item, struct config_group, cg_item); in to_dev_port_group()
65 return container_of(group, struct cma_dev_port_group, group); in to_dev_port_group()
77 struct cma_dev_port_group *group = to_dev_port_group(item); in cma_configfs_params_get() local
80 if (!group) in cma_configfs_params_get()
84 group->cma_dev_group->name); in cma_configfs_params_get()
89 *pgroup = group; in cma_configfs_params_get()
103 struct cma_dev_port_group *group; in default_roce_mode_show() local
107 ret = cma_configfs_params_get(item, &cma_dev, &group); in default_roce_mode_show()
[all …]
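
cma_configfs.c above recovers its per-port structure from a config_item by applying container_of() twice (item -> config_group -> private struct). A hedged sketch of that double-container_of idiom; struct my_port_group and to_my_port_group() are illustrative names.

#include <linux/configfs.h>

/* Illustrative configfs-backed object with an embedded config_group. */
struct my_port_group {
	struct config_group group;
	unsigned int port_num;
};

static struct my_port_group *to_my_port_group(struct config_item *item)
{
	struct config_group *group;

	if (!item)
		return NULL;

	/* A config_item is embedded in a config_group ... */
	group = container_of(item, struct config_group, cg_item);
	/* ... which is in turn embedded in our private structure. */
	return container_of(group, struct my_port_group, group);
}
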
/drivers/net/ethernet/microchip/sparx5/
sparx5_sdlb.c
59 static void sparx5_sdlb_group_disable(struct sparx5 *sparx5, u32 group) in sparx5_sdlb_group_disable() argument
63 ANA_AC_SDLB_PUP_CTRL(group)); in sparx5_sdlb_group_disable()
66 static void sparx5_sdlb_group_enable(struct sparx5 *sparx5, u32 group) in sparx5_sdlb_group_enable() argument
70 ANA_AC_SDLB_PUP_CTRL(group)); in sparx5_sdlb_group_enable()
73 static u32 sparx5_sdlb_group_get_first(struct sparx5 *sparx5, u32 group) in sparx5_sdlb_group_get_first() argument
77 val = spx5_rd(sparx5, ANA_AC_SDLB_XLB_START(group)); in sparx5_sdlb_group_get_first()
82 static u32 sparx5_sdlb_group_get_next(struct sparx5 *sparx5, u32 group, in sparx5_sdlb_group_get_next() argument
92 static bool sparx5_sdlb_group_is_first(struct sparx5 *sparx5, u32 group, in sparx5_sdlb_group_is_first() argument
95 return lb == sparx5_sdlb_group_get_first(sparx5, group); in sparx5_sdlb_group_is_first()
98 static bool sparx5_sdlb_group_is_last(struct sparx5 *sparx5, u32 group, in sparx5_sdlb_group_is_last() argument
[all …]
sparx5_qos.c
79 static u32 sparx5_lg_get_leak_time(struct sparx5 *sparx5, u32 layer, u32 group) in sparx5_lg_get_leak_time() argument
83 value = spx5_rd(sparx5, HSCH_HSCH_TIMER_CFG(layer, group)); in sparx5_lg_get_leak_time()
87 static void sparx5_lg_set_leak_time(struct sparx5 *sparx5, u32 layer, u32 group, in sparx5_lg_set_leak_time() argument
91 HSCH_HSCH_TIMER_CFG(layer, group)); in sparx5_lg_set_leak_time()
94 static u32 sparx5_lg_get_first(struct sparx5 *sparx5, u32 layer, u32 group) in sparx5_lg_get_first() argument
98 value = spx5_rd(sparx5, HSCH_HSCH_LEAK_CFG(layer, group)); in sparx5_lg_get_first()
102 static u32 sparx5_lg_get_next(struct sparx5 *sparx5, u32 layer, u32 group, in sparx5_lg_get_next() argument
112 static u32 sparx5_lg_get_last(struct sparx5 *sparx5, u32 layer, u32 group) in sparx5_lg_get_last() argument
116 itr = sparx5_lg_get_first(sparx5, layer, group); in sparx5_lg_get_last()
119 next = sparx5_lg_get_next(sparx5, layer, group, itr); in sparx5_lg_get_last()
[all …]
/drivers/gpio/
gpio-lpc32xx.c
168 static inline u32 gpreg_read(struct lpc32xx_gpio_chip *group, unsigned long offset) in gpreg_read() argument
170 return __raw_readl(group->reg_base + offset); in gpreg_read()
173 static inline void gpreg_write(struct lpc32xx_gpio_chip *group, u32 val, unsigned long offset) in gpreg_write() argument
175 __raw_writel(val, group->reg_base + offset); in gpreg_write()
178 static void __set_gpio_dir_p012(struct lpc32xx_gpio_chip *group, in __set_gpio_dir_p012() argument
182 gpreg_write(group, GPIO012_PIN_TO_BIT(pin), in __set_gpio_dir_p012()
183 group->gpio_grp->dir_clr); in __set_gpio_dir_p012()
185 gpreg_write(group, GPIO012_PIN_TO_BIT(pin), in __set_gpio_dir_p012()
186 group->gpio_grp->dir_set); in __set_gpio_dir_p012()
189 static void __set_gpio_dir_p3(struct lpc32xx_gpio_chip *group, in __set_gpio_dir_p3() argument
[all …]
/drivers/clk/renesas/
clk-mstp.c
59 struct mstp_clock_group *group; member
64 static inline u32 cpg_mstp_read(struct mstp_clock_group *group, in cpg_mstp_read() argument
67 return group->width_8bit ? readb(reg) : readl(reg); in cpg_mstp_read()
70 static inline void cpg_mstp_write(struct mstp_clock_group *group, u32 val, in cpg_mstp_write() argument
73 group->width_8bit ? writeb(val, reg) : writel(val, reg); in cpg_mstp_write()
79 struct mstp_clock_group *group = clock->group; in cpg_mstp_clock_endisable() local
85 spin_lock_irqsave(&group->lock, flags); in cpg_mstp_clock_endisable()
87 value = cpg_mstp_read(group, group->smstpcr); in cpg_mstp_clock_endisable()
92 cpg_mstp_write(group, value, group->smstpcr); in cpg_mstp_clock_endisable()
94 if (!group->mstpsr) { in cpg_mstp_clock_endisable()
[all …]
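
clk-mstp.c above wraps register access in small helpers that pick readb()/writeb() or readl()/writel() based on a per-group width flag, and updates bits under the group's spinlock. A hedged sketch of that accessor idiom; struct my_clk_group and the helpers below are illustrative, not the driver's real API.

#include <linux/io.h>
#include <linux/spinlock.h>

struct my_clk_group {
	void __iomem *reg;
	bool width_8bit;	/* some SoCs expose 8-bit control registers */
	spinlock_t lock;
};

static inline u32 my_group_read(struct my_clk_group *group)
{
	return group->width_8bit ? readb(group->reg) : readl(group->reg);
}

static inline void my_group_write(struct my_clk_group *group, u32 val)
{
	if (group->width_8bit)
		writeb(val, group->reg);
	else
		writel(val, group->reg);
}

/* Example: set or clear one enable bit with the group lock held. */
static void my_group_update_bit(struct my_clk_group *group, u32 bit, bool set)
{
	unsigned long flags;
	u32 value;

	spin_lock_irqsave(&group->lock, flags);
	value = my_group_read(group);
	if (set)
		value |= bit;
	else
		value &= ~bit;
	my_group_write(group, value);
	spin_unlock_irqrestore(&group->lock, flags);
}
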
/drivers/net/ethernet/mellanox/mlx5/core/esw/
qos.c
44 static int esw_qos_group_config(struct mlx5_eswitch *esw, struct mlx5_esw_rate_group *group, in esw_qos_group_config() argument
52 group->tsar_ix, in esw_qos_group_config()
57 trace_mlx5_esw_group_qos_config(dev, group, group->tsar_ix, bw_share, max_rate); in esw_qos_group_config()
90 struct mlx5_esw_rate_group *group, in esw_qos_calculate_min_rate_divider() argument
99 struct mlx5_esw_rate_group *group; in esw_qos_calculate_min_rate_divider() local
101 list_for_each_entry(group, &esw->qos.groups, list) { in esw_qos_calculate_min_rate_divider()
102 if (group->min_rate < max_guarantee) in esw_qos_calculate_min_rate_divider()
104 max_guarantee = group->min_rate; in esw_qos_calculate_min_rate_divider()
109 evport->qos.group != group || evport->qos.min_rate < max_guarantee) in esw_qos_calculate_min_rate_divider()
121 if (!group_level && !max_guarantee && group && group->bw_share) in esw_qos_calculate_min_rate_divider()
[all …]
/drivers/pinctrl/aspeed/
pinmux-aspeed.h
513 #define SIG_DESC_LIST_SYM(sig, group) sig_descs_ ## sig ## _ ## group argument
514 #define SIG_DESC_LIST_DECL(sig, group, ...) \ argument
515 static const struct aspeed_sig_desc SIG_DESC_LIST_SYM(sig, group)[] = \
518 #define SIG_EXPR_SYM(sig, group) sig_expr_ ## sig ## _ ## group argument
519 #define SIG_EXPR_DECL_(sig, group, func) \ argument
520 static const struct aspeed_sig_expr SIG_EXPR_SYM(sig, group) = \
524 .ndescs = ARRAY_SIZE(SIG_DESC_LIST_SYM(sig, group)), \
525 .descs = &(SIG_DESC_LIST_SYM(sig, group))[0], \
545 #define SIG_EXPR_DECL(sig, group, func, ...) \ argument
546 SIG_DESC_LIST_DECL(sig, group, __VA_ARGS__); \
[all …]
/drivers/media/platform/renesas/rcar-vin/
rcar-core.c
61 static void rvin_group_cleanup(struct rvin_group *group) in rvin_group_cleanup() argument
63 media_device_cleanup(&group->mdev); in rvin_group_cleanup()
64 mutex_destroy(&group->lock); in rvin_group_cleanup()
67 static int rvin_group_init(struct rvin_group *group, struct rvin_dev *vin, in rvin_group_init() argument
71 struct media_device *mdev = &group->mdev; in rvin_group_init()
75 mutex_init(&group->lock); in rvin_group_init()
78 group->count = 0; in rvin_group_init()
81 group->count++; in rvin_group_init()
83 vin_dbg(vin, "found %u enabled VIN's in DT", group->count); in rvin_group_init()
85 group->link_setup = link_setup; in rvin_group_init()
[all …]
/drivers/net/ethernet/netronome/nfp/flower/
lag_conf.c
108 struct nfp_fl_lag_group *group; in nfp_fl_lag_group_create() local
122 group = kmalloc(sizeof(*group), GFP_KERNEL); in nfp_fl_lag_group_create()
123 if (!group) { in nfp_fl_lag_group_create()
128 group->group_id = id; in nfp_fl_lag_group_create()
129 group->master_ndev = master; in nfp_fl_lag_group_create()
130 group->dirty = true; in nfp_fl_lag_group_create()
131 group->offloaded = false; in nfp_fl_lag_group_create()
132 group->to_remove = false; in nfp_fl_lag_group_create()
133 group->to_destroy = false; in nfp_fl_lag_group_create()
134 group->slave_cnt = 0; in nfp_fl_lag_group_create()
[all …]
/drivers/net/ethernet/mellanox/mlx5/core/esw/diag/
qos_tracepoint.h
37 __field(void *, group)
44 __entry->group = vport->qos.group;
48 __entry->bw_share, __entry->max_rate, __entry->group
64 const struct mlx5_esw_rate_group *group,
66 TP_ARGS(dev, group, tsar_ix),
68 __field(const void *, group)
72 __entry->group = group;
76 __get_str(devname), __entry->group, __entry->tsar_ix
82 const struct mlx5_esw_rate_group *group,
84 TP_ARGS(dev, group, tsar_ix)
[all …]
/drivers/pci/endpoint/
pci-ep-cfs.c
23 struct config_group group; member
33 struct config_group group; member
40 return container_of(to_config_group(item), struct pci_epf_group, group); in to_pci_epf_group()
45 return container_of(to_config_group(item), struct pci_epc_group, group); in to_pci_epc_group()
104 configfs_register_group(&epf_group->group, secondary_epc_group); in pci_ep_cfs_add_secondary_group()
164 configfs_register_group(&epf_group->group, primary_epc_group); in pci_ep_cfs_add_primary_group()
267 struct config_group *group; in pci_ep_cfs_add_epc_group() local
276 group = &epc_group->group; in pci_ep_cfs_add_epc_group()
278 config_group_init_type_name(group, name, &pci_epc_type); in pci_ep_cfs_add_epc_group()
279 ret = configfs_register_group(controllers_group, group); in pci_ep_cfs_add_epc_group()
[all …]
/drivers/soundwire/
generic_bandwidth_allocation.c
198 static int sdw_add_element_group_count(struct sdw_group *group, in sdw_add_element_group_count() argument
201 int num = group->count; in sdw_add_element_group_count()
205 if (rate == group->rates[i]) in sdw_add_element_group_count()
211 if (group->count >= group->max_size) { in sdw_add_element_group_count()
214 group->max_size += 1; in sdw_add_element_group_count()
215 rates = krealloc(group->rates, in sdw_add_element_group_count()
216 (sizeof(int) * group->max_size), in sdw_add_element_group_count()
220 group->rates = rates; in sdw_add_element_group_count()
223 group->rates[group->count++] = rate; in sdw_add_element_group_count()
230 struct sdw_group *group) in sdw_get_group_count() argument
[all …]
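
The soundwire excerpt above grows a small array of sample rates one slot at a time with krealloc(), committing the new pointer only on success. A hedged sketch of that grow-on-demand idiom with illustrative names (struct rate_group, rate_group_add); the real driver's structure differs in detail.

#include <linux/slab.h>
#include <linux/errno.h>

struct rate_group {
	int count;		/* rates currently stored */
	int max_size;		/* slots allocated */
	unsigned int *rates;
};

static int rate_group_add(struct rate_group *group, unsigned int rate)
{
	int i;

	for (i = 0; i < group->count; i++)
		if (group->rates[i] == rate)
			return 0;	/* already recorded */

	if (group->count >= group->max_size) {
		int new_size = group->max_size + 1;
		unsigned int *rates;

		rates = krealloc(group->rates, sizeof(*rates) * new_size,
				 GFP_KERNEL);
		if (!rates)
			return -ENOMEM;	/* old array remains valid on failure */
		group->rates = rates;
		group->max_size = new_size;
	}

	group->rates[group->count++] = rate;
	return 0;
}
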
/drivers/gpu/drm/i915/gt/
intel_gt_mcr.c
247 int group, int instance, u32 value) in rw_with_mcr_steering_fw() argument
263 REG_FIELD_PREP(MTL_MCR_GROUPID, group) | in rw_with_mcr_steering_fw()
268 mcr_ss = GEN11_MCR_SLICE(group) | GEN11_MCR_SUBSLICE(instance); in rw_with_mcr_steering_fw()
293 mcr_ss = GEN8_MCR_SLICE(group) | GEN8_MCR_SUBSLICE(instance); in rw_with_mcr_steering_fw()
324 int group, int instance, in rw_with_mcr_steering() argument
342 val = rw_with_mcr_steering_fw(gt, reg, rw_flag, group, instance, value); in rw_with_mcr_steering()
455 int group, int instance) in intel_gt_mcr_read() argument
457 return rw_with_mcr_steering(gt, reg, FW_REG_READ, group, instance, 0); in intel_gt_mcr_read()
474 int group, int instance) in intel_gt_mcr_unicast_write() argument
476 rw_with_mcr_steering(gt, reg, FW_REG_WRITE, group, instance, value); in intel_gt_mcr_unicast_write()
[all …]
/drivers/dma/idxd/
device.c
385 wq->group = NULL; in idxd_wq_device_reset_cleanup()
670 engine->group = NULL; in idxd_engines_clear_state()
676 struct idxd_group *group; in idxd_groups_clear_state() local
681 group = idxd->groups[i]; in idxd_groups_clear_state()
682 memset(&group->grpcfg, 0, sizeof(group->grpcfg)); in idxd_groups_clear_state()
683 group->num_engines = 0; in idxd_groups_clear_state()
684 group->num_wqs = 0; in idxd_groups_clear_state()
685 group->use_rdbuf_limit = false; in idxd_groups_clear_state()
690 group->rdbufs_allowed = idxd->max_rdbufs; in idxd_groups_clear_state()
691 group->rdbufs_reserved = 0; in idxd_groups_clear_state()
[all …]
/drivers/net/vxlan/
vxlan_mdb.c
65 struct vxlan_mdb_entry_key group; member
377 struct vxlan_mdb_entry_key *group = &cfg->group; in vxlan_mdb_config_group_set() local
381 group->dst.sa.sa_family = AF_INET; in vxlan_mdb_config_group_set()
382 group->dst.sin.sin_addr.s_addr = entry->addr.u.ip4; in vxlan_mdb_config_group_set()
386 group->dst.sa.sa_family = AF_INET6; in vxlan_mdb_config_group_set()
387 group->dst.sin6.sin6_addr = entry->addr.u.ip6; in vxlan_mdb_config_group_set()
393 vxlan_nla_get_addr(&group->src, source_attr); in vxlan_mdb_config_group_set()
396 static bool vxlan_mdb_is_star_g(const struct vxlan_mdb_entry_key *group) in vxlan_mdb_is_star_g() argument
398 return !vxlan_addr_any(&group->dst) && vxlan_addr_any(&group->src); in vxlan_mdb_is_star_g()
401 static bool vxlan_mdb_is_sg(const struct vxlan_mdb_entry_key *group) in vxlan_mdb_is_sg() argument
[all …]
/drivers/usb/gadget/function/
uvc_configfs.c
120 int (*create_children)(struct config_group *group);
125 struct config_group *group = to_config_group(item); in uvcg_config_item_release() local
127 kfree(group); in uvcg_config_item_release()
137 static int uvcg_config_create_children(struct config_group *group, in uvcg_config_create_children() argument
144 return type->create_children(group); in uvcg_config_create_children()
147 ret = uvcg_config_create_group(group, *child); in uvcg_config_create_children()
158 struct config_group *group; in uvcg_config_create_group() local
160 group = kzalloc(sizeof(*group), GFP_KERNEL); in uvcg_config_create_group()
161 if (!group) in uvcg_config_create_group()
164 config_group_init_type_name(group, type->name, &type->type); in uvcg_config_create_group()
[all …]
/drivers/base/
memory.c
197 zone = zone_for_pfn_range(mem->online_type, mem->nid, mem->group, in memory_block_online()
218 nr_pages - nr_vmemmap_pages, zone, mem->group); in memory_block_online()
230 adjust_present_page_count(pfn_to_page(start_pfn), mem->group, in memory_block_online()
261 adjust_present_page_count(pfn_to_page(start_pfn), mem->group, in memory_block_offline()
265 nr_pages - nr_vmemmap_pages, mem->zone, mem->group); in memory_block_offline()
270 mem->group, nr_vmemmap_pages); in memory_block_offline()
415 struct memory_group *group, in print_allowed_zone() argument
421 zone = zone_for_pfn_range(online_type, nid, group, start_pfn, nr_pages); in print_allowed_zone()
434 struct memory_group *group = mem->group; in valid_zones_show() local
455 default_zone = zone_for_pfn_range(MMOP_ONLINE, nid, group, in valid_zones_show()
[all …]
/drivers/s390/block/
dasd_alias.c
89 struct alias_pav_group, group); in _find_group()
97 list_for_each_entry(pos, &lcu->grouplist, group) { in _find_group()
318 struct alias_pav_group *group; in _add_device_to_lcu() local
332 group = _find_group(lcu, &uid); in _add_device_to_lcu()
333 if (!group) { in _add_device_to_lcu()
334 group = kzalloc(sizeof(*group), GFP_ATOMIC); in _add_device_to_lcu()
335 if (!group) in _add_device_to_lcu()
337 memcpy(group->uid.vendor, uid.vendor, sizeof(uid.vendor)); in _add_device_to_lcu()
338 memcpy(group->uid.serial, uid.serial, sizeof(uid.serial)); in _add_device_to_lcu()
339 group->uid.ssid = uid.ssid; in _add_device_to_lcu()
[all …]
/drivers/pinctrl/tegra/
pinctrl-tegra.c
50 unsigned group) in tegra_pinctrl_get_group_name() argument
54 return pmx->soc->groups[group].name; in tegra_pinctrl_get_group_name()
58 unsigned group, in tegra_pinctrl_get_group_pins() argument
64 *pins = pmx->soc->groups[group].pins; in tegra_pinctrl_get_group_pins()
65 *num_pins = pmx->soc->groups[group].npins; in tegra_pinctrl_get_group_pins()
116 const char *group; in tegra_pinctrl_dt_subnode_to_map() local
159 of_property_for_each_string(np, "nvidia,pins", prop, group) { in tegra_pinctrl_dt_subnode_to_map()
162 reserved_maps, num_maps, group, in tegra_pinctrl_dt_subnode_to_map()
170 reserved_maps, num_maps, group, in tegra_pinctrl_dt_subnode_to_map()
253 unsigned group) in tegra_pinctrl_set_mux() argument
[all …]
