Searched refs:md (Results 1 – 25 of 138), sorted by relevance

/drivers/video/fbdev/omap/
lcd_mipid.c
54 static void mipid_transfer(struct mipid_device *md, int cmd, const u8 *wbuf, in mipid_transfer() argument
62 BUG_ON(md->spi == NULL); in mipid_transfer()
103 r = spi_sync(md->spi, &m); in mipid_transfer()
105 dev_dbg(&md->spi->dev, "spi_sync %d\n", r); in mipid_transfer()
111 static inline void mipid_cmd(struct mipid_device *md, int cmd) in mipid_cmd() argument
113 mipid_transfer(md, cmd, NULL, 0, NULL, 0); in mipid_cmd()
116 static inline void mipid_write(struct mipid_device *md, in mipid_write() argument
119 mipid_transfer(md, reg, buf, len, NULL, 0); in mipid_write()
122 static inline void mipid_read(struct mipid_device *md, in mipid_read() argument
125 mipid_transfer(md, reg, NULL, 0, buf, len); in mipid_read()
[all …]
/drivers/md/
dm.c
301 int dm_deleting_md(struct mapped_device *md) in dm_deleting_md() argument
303 return test_bit(DMF_DELETING, &md->flags); in dm_deleting_md()
308 struct mapped_device *md; in dm_blk_open() local
312 md = bdev->bd_disk->private_data; in dm_blk_open()
313 if (!md) in dm_blk_open()
316 if (test_bit(DMF_FREEING, &md->flags) || in dm_blk_open()
317 dm_deleting_md(md)) { in dm_blk_open()
318 md = NULL; in dm_blk_open()
322 dm_get(md); in dm_blk_open()
323 atomic_inc(&md->open_count); in dm_blk_open()
[all …]
dm-era-target.c
34 struct writeset_metadata md; member
94 ws->md.nr_bits = nr_blocks; in writeset_init()
95 r = setup_on_disk_bitset(info, ws->md.nr_bits, &ws->md.root); in writeset_init()
139 r = dm_bitset_set_bit(info, ws->md.root, block, &ws->md.root); in writeset_test_and_set()
301 static int superblock_read_lock(struct era_metadata *md, in superblock_read_lock() argument
304 return dm_bm_read_lock(md->bm, SUPERBLOCK_LOCATION, in superblock_read_lock()
308 static int superblock_lock_zero(struct era_metadata *md, in superblock_lock_zero() argument
311 return dm_bm_write_lock_zero(md->bm, SUPERBLOCK_LOCATION, in superblock_lock_zero()
315 static int superblock_lock(struct era_metadata *md, in superblock_lock() argument
318 return dm_bm_write_lock(md->bm, SUPERBLOCK_LOCATION, in superblock_lock()
[all …]
dm-ima.c
67 static int dm_ima_alloc_and_copy_name_uuid(struct mapped_device *md, char **dev_name, in dm_ima_alloc_and_copy_name_uuid() argument
83 r = dm_copy_name_and_uuid(md, *dev_name, *dev_uuid); in dm_ima_alloc_and_copy_name_uuid()
102 static int dm_ima_alloc_and_copy_device_data(struct mapped_device *md, char **device_data, in dm_ima_alloc_and_copy_device_data() argument
108 r = dm_ima_alloc_and_copy_name_uuid(md, &dev_name, &dev_uuid, noio); in dm_ima_alloc_and_copy_device_data()
120 dev_name, dev_uuid, md->disk->major, md->disk->first_minor, in dm_ima_alloc_and_copy_device_data()
121 md->disk->minors, num_targets); in dm_ima_alloc_and_copy_device_data()
149 static int dm_ima_alloc_and_copy_capacity_str(struct mapped_device *md, char **capacity_str, in dm_ima_alloc_and_copy_capacity_str() argument
154 capacity = get_capacity(md->disk); in dm_ima_alloc_and_copy_capacity_str()
169 void dm_ima_reset_data(struct mapped_device *md) in dm_ima_reset_data() argument
171 memset(&(md->ima), 0, sizeof(md->ima)); in dm_ima_reset_data()
[all …]
dm-rq.c
19 struct mapped_device *md; member
60 int dm_request_based(struct mapped_device *md) in dm_request_based() argument
62 return queue_is_mq(md->queue); in dm_request_based()
128 static void rq_end_stats(struct mapped_device *md, struct request *orig) in rq_end_stats() argument
130 if (unlikely(dm_stats_used(&md->stats))) { in rq_end_stats()
133 dm_stats_account_io(&md->stats, rq_data_dir(orig), in rq_end_stats()
144 static void rq_completed(struct mapped_device *md) in rq_completed() argument
149 dm_put(md); in rq_completed()
160 struct mapped_device *md = tio->md; in dm_end_request() local
166 rq_end_stats(md, rq); in dm_end_request()
[all …]
dm-zone.c
20 static int dm_blk_do_report_zones(struct mapped_device *md, struct dm_table *t, in dm_blk_do_report_zones() argument
24 struct gendisk *disk = md->disk; in dm_blk_do_report_zones()
58 struct mapped_device *md = disk->private_data; in dm_blk_report_zones() local
62 if (dm_suspended_md(md)) in dm_blk_report_zones()
65 map = dm_get_live_table(md, &srcu_idx); in dm_blk_report_zones()
69 ret = dm_blk_do_report_zones(md, map, sector, nr_zones, cb, data); in dm_blk_report_zones()
71 dm_put_live_table(md, srcu_idx); in dm_blk_report_zones()
124 bool dm_is_zone_write(struct mapped_device *md, struct bio *bio) in dm_is_zone_write() argument
126 struct request_queue *q = md->queue; in dm_is_zone_write()
141 void dm_cleanup_zoned_dev(struct mapped_device *md) in dm_cleanup_zoned_dev() argument
[all …]
dm-ioctl.c
49 struct mapped_device *md; member
92 dm_get(hc->md); in __get_name_cell()
109 dm_get(hc->md); in __get_uuid_cell()
182 struct mapped_device *md; in __get_dev_cell() local
185 md = dm_get_md(huge_decode_dev(dev)); in __get_dev_cell()
186 if (!md) in __get_dev_cell()
189 hc = dm_get_mdptr(md); in __get_dev_cell()
191 dm_put(md); in __get_dev_cell()
202 struct mapped_device *md) in alloc_cell() argument
229 hc->md = md; in alloc_cell()
[all …]
dm.h
81 void dm_lock_md_type(struct mapped_device *md);
82 void dm_unlock_md_type(struct mapped_device *md);
83 void dm_set_md_type(struct mapped_device *md, enum dm_queue_mode type);
84 enum dm_queue_mode dm_get_md_type(struct mapped_device *md);
85 struct target_type *dm_get_immutable_target_type(struct mapped_device *md);
87 int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t);
111 void dm_cleanup_zoned_dev(struct mapped_device *md);
114 bool dm_is_zone_write(struct mapped_device *md, struct bio *bio);
117 static inline void dm_cleanup_zoned_dev(struct mapped_device *md) {} in dm_cleanup_zoned_dev() argument
119 static inline bool dm_is_zone_write(struct mapped_device *md, struct bio *bio) in dm_is_zone_write() argument
[all …]
dm-sysfs.c
26 struct mapped_device *md; in dm_attr_show() local
33 md = dm_get_from_kobject(kobj); in dm_attr_show()
34 if (!md) in dm_attr_show()
37 ret = dm_attr->show(md, page); in dm_attr_show()
38 dm_put(md); in dm_attr_show()
51 struct mapped_device *md; in dm_attr_store() local
58 md = dm_get_from_kobject(kobj); in dm_attr_store()
59 if (!md) in dm_attr_store()
62 ret = dm_attr->store(md, page, count); in dm_attr_store()
63 dm_put(md); in dm_attr_store()
[all …]
/drivers/net/mdio/
mdio-mux-bcm-iproc.c
57 static void mdio_mux_iproc_config(struct iproc_mdiomux_desc *md) in mdio_mux_iproc_config() argument
63 val = readl(md->base + MDIO_SCAN_CTRL_OFFSET); in mdio_mux_iproc_config()
65 writel(val, md->base + MDIO_SCAN_CTRL_OFFSET); in mdio_mux_iproc_config()
67 if (md->core_clk) { in mdio_mux_iproc_config()
71 divisor = clk_get_rate(md->core_clk) / MDIO_OPERATING_FREQUENCY; in mdio_mux_iproc_config()
75 writel(val, md->base + MDIO_RATE_ADJ_EXT_OFFSET); in mdio_mux_iproc_config()
76 writel(val, md->base + MDIO_RATE_ADJ_INT_OFFSET); in mdio_mux_iproc_config()
136 struct iproc_mdiomux_desc *md = bus->priv; in iproc_mdiomux_read() local
139 ret = start_miim_ops(md->base, phyid, reg, 0, MDIO_CTRL_READ_OP); in iproc_mdiomux_read()
149 struct iproc_mdiomux_desc *md = bus->priv; in iproc_mdiomux_write() local
[all …]
mdio-mux-bcm6368.c
39 struct bcm6368_mdiomux_desc *md = bus->priv; in bcm6368_mdiomux_read() local
43 __raw_writel(0, md->base + MDIOC_REG); in bcm6368_mdiomux_read()
48 if (md->ext_phy) in bcm6368_mdiomux_read()
51 __raw_writel(reg, md->base + MDIOC_REG); in bcm6368_mdiomux_read()
53 ret = __raw_readw(md->base + MDIOD_REG); in bcm6368_mdiomux_read()
61 struct bcm6368_mdiomux_desc *md = bus->priv; in bcm6368_mdiomux_write() local
64 __raw_writel(0, md->base + MDIOC_REG); in bcm6368_mdiomux_write()
69 if (md->ext_phy) in bcm6368_mdiomux_write()
73 __raw_writel(reg, md->base + MDIOC_REG); in bcm6368_mdiomux_write()
82 struct bcm6368_mdiomux_desc *md = data; in bcm6368_mdiomux_switch_fn() local
[all …]
/drivers/soundwire/
master.c
42 struct sdw_master_device *md = dev_to_sdw_master_device(dev); \
43 return sprintf(buf, format_string, md->bus->prop.field); \
59 struct sdw_master_device *md = dev_to_sdw_master_device(dev); in clock_frequencies_show() local
63 for (i = 0; i < md->bus->prop.num_clk_freq; i++) in clock_frequencies_show()
65 md->bus->prop.clk_freq[i]); in clock_frequencies_show()
75 struct sdw_master_device *md = dev_to_sdw_master_device(dev); in clock_gears_show() local
79 for (i = 0; i < md->bus->prop.num_clk_gears; i++) in clock_gears_show()
81 md->bus->prop.clk_gears[i]); in clock_gears_show()
105 struct sdw_master_device *md = dev_to_sdw_master_device(dev); in sdw_master_device_release() local
107 kfree(md); in sdw_master_device_release()
[all …]
/drivers/clk/qcom/
clk-regmap-mux-div.c
23 int mux_div_set_src_div(struct clk_regmap_mux_div *md, u32 src, u32 div) in mux_div_set_src_div() argument
27 const char *name = clk_hw_get_name(&md->clkr.hw); in mux_div_set_src_div()
29 val = (div << md->hid_shift) | (src << md->src_shift); in mux_div_set_src_div()
30 mask = ((BIT(md->hid_width) - 1) << md->hid_shift) | in mux_div_set_src_div()
31 ((BIT(md->src_width) - 1) << md->src_shift); in mux_div_set_src_div()
33 ret = regmap_update_bits(md->clkr.regmap, CFG_RCGR + md->reg_offset, in mux_div_set_src_div()
38 ret = regmap_update_bits(md->clkr.regmap, CMD_RCGR + md->reg_offset, in mux_div_set_src_div()
45 ret = regmap_read(md->clkr.regmap, CMD_RCGR + md->reg_offset, in mux_div_set_src_div()
59 static void mux_div_get_src_div(struct clk_regmap_mux_div *md, u32 *src, in mux_div_get_src_div() argument
63 const char *name = clk_hw_get_name(&md->clkr.hw); in mux_div_get_src_div()
[all …]
/drivers/rapidio/devices/
rio_mport_cdev.c
123 struct mport_dev *md; member
199 struct mport_dev *md; member
261 struct rio_mport *mport = priv->md->mport; in rio_mport_maint_rd()
306 struct rio_mport *mport = priv->md->mport; in rio_mport_maint_wr()
359 rio_mport_create_outbound_mapping(struct mport_dev *md, struct file *filp, in rio_mport_create_outbound_mapping() argument
363 struct rio_mport *mport = md->mport; in rio_mport_create_outbound_mapping()
383 map->md = md; in rio_mport_create_outbound_mapping()
385 list_add_tail(&map->node, &md->mappings); in rio_mport_create_outbound_mapping()
393 rio_mport_get_outbound_mapping(struct mport_dev *md, struct file *filp, in rio_mport_get_outbound_mapping() argument
400 mutex_lock(&md->buf_mutex); in rio_mport_get_outbound_mapping()
[all …]
/drivers/dma/
uniphier-mdmac.c
62 struct uniphier_mdmac_desc *md; member
94 mc->md = NULL; in uniphier_mdmac_next_desc()
100 mc->md = to_uniphier_mdmac_desc(vd); in uniphier_mdmac_next_desc()
102 return mc->md; in uniphier_mdmac_next_desc()
107 struct uniphier_mdmac_desc *md) in uniphier_mdmac_handle() argument
114 sg = &md->sgl[md->sg_cur]; in uniphier_mdmac_handle()
116 if (md->dir == DMA_MEM_TO_DEV) { in uniphier_mdmac_handle()
147 struct uniphier_mdmac_desc *md; in uniphier_mdmac_start() local
149 md = uniphier_mdmac_next_desc(mc); in uniphier_mdmac_start()
150 if (md) in uniphier_mdmac_start()
[all …]
milbeaut-hdmac.c
70 struct milbeaut_hdmac_desc *md; member
103 mc->md = NULL; in milbeaut_hdmac_next_desc()
109 mc->md = to_milbeaut_hdmac_desc(vd); in milbeaut_hdmac_next_desc()
111 return mc->md; in milbeaut_hdmac_next_desc()
116 struct milbeaut_hdmac_desc *md) in milbeaut_chan_start() argument
122 sg = &md->sgl[md->sg_cur]; in milbeaut_chan_start()
126 if (md->dir == DMA_MEM_TO_DEV) { in milbeaut_chan_start()
164 struct milbeaut_hdmac_desc *md; in milbeaut_hdmac_start() local
166 md = milbeaut_hdmac_next_desc(mc); in milbeaut_hdmac_start()
167 if (md) in milbeaut_hdmac_start()
[all …]
milbeaut-xdmac.c
70 struct milbeaut_xdmac_desc *md; member
100 mc->md = NULL; in milbeaut_xdmac_next_desc()
106 mc->md = to_milbeaut_xdmac_desc(vd); in milbeaut_xdmac_next_desc()
108 return mc->md; in milbeaut_xdmac_next_desc()
113 struct milbeaut_xdmac_desc *md) in milbeaut_chan_start() argument
118 val = md->len - 1; in milbeaut_chan_start()
121 val = md->src; in milbeaut_chan_start()
124 val = md->dst; in milbeaut_chan_start()
152 struct milbeaut_xdmac_desc *md; in milbeaut_xdmac_start() local
154 md = milbeaut_xdmac_next_desc(mc); in milbeaut_xdmac_start()
[all …]
/drivers/mmc/core/
block.c
168 struct mmc_blk_data *md; member
187 struct mmc_blk_data *md; in mmc_blk_get() local
190 md = disk->private_data; in mmc_blk_get()
191 if (md && !kref_get_unless_zero(&md->kref)) in mmc_blk_get()
192 md = NULL; in mmc_blk_get()
195 return md; in mmc_blk_get()
206 struct mmc_blk_data *md = container_of(ref, struct mmc_blk_data, kref); in mmc_blk_kref_release() local
209 devidx = mmc_get_devidx(md->disk); in mmc_blk_kref_release()
213 md->disk->private_data = NULL; in mmc_blk_kref_release()
216 put_disk(md->disk); in mmc_blk_kref_release()
[all …]
/drivers/firmware/efi/
memmap.c
250 int __init efi_memmap_split_count(efi_memory_desc_t *md, struct range *range) in efi_memmap_split_count() argument
256 start = md->phys_addr; in efi_memmap_split_count()
257 end = start + (md->num_pages << EFI_PAGE_SHIFT) - 1; in efi_memmap_split_count()
294 efi_memory_desc_t *md; in efi_memmap_insert() local
320 md = new; in efi_memmap_insert()
321 start = md->phys_addr; in efi_memmap_insert()
322 end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - 1; in efi_memmap_insert()
325 md->attribute |= m_attr; in efi_memmap_insert()
330 md->attribute |= m_attr; in efi_memmap_insert()
331 md->num_pages = (m_end - md->phys_addr + 1) >> in efi_memmap_insert()
[all …]
efi-init.c
25 static int __init is_memory(efi_memory_desc_t *md) in is_memory() argument
27 if (md->attribute & (EFI_MEMORY_WB|EFI_MEMORY_WT|EFI_MEMORY_WC)) in is_memory()
39 efi_memory_desc_t *md; in efi_to_phys() local
41 for_each_efi_memory_desc(md) { in efi_to_phys()
42 if (!(md->attribute & EFI_MEMORY_RUNTIME)) in efi_to_phys()
44 if (md->virt_addr == 0) in efi_to_phys()
47 if (md->virt_addr <= addr && in efi_to_phys()
48 (addr - md->virt_addr) < (md->num_pages << EFI_PAGE_SHIFT)) in efi_to_phys()
49 return md->phys_addr + addr - md->virt_addr; in efi_to_phys()
134 static __init int is_usable_memory(efi_memory_desc_t *md) in is_usable_memory() argument
[all …]
memattr.c
60 efi_memory_desc_t *md; in entry_is_valid() local
84 for_each_efi_memory_desc(md) { in entry_is_valid()
85 u64 md_paddr = md->phys_addr; in entry_is_valid()
86 u64 md_size = md->num_pages << EFI_PAGE_SHIFT; in entry_is_valid()
88 if (!(md->attribute & EFI_MEMORY_RUNTIME)) in entry_is_valid()
90 if (md->virt_addr == 0 && md->phys_addr != 0) { in entry_is_valid()
107 if (md->type != in->type) { in entry_is_valid()
112 out->virt_addr = in_paddr + (md->virt_addr - md_paddr); in entry_is_valid()
157 efi_memory_desc_t md; in efi_memattr_apply_permissions() local
163 &md); in efi_memattr_apply_permissions()
[all …]
riscv-runtime.c
32 efi_memory_desc_t *md; in efi_virtmap_init() local
38 for_each_efi_memory_desc(md) { in efi_virtmap_init()
39 phys_addr_t phys = md->phys_addr; in efi_virtmap_init()
42 if (!(md->attribute & EFI_MEMORY_RUNTIME)) in efi_virtmap_init()
44 if (md->virt_addr == 0) in efi_virtmap_init()
47 ret = efi_create_mapping(&efi_mm, md); in efi_virtmap_init()
85 efi_memory_desc_t *md; in riscv_enable_runtime_services() local
87 for_each_efi_memory_desc(md) { in riscv_enable_runtime_services()
88 int md_size = md->num_pages << EFI_PAGE_SHIFT; in riscv_enable_runtime_services()
91 if (!(md->attribute & EFI_MEMORY_SP)) in riscv_enable_runtime_services()
[all …]
/drivers/video/fbdev/matrox/
matroxfb_maven.c
135 static int* get_ctrl_ptr(struct maven_data* md, int idx) { in get_ctrl_ptr() argument
136 return (int*)((char*)(md->primary_head) + maven_controls[idx].control); in get_ctrl_ptr()
340 static unsigned char maven_compute_deflicker (const struct maven_data* md) { in maven_compute_deflicker() argument
343 df = (md->version == MGATVO_B?0x40:0x00); in maven_compute_deflicker()
344 switch (md->primary_head->altout.tvo_params.deflicker) { in maven_compute_deflicker()
358 static void maven_compute_bwlevel (const struct maven_data* md, in maven_compute_bwlevel() argument
360 const int b = md->primary_head->altout.tvo_params.brightness + BLMIN; in maven_compute_bwlevel()
361 const int c = md->primary_head->altout.tvo_params.contrast; in maven_compute_bwlevel()
367 static const struct maven_gamma* maven_compute_gamma (const struct maven_data* md) { in maven_compute_gamma() argument
368 return maven_gamma + md->primary_head->altout.tvo_params.gamma; in maven_compute_gamma()
[all …]
/drivers/firmware/efi/libstub/
randomalloc.c
17 static unsigned long get_entry_num_slots(efi_memory_desc_t *md, in get_entry_num_slots() argument
24 if (md->type != EFI_CONVENTIONAL_MEMORY) in get_entry_num_slots()
28 (md->attribute & EFI_MEMORY_SP)) in get_entry_num_slots()
31 region_end = min(md->phys_addr + md->num_pages * EFI_PAGE_SIZE - 1, in get_entry_num_slots()
36 first_slot = round_up(md->phys_addr, align); in get_entry_num_slots()
51 #define MD_NUM_SLOTS(md) ((md)->virt_addr) argument
83 efi_memory_desc_t *md = (void *)memory_map + map_offset; in efi_random_alloc() local
86 slots = get_entry_num_slots(md, size, ilog2(align)); in efi_random_alloc()
87 MD_NUM_SLOTS(md) = slots; in efi_random_alloc()
106 efi_memory_desc_t *md = (void *)memory_map + map_offset; in efi_random_alloc() local
[all …]
/drivers/soc/xilinx/
zynqmp_power.c
135 int md; in suspend_mode_show() local
137 for (md = PM_SUSPEND_MODE_FIRST; md < ARRAY_SIZE(suspend_modes); md++) in suspend_mode_show()
138 if (suspend_modes[md]) { in suspend_mode_show()
139 if (md == suspend_mode) in suspend_mode_show()
140 s += sprintf(s, "[%s] ", suspend_modes[md]); in suspend_mode_show()
142 s += sprintf(s, "%s ", suspend_modes[md]); in suspend_mode_show()
155 int md, ret = -EINVAL; in suspend_mode_store() local
157 for (md = PM_SUSPEND_MODE_FIRST; md < ARRAY_SIZE(suspend_modes); md++) in suspend_mode_store()
158 if (suspend_modes[md] && in suspend_mode_store()
159 sysfs_streq(suspend_modes[md], buf)) { in suspend_mode_store()
[all …]
