Searched refs:block (Results 1 – 25 of 548) sorted by relevance

/drivers/iio/buffer/
industrialio-buffer-dma.c
95 struct iio_dma_buffer_block *block = container_of(kref, in iio_buffer_block_release() local
98 WARN_ON(block->state != IIO_BLOCK_STATE_DEAD); in iio_buffer_block_release()
100 dma_free_coherent(block->queue->dev, PAGE_ALIGN(block->size), in iio_buffer_block_release()
101 block->vaddr, block->phys_addr); in iio_buffer_block_release()
103 iio_buffer_put(&block->queue->buffer); in iio_buffer_block_release()
104 kfree(block); in iio_buffer_block_release()
107 static void iio_buffer_block_get(struct iio_dma_buffer_block *block) in iio_buffer_block_get() argument
109 kref_get(&block->kref); in iio_buffer_block_get()
112 static void iio_buffer_block_put(struct iio_dma_buffer_block *block) in iio_buffer_block_put() argument
114 kref_put(&block->kref, iio_buffer_block_release); in iio_buffer_block_put()
[all …]
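
The industrialio-buffer-dma.c hits above trace the kref-based lifecycle of an iio_dma_buffer_block: a release callback recovered with container_of(), plus thin get/put wrappers. A minimal sketch of the same pattern on a hypothetical demo_block type (the demo_* names are invented; only the <linux/kref.h> and <linux/slab.h> APIs are real):

        /* Sketch only: generic kref get/put/release, not the IIO implementation. */
        #include <linux/kernel.h>
        #include <linux/kref.h>
        #include <linux/slab.h>

        struct demo_block {
                struct kref kref;       /* embedded reference count */
                void *vaddr;
        };

        static void demo_block_release(struct kref *kref)
        {
                /* Runs exactly once, when the last reference is dropped. */
                struct demo_block *block = container_of(kref, struct demo_block, kref);

                kfree(block->vaddr);
                kfree(block);
        }

        static struct demo_block *demo_block_alloc(size_t size)
        {
                struct demo_block *block = kzalloc(sizeof(*block), GFP_KERNEL);

                if (!block)
                        return NULL;
                block->vaddr = kzalloc(size, GFP_KERNEL);
                if (!block->vaddr) {
                        kfree(block);
                        return NULL;
                }
                kref_init(&block->kref);        /* refcount starts at 1 */
                return block;
        }

        static void demo_block_get(struct demo_block *block)
        {
                kref_get(&block->kref);
        }

        static void demo_block_put(struct demo_block *block)
        {
                kref_put(&block->kref, demo_block_release);
        }

Anything that keeps a pointer to the block takes a reference with demo_block_get() and drops it with demo_block_put(); the free path runs only from the release callback, once the count hits zero.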
/drivers/gpu/drm/i915/
i915_buddy.c
20 struct i915_buddy_block *block; in i915_block_alloc() local
24 block = kmem_cache_zalloc(slab_blocks, GFP_KERNEL); in i915_block_alloc()
25 if (!block) in i915_block_alloc()
28 block->header = offset; in i915_block_alloc()
29 block->header |= order; in i915_block_alloc()
30 block->parent = parent; in i915_block_alloc()
32 GEM_BUG_ON(block->header & I915_BUDDY_HEADER_UNUSED); in i915_block_alloc()
33 return block; in i915_block_alloc()
37 struct i915_buddy_block *block) in i915_block_free() argument
39 kmem_cache_free(slab_blocks, block); in i915_block_free()
[all …]
i915_buddy.h
75 i915_buddy_block_offset(struct i915_buddy_block *block) in i915_buddy_block_offset() argument
77 return block->header & I915_BUDDY_HEADER_OFFSET; in i915_buddy_block_offset()
81 i915_buddy_block_order(struct i915_buddy_block *block) in i915_buddy_block_order() argument
83 return block->header & I915_BUDDY_HEADER_ORDER; in i915_buddy_block_order()
87 i915_buddy_block_state(struct i915_buddy_block *block) in i915_buddy_block_state() argument
89 return block->header & I915_BUDDY_HEADER_STATE; in i915_buddy_block_state()
93 i915_buddy_block_is_allocated(struct i915_buddy_block *block) in i915_buddy_block_is_allocated() argument
95 return i915_buddy_block_state(block) == I915_BUDDY_ALLOCATED; in i915_buddy_block_is_allocated()
99 i915_buddy_block_is_free(struct i915_buddy_block *block) in i915_buddy_block_is_free() argument
101 return i915_buddy_block_state(block) == I915_BUDDY_FREE; in i915_buddy_block_is_free()
[all …]
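
The i915_buddy.h hits read the block's offset, order, and allocation state out of one packed header word with bit masks. A sketch of that pack-fields-into-one-u64 idiom, using invented DEMO_* masks (the real I915_BUDDY_HEADER_* layout lives in i915_buddy.h and is not reproduced here):

        /* Sketch only: one u64 header carrying offset | state | order. */
        #include <linux/bits.h>
        #include <linux/types.h>

        #define DEMO_HEADER_OFFSET      GENMASK_ULL(63, 12)     /* aligned byte offset */
        #define DEMO_HEADER_STATE       GENMASK_ULL(11, 10)
        #define   DEMO_ALLOCATED        (1ULL << 10)
        #define   DEMO_FREE             (2ULL << 10)
        #define DEMO_HEADER_ORDER       GENMASK_ULL(9, 0)       /* buddy order */

        struct demo_buddy_block {
                u64 header;
        };

        static inline u64 demo_block_offset(const struct demo_buddy_block *block)
        {
                return block->header & DEMO_HEADER_OFFSET;
        }

        static inline unsigned int demo_block_order(const struct demo_buddy_block *block)
        {
                return block->header & DEMO_HEADER_ORDER;
        }

        static inline bool demo_block_is_free(const struct demo_buddy_block *block)
        {
                return (block->header & DEMO_HEADER_STATE) == DEMO_FREE;
        }

Because this demo offset mask keeps only bits 63:12, any offset stored this way must be at least 4 KiB aligned, which is what frees the low bits to hold the state and order fields.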
/drivers/gpio/
gpio-sch311x.c
134 struct sch311x_gpio_block *block = gpiochip_get_data(chip); in sch311x_gpio_request() local
136 if (block->config_regs[offset] == 0) /* GPIO is not available */ in sch311x_gpio_request()
139 if (!request_region(block->runtime_reg + block->config_regs[offset], in sch311x_gpio_request()
142 block->runtime_reg + block->config_regs[offset]); in sch311x_gpio_request()
150 struct sch311x_gpio_block *block = gpiochip_get_data(chip); in sch311x_gpio_free() local
152 if (block->config_regs[offset] == 0) /* GPIO is not available */ in sch311x_gpio_free()
155 release_region(block->runtime_reg + block->config_regs[offset], 1); in sch311x_gpio_free()
160 struct sch311x_gpio_block *block = gpiochip_get_data(chip); in sch311x_gpio_get() local
163 spin_lock(&block->lock); in sch311x_gpio_get()
164 data = inb(block->runtime_reg + block->data_reg); in sch311x_gpio_get()
[all …]
/drivers/gpu/drm/i915/selftests/
i915_buddy.c
12 struct i915_buddy_block *block, in __igt_dump_block() argument
16 block->header, in __igt_dump_block()
17 i915_buddy_block_state(block), in __igt_dump_block()
18 i915_buddy_block_order(block), in __igt_dump_block()
19 i915_buddy_block_offset(block), in __igt_dump_block()
20 i915_buddy_block_size(mm, block), in __igt_dump_block()
21 yesno(!block->parent), in __igt_dump_block()
26 struct i915_buddy_block *block) in igt_dump_block() argument
30 __igt_dump_block(mm, block, false); in igt_dump_block()
32 buddy = get_buddy(block); in igt_dump_block()
[all …]
/drivers/mtd/
nftlmount.c
28 unsigned int block, boot_record_count = 0; in find_boot_record() local
48 for (block = 0; block < nftl->nb_blocks; block++) { in find_boot_record()
53 ret = mtd_read(mtd, block * nftl->EraseSize, SECTORSIZE, in find_boot_record()
62 block * nftl->EraseSize, nftl->mbd.mtd->index, ret); in find_boot_record()
73 block * nftl->EraseSize, nftl->mbd.mtd->index); in find_boot_record()
79 ret = nftl_read_oob(mtd, block * nftl->EraseSize + in find_boot_record()
84 block * nftl->EraseSize, nftl->mbd.mtd->index, ret); in find_boot_record()
94 block * nftl->EraseSize, nftl->mbd.mtd->index, in find_boot_record()
100 ret = mtd->read(mtd, block * nftl->EraseSize, SECTORSIZE, in find_boot_record()
104 block * nftl->EraseSize, nftl->mbd.mtd->index, ret); in find_boot_record()
[all …]
inftlmount.c
35 unsigned int i, block; in find_boot_record() local
55 for (block = 0; block < inftl->nb_blocks; block++) { in find_boot_record()
62 ret = mtd_read(mtd, block * inftl->EraseSize, SECTORSIZE, in find_boot_record()
72 block * inftl->EraseSize, in find_boot_record()
89 block * inftl->EraseSize + SECTORSIZE + 8, in find_boot_record()
94 "(err %d)\n", block * inftl->EraseSize, in find_boot_record()
107 mtd_read(mtd, block * inftl->EraseSize + 4096, SECTORSIZE, in find_boot_record()
172 block >>= mh->BlockMultiplierBits; in find_boot_record()
280 inftl->PUtable[block] = BLOCK_RESERVED; in find_boot_record()
294 inftl->MediaUnit = block; in find_boot_record()
[all …]
rfd_ftl.c
57 struct block { struct
88 struct block *blocks;
95 struct block *block = &part->blocks[block_no]; in build_block_map() local
98 block->offset = part->block_size * block_no; in build_block_map()
101 block->state = BLOCK_UNUSED; in build_block_map()
105 block->state = BLOCK_OK; in build_block_map()
116 block->free_sectors++; in build_block_map()
139 part->sector_map[entry] = block->offset + in build_block_map()
142 block->used_sectors++; in build_block_map()
145 if (block->free_sectors == part->data_sectors_per_block) in build_block_map()
[all …]
inftlcore.c
242 int block, silly; in INFTL_foldchain() local
267 for (block = 0; block < inftl->EraseSize/SECTORSIZE; block ++) { in INFTL_foldchain()
268 if ((BlockMap[block] != BLOCK_NIL) || in INFTL_foldchain()
269 BlockDeleted[block]) in INFTL_foldchain()
273 + (block * SECTORSIZE), 16, &retlen, in INFTL_foldchain()
284 BlockMap[block] = thisEUN; in INFTL_foldchain()
287 BlockDeleted[block] = 1; in INFTL_foldchain()
292 block, thisEUN, status); in INFTL_foldchain()
313 for (block = 0; block < inftl->EraseSize/SECTORSIZE ; block++) { in INFTL_foldchain()
321 if (BlockMap[block] == targetEUN || (pendingblock == in INFTL_foldchain()
[all …]
sm_ftl.c
192 static loff_t sm_mkoffset(struct sm_ftl *ftl, int zone, int block, int boffset) in sm_mkoffset() argument
196 WARN_ON(block >= ftl->zone_size); in sm_mkoffset()
199 if (block == -1) in sm_mkoffset()
202 return (zone * SM_MAX_ZONE_SIZE + block) * ftl->block_size + boffset; in sm_mkoffset()
207 int *zone, int *block, int *boffset) in sm_break_offset() argument
211 *block = do_div(offset, ftl->max_lba); in sm_break_offset()
238 int zone, int block, int boffset, in sm_read_sector() argument
248 if (block == -1) { in sm_read_sector()
270 if (zone == 0 && block == ftl->cis_block && boffset == in sm_read_sector()
282 ret = mtd_read_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops); in sm_read_sector()
[all …]
/drivers/w1/
w1_netlink.c
41 struct w1_cb_block *block; member
55 static u16 w1_reply_len(struct w1_cb_block *block) in w1_reply_len() argument
57 if (!block->cn) in w1_reply_len()
59 return (u8 *)block->cn - (u8 *)block->first_cn + block->cn->len; in w1_reply_len()
62 static void w1_unref_block(struct w1_cb_block *block) in w1_unref_block() argument
64 if (atomic_sub_return(1, &block->refcnt) == 0) { in w1_unref_block()
65 u16 len = w1_reply_len(block); in w1_unref_block()
67 cn_netlink_send_mult(block->first_cn, len, in w1_unref_block()
68 block->portid, 0, GFP_KERNEL); in w1_unref_block()
70 kfree(block); in w1_unref_block()
[all …]
/drivers/net/ethernet/marvell/prestera/
prestera_flow.c
15 static int prestera_flow_block_mall_cb(struct prestera_flow_block *block, in prestera_flow_block_mall_cb() argument
20 return prestera_span_replace(block, f); in prestera_flow_block_mall_cb()
22 prestera_span_destroy(block); in prestera_flow_block_mall_cb()
29 static int prestera_flow_block_flower_cb(struct prestera_flow_block *block, in prestera_flow_block_flower_cb() argument
37 return prestera_flower_replace(block, f); in prestera_flow_block_flower_cb()
39 prestera_flower_destroy(block, f); in prestera_flow_block_flower_cb()
42 return prestera_flower_stats(block, f); in prestera_flow_block_flower_cb()
51 struct prestera_flow_block *block = cb_priv; in prestera_flow_block_cb() local
55 return prestera_flow_block_flower_cb(block, type_data); in prestera_flow_block_cb()
57 return prestera_flow_block_mall_cb(block, type_data); in prestera_flow_block_cb()
[all …]
prestera_acl.c
27 struct prestera_flow_block *block; member
81 struct prestera_flow_block *block; in prestera_acl_block_create() local
83 block = kzalloc(sizeof(*block), GFP_KERNEL); in prestera_acl_block_create()
84 if (!block) in prestera_acl_block_create()
86 INIT_LIST_HEAD(&block->binding_list); in prestera_acl_block_create()
87 block->net = net; in prestera_acl_block_create()
88 block->sw = sw; in prestera_acl_block_create()
90 block->ruleset = prestera_acl_ruleset_create(sw); in prestera_acl_block_create()
91 if (IS_ERR(block->ruleset)) { in prestera_acl_block_create()
92 kfree(block); in prestera_acl_block_create()
[all …]
/drivers/video/fbdev/
edid.h
72 #define PIXEL_CLOCK_LO (unsigned)block[ 0 ]
73 #define PIXEL_CLOCK_HI (unsigned)block[ 1 ]
75 #define H_ACTIVE_LO (unsigned)block[ 2 ]
76 #define H_BLANKING_LO (unsigned)block[ 3 ]
77 #define H_ACTIVE_HI UPPER_NIBBLE( (unsigned)block[ 4 ] )
79 #define H_BLANKING_HI LOWER_NIBBLE( (unsigned)block[ 4 ] )
82 #define V_ACTIVE_LO (unsigned)block[ 5 ]
83 #define V_BLANKING_LO (unsigned)block[ 6 ]
84 #define V_ACTIVE_HI UPPER_NIBBLE( (unsigned)block[ 7 ] )
86 #define V_BLANKING_HI LOWER_NIBBLE( (unsigned)block[ 7 ] )
[all …]
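
The edid.h hits index into an 18-byte EDID detailed timing descriptor, where each 12-bit timing value is split between a dedicated low byte and one nibble of a shared byte. A sketch of how such fields are reassembled (the demo_* helper names are invented; the byte positions follow the *_LO/*_HI macros shown above):

        /* Sketch only: rebuild 12-bit EDID timing fields from byte + nibble. */
        static inline unsigned int demo_upper_nibble(unsigned char b)
        {
                return (b >> 4) & 0x0f;
        }

        static unsigned int demo_h_active(const unsigned char *block)
        {
                /* block[2]: low 8 bits; upper nibble of block[4]: high 4 bits */
                return (demo_upper_nibble(block[4]) << 8) | block[2];
        }

        static unsigned int demo_v_active(const unsigned char *block)
        {
                /* block[5]: low 8 bits; upper nibble of block[7]: high 4 bits */
                return (demo_upper_nibble(block[7]) << 8) | block[5];
        }

For example, a 1920-pixel-wide mode stores 0x80 in block[2] and 0x7 in the upper nibble of block[4], reassembling to 0x780 = 1920.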
/drivers/net/ethernet/mellanox/mlxsw/
core_acl_flex_actions.c
381 void (*destructor)(struct mlxsw_afa_block *block,
385 static void mlxsw_afa_resource_add(struct mlxsw_afa_block *block, in mlxsw_afa_resource_add() argument
388 list_add(&resource->list, &block->resource_list); in mlxsw_afa_resource_add()
396 static void mlxsw_afa_resources_destroy(struct mlxsw_afa_block *block) in mlxsw_afa_resources_destroy() argument
400 list_for_each_entry_safe(resource, tmp, &block->resource_list, list) { in mlxsw_afa_resources_destroy()
401 resource->destructor(block, resource); in mlxsw_afa_resources_destroy()
407 struct mlxsw_afa_block *block; in mlxsw_afa_block_create() local
409 block = kzalloc(sizeof(*block), GFP_KERNEL); in mlxsw_afa_block_create()
410 if (!block) in mlxsw_afa_block_create()
412 INIT_LIST_HEAD(&block->resource_list); in mlxsw_afa_block_create()
[all …]
spectrum_flow.c
15 struct mlxsw_sp_flow_block *block; in mlxsw_sp_flow_block_create() local
17 block = kzalloc(sizeof(*block), GFP_KERNEL); in mlxsw_sp_flow_block_create()
18 if (!block) in mlxsw_sp_flow_block_create()
20 INIT_LIST_HEAD(&block->binding_list); in mlxsw_sp_flow_block_create()
21 INIT_LIST_HEAD(&block->mall.list); in mlxsw_sp_flow_block_create()
22 block->mlxsw_sp = mlxsw_sp; in mlxsw_sp_flow_block_create()
23 block->net = net; in mlxsw_sp_flow_block_create()
24 return block; in mlxsw_sp_flow_block_create()
27 void mlxsw_sp_flow_block_destroy(struct mlxsw_sp_flow_block *block) in mlxsw_sp_flow_block_destroy() argument
29 WARN_ON(!list_empty(&block->binding_list)); in mlxsw_sp_flow_block_destroy()
[all …]
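
The spectrum_flow.c hits show the usual shape of a flow-block object: kzalloc, initialise the embedded lists, record back-pointers, and on destroy warn if anything is still bound before freeing. A sketch of that create/destroy shape with hypothetical demo_* types:

        /* Sketch only: create/destroy shape, not the mlxsw implementation. */
        #include <linux/bug.h>
        #include <linux/list.h>
        #include <linux/slab.h>

        struct demo_flow_block {
                struct list_head binding_list;  /* ports bound to this block */
                void *priv;
        };

        static struct demo_flow_block *demo_flow_block_create(void *priv)
        {
                struct demo_flow_block *block;

                block = kzalloc(sizeof(*block), GFP_KERNEL);
                if (!block)
                        return NULL;
                INIT_LIST_HEAD(&block->binding_list);
                block->priv = priv;
                return block;
        }

        static void demo_flow_block_destroy(struct demo_flow_block *block)
        {
                /* Callers must unbind everything first; flag bugs loudly. */
                WARN_ON(!list_empty(&block->binding_list));
                kfree(block);
        }

The WARN_ON() on the destroy path mirrors mlxsw_sp_flow_block_destroy() above: tearing down a block that still has bindings attached is a caller bug, so it is reported rather than ignored.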
core_acl_flex_actions.h
47 void mlxsw_afa_block_destroy(struct mlxsw_afa_block *block);
48 int mlxsw_afa_block_commit(struct mlxsw_afa_block *block);
49 char *mlxsw_afa_block_first_set(struct mlxsw_afa_block *block);
50 char *mlxsw_afa_block_cur_set(struct mlxsw_afa_block *block);
51 u32 mlxsw_afa_block_first_kvdl_index(struct mlxsw_afa_block *block);
52 int mlxsw_afa_block_activity_get(struct mlxsw_afa_block *block, bool *activity);
53 int mlxsw_afa_block_continue(struct mlxsw_afa_block *block);
54 int mlxsw_afa_block_jump(struct mlxsw_afa_block *block, u16 group_id);
55 int mlxsw_afa_block_terminate(struct mlxsw_afa_block *block);
58 int mlxsw_afa_block_append_drop(struct mlxsw_afa_block *block, bool ingress,
[all …]
/drivers/video/fbdev/core/
fbmon.c
96 static int edid_is_serial_block(unsigned char *block) in edid_is_serial_block() argument
98 if ((block[0] == 0x00) && (block[1] == 0x00) && in edid_is_serial_block()
99 (block[2] == 0x00) && (block[3] == 0xff) && in edid_is_serial_block()
100 (block[4] == 0x00)) in edid_is_serial_block()
106 static int edid_is_ascii_block(unsigned char *block) in edid_is_ascii_block() argument
108 if ((block[0] == 0x00) && (block[1] == 0x00) && in edid_is_ascii_block()
109 (block[2] == 0x00) && (block[3] == 0xfe) && in edid_is_ascii_block()
110 (block[4] == 0x00)) in edid_is_ascii_block()
116 static int edid_is_limits_block(unsigned char *block) in edid_is_limits_block() argument
118 if ((block[0] == 0x00) && (block[1] == 0x00) && in edid_is_limits_block()
[all …]
/drivers/s390/block/
dasd_ioctl.c
97 static int dasd_ioctl_quiesce(struct dasd_block *block) in dasd_ioctl_quiesce() argument
102 base = block->base; in dasd_ioctl_quiesce()
118 static int dasd_ioctl_resume(struct dasd_block *block) in dasd_ioctl_resume() argument
123 base = block->base; in dasd_ioctl_resume()
133 dasd_schedule_block_bh(block); in dasd_ioctl_resume()
141 static int dasd_ioctl_abortio(struct dasd_block *block) in dasd_ioctl_abortio() argument
147 base = block->base; in dasd_ioctl_abortio()
155 spin_lock_irqsave(&block->request_queue_lock, flags); in dasd_ioctl_abortio()
156 spin_lock(&block->queue_lock); in dasd_ioctl_abortio()
157 list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) { in dasd_ioctl_abortio()
[all …]
dasd_genhd.c
33 int dasd_gendisk_alloc(struct dasd_block *block) in dasd_gendisk_alloc() argument
40 base = block->base; in dasd_gendisk_alloc()
44 gdp = __alloc_disk_node(block->request_queue, NUMA_NO_NODE, in dasd_gendisk_alloc()
81 block->gdp = gdp; in dasd_gendisk_alloc()
82 set_capacity(block->gdp, 0); in dasd_gendisk_alloc()
83 device_add_disk(&base->cdev->dev, block->gdp, NULL); in dasd_gendisk_alloc()
90 void dasd_gendisk_free(struct dasd_block *block) in dasd_gendisk_free() argument
92 if (block->gdp) { in dasd_gendisk_free()
93 del_gendisk(block->gdp); in dasd_gendisk_free()
94 block->gdp->private_data = NULL; in dasd_gendisk_free()
[all …]
dasd.c
165 struct dasd_block *block; in dasd_alloc_block() local
167 block = kzalloc(sizeof(*block), GFP_ATOMIC); in dasd_alloc_block()
168 if (!block) in dasd_alloc_block()
171 atomic_set(&block->open_count, -1); in dasd_alloc_block()
173 atomic_set(&block->tasklet_scheduled, 0); in dasd_alloc_block()
174 tasklet_init(&block->tasklet, dasd_block_tasklet, in dasd_alloc_block()
175 (unsigned long) block); in dasd_alloc_block()
176 INIT_LIST_HEAD(&block->ccw_queue); in dasd_alloc_block()
177 spin_lock_init(&block->queue_lock); in dasd_alloc_block()
178 INIT_LIST_HEAD(&block->format_list); in dasd_alloc_block()
[all …]
/drivers/misc/
sram.c
55 static int sram_add_pool(struct sram_dev *sram, struct sram_reserve *block, in sram_add_pool() argument
61 NUMA_NO_NODE, block->label); in sram_add_pool()
66 block->size, NUMA_NO_NODE); in sram_add_pool()
75 static int sram_add_export(struct sram_dev *sram, struct sram_reserve *block, in sram_add_export() argument
88 part->battr.size = block->size; in sram_add_export()
93 static int sram_add_partition(struct sram_dev *sram, struct sram_reserve *block, in sram_add_partition() argument
105 virt_base = devm_ioremap_resource(sram->dev, &block->res); in sram_add_partition()
107 virt_base = devm_ioremap_resource_wc(sram->dev, &block->res); in sram_add_partition()
110 dev_err(sram->dev, "could not map SRAM at %pr\n", &block->res); in sram_add_partition()
116 part->base = sram->virt_base + block->start; in sram_add_partition()
[all …]
/drivers/md/
dm-log-writes.c
134 struct pending_block *block; member
198 struct pending_block *block) in free_pending_block() argument
202 for (i = 0; i < block->vec_cnt; i++) { in free_pending_block()
203 if (block->vecs[i].bv_page) in free_pending_block()
204 __free_page(block->vecs[i].bv_page); in free_pending_block()
206 kfree(block->data); in free_pending_block()
207 kfree(block); in free_pending_block()
331 struct pending_block *block, sector_t sector) in log_one_block() argument
338 entry.sector = cpu_to_le64(block->sector); in log_one_block()
339 entry.nr_sectors = cpu_to_le64(block->nr_sectors); in log_one_block()
[all …]
/drivers/net/ethernet/marvell/octeontx2/af/
rvu.c
28 struct rvu_block *block, int lf);
30 struct rvu_block *block, int lf);
93 int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero) in rvu_poll_reg() argument
100 reg = rvu->afreg_base + ((block << 28) | offset); in rvu_poll_reg()
220 int rvu_get_lf(struct rvu *rvu, struct rvu_block *block, u16 pcifunc, u16 slot) in rvu_get_lf() argument
226 for (lf = 0; lf < block->lf.max; lf++) { in rvu_get_lf()
227 if (block->fn_map[lf] == pcifunc) { in rvu_get_lf()
337 struct rvu_block *block, u16 pcifunc, in rvu_update_rsrc_map() argument
344 if (lf >= block->lf.max) { in rvu_update_rsrc_map()
347 __func__, lf, block->name, block->lf.max); in rvu_update_rsrc_map()
[all …]
/drivers/net/ethernet/freescale/dpaa2/
dpaa2-switch-flower.c
157 dpaa2_switch_acl_entry_remove(struct dpaa2_switch_filter_block *block, in dpaa2_switch_acl_entry_remove() argument
162 struct ethsw_core *ethsw = block->ethsw; in dpaa2_switch_acl_entry_remove()
183 block->acl_id, acl_entry_cfg); in dpaa2_switch_acl_entry_remove()
199 dpaa2_switch_acl_entry_add_to_list(struct dpaa2_switch_filter_block *block, in dpaa2_switch_acl_entry_add_to_list() argument
206 if (list_empty(&block->acl_entries)) { in dpaa2_switch_acl_entry_add_to_list()
207 list_add(&entry->list, &block->acl_entries); in dpaa2_switch_acl_entry_add_to_list()
211 list_for_each_safe(pos, n, &block->acl_entries) { in dpaa2_switch_acl_entry_add_to_list()
222 dpaa2_switch_acl_entry_get_by_index(struct dpaa2_switch_filter_block *block, in dpaa2_switch_acl_entry_get_by_index() argument
228 list_for_each_entry(tmp, &block->acl_entries, list) { in dpaa2_switch_acl_entry_get_by_index()
238 dpaa2_switch_acl_entry_set_precedence(struct dpaa2_switch_filter_block *block, in dpaa2_switch_acl_entry_set_precedence() argument
[all …]
