Searched refs: part (Results 1 – 25 of 122), sorted by relevance

/drivers/mtd/
rfd_ftl.c:92 static int build_block_map(struct partition *part, int block_no) in build_block_map() argument
94 struct block *block = &part->blocks[block_no]; in build_block_map()
97 block->offset = part->block_size * block_no; in build_block_map()
99 if (le16_to_cpu(part->header_cache[0]) != RFD_MAGIC) { in build_block_map()
106 for (i=0; i<part->data_sectors_per_block; i++) { in build_block_map()
109 entry = le16_to_cpu(part->header_cache[HEADER_MAP_OFFSET + i]); in build_block_map()
122 if (entry >= part->sector_count) { in build_block_map()
126 part->mbd.mtd->name, block_no, i, entry); in build_block_map()
130 if (part->sector_map[entry] != -1) { in build_block_map()
133 part->mbd.mtd->name, entry); in build_block_map()
[all …]
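The rfd_ftl.c hits are the heart of its block-map builder: each block's cached header lists which logical sector every on-flash slot holds, and an entry is rejected if it exceeds sector_count or collides with an already-claimed slot in sector_map. A minimal userspace sketch of that bookkeeping (array sizes and the header layout are illustrative, not the driver's):

```c
#include <stdint.h>
#include <stdio.h>

#define SECTOR_COUNT           8  /* illustrative part->sector_count */
#define DATA_SECTORS_PER_BLOCK 4  /* illustrative slots per block */

static int sector_map[SECTOR_COUNT];  /* -1 = logical sector unmapped */

static void build_block_map(const uint16_t *header_map, int block_no)
{
    /* block->offset above; here counted in slots rather than bytes */
    int block_offset = DATA_SECTORS_PER_BLOCK * block_no;

    for (int i = 0; i < DATA_SECTORS_PER_BLOCK; i++) {
        uint16_t entry = header_map[i];    /* le16_to_cpu() in-kernel */

        if (entry >= SECTOR_COUNT) {       /* bogus map entry */
            fprintf(stderr, "block %d slot %d: entry %u out of range\n",
                    block_no, i, (unsigned)entry);
            continue;
        }
        if (sector_map[entry] != -1) {     /* sector claimed twice */
            fprintf(stderr, "sector %u already mapped\n", (unsigned)entry);
            continue;
        }
        sector_map[entry] = block_offset + i;  /* remember where it lives */
    }
}

int main(void)
{
    for (int i = 0; i < SECTOR_COUNT; i++)
        sector_map[i] = -1;

    const uint16_t hdr[DATA_SECTORS_PER_BLOCK] = { 2, 5, 2, 42 };
    build_block_map(hdr, 0);   /* flags the duplicate 2 and bogus 42 */
    return 0;
}
```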
ftl.c:157 static int scan_header(partition_t *part) in scan_header() argument
163 part->header.FormattedSize = 0; in scan_header()
164 max_offset = (0x100000<part->mbd.mtd->size)?0x100000:part->mbd.mtd->size; in scan_header()
168 offset += part->mbd.mtd->erasesize ? : 0x2000) { in scan_header()
170 err = mtd_read(part->mbd.mtd, offset, sizeof(header), &ret, in scan_header()
189 if ((1 << header.EraseUnitSize) != part->mbd.mtd->erasesize) { in scan_header()
191 1 << header.EraseUnitSize,part->mbd.mtd->erasesize); in scan_header()
194 part->header = header; in scan_header()
198 static int build_maps(partition_t *part) in build_maps() argument
208 part->DataUnits = le16_to_cpu(part->header.NumEraseUnits) - in build_maps()
[all …]
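ftl.c's scan_header() probes for a format header at erase-unit strides within the first 1 MiB, falling back to a 0x2000 stride when the device reports no erase size (`erasesize ? : 0x2000` is GNU C shorthand for exactly that), and accepts a header whose EraseUnitSize matches the device. A compilable sketch of the probe loop, with an invented one-field header and a stubbed flash read:

```c
#include <stdint.h>
#include <stdio.h>

struct header { uint8_t erase_unit_size; };  /* log2, as the check implies */

/* Stand-in for mtd_read(); always "reads" a header claiming log2 = 13. */
static int flash_read(uint64_t offset, struct header *hdr)
{
    (void)offset;
    hdr->erase_unit_size = 13;
    return 0;
}

static int scan_header(uint64_t mtd_size, uint32_t erasesize)
{
    /* Probe at most the first 1 MiB, as the snippet's max_offset does. */
    uint64_t max_offset = mtd_size < 0x100000 ? mtd_size : 0x100000;
    /* Same fallback as `erasesize ? : 0x2000`. */
    uint32_t step = erasesize ? erasesize : 0x2000;
    struct header hdr;

    for (uint64_t offset = 0; offset < max_offset; offset += step) {
        if (flash_read(offset, &hdr))
            continue;
        if ((1u << hdr.erase_unit_size) == erasesize)
            return 0;        /* header's erase unit matches the device */
    }
    return -1;               /* nothing plausible in the probe window */
}

int main(void)
{
    printf("%d\n", scan_header(4 << 20, 1 << 13));  /* 0: match found */
    return 0;
}
```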
mtdpart.c:66 struct mtd_part *part = mtd_to_part(mtd); in part_read() local
70 stats = part->master->ecc_stats; in part_read()
71 res = part->master->_read(part->master, from + part->offset, len, in part_read()
75 part->master->ecc_stats.failed - stats.failed; in part_read()
78 part->master->ecc_stats.corrected - stats.corrected; in part_read()
85 struct mtd_part *part = mtd_to_part(mtd); in part_point() local
87 return part->master->_point(part->master, from + part->offset, len, in part_point()
93 struct mtd_part *part = mtd_to_part(mtd); in part_unpoint() local
95 return part->master->_unpoint(part->master, from + part->offset, len); in part_unpoint()
103 struct mtd_part *part = mtd_to_part(mtd); in part_get_unmapped_area() local
[all …]
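mtdpart.c shows the core of partition support: every operation adds the partition's offset onto the request and delegates to the master device (part_read additionally diffs the master's ECC counters around the call). The remap-and-delegate idea, reduced to a compilable sketch; the two-struct layout is a simplification of struct mtd_part:

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct master {
    uint8_t storage[64];
};

struct part {
    struct master *master;
    uint32_t offset;        /* where the partition starts on the master */
};

static int master_read(struct master *m, uint32_t from, size_t len, void *buf)
{
    memcpy(buf, m->storage + from, len);
    return 0;
}

/* part_read above does exactly this: shift `from` by part->offset and
 * hand the request to the master device. */
static int part_read(struct part *p, uint32_t from, size_t len, void *buf)
{
    return master_read(p->master, from + p->offset, len, buf);
}

int main(void)
{
    struct master m;
    for (int i = 0; i < 64; i++)
        m.storage[i] = (uint8_t)i;

    struct part p = { .master = &m, .offset = 32 };
    uint8_t b;
    part_read(&p, 4, 1, &b);
    printf("%u\n", b);      /* 36: partition offset 32 + request 4 */
    return 0;
}
```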
cmdlinepart.c:312 struct cmdline_mtd_partition *part; in parse_cmdline_partitions() local
326 for (part = partitions; part; part = part->next) { in parse_cmdline_partitions()
327 if ((!mtd_id) || (!strcmp(part->mtd_id, mtd_id))) in parse_cmdline_partitions()
331 if (!part) in parse_cmdline_partitions()
334 for (i = 0, offset = 0; i < part->num_parts; i++) { in parse_cmdline_partitions()
335 if (part->parts[i].offset == OFFSET_CONTINUOUS) in parse_cmdline_partitions()
336 part->parts[i].offset = offset; in parse_cmdline_partitions()
338 offset = part->parts[i].offset; in parse_cmdline_partitions()
340 if (part->parts[i].size == SIZE_REMAINING) in parse_cmdline_partitions()
341 part->parts[i].size = master->size - offset; in parse_cmdline_partitions()
[all …]
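cmdlinepart.c's fix-up loop resolves the two placeholders a mtdparts= string may leave: OFFSET_CONTINUOUS means "start where the previous partition ended" and SIZE_REMAINING means "run to the end of the master". A sketch of that pass; the sentinel values and the end-of-loop offset advance are assumptions based on the visible running-offset logic:

```c
#include <stdint.h>
#include <stdio.h>

#define OFFSET_CONTINUOUS UINT64_MAX   /* hypothetical sentinels; the */
#define SIZE_REMAINING    UINT64_MAX   /* driver defines its own      */

struct partition {
    uint64_t offset, size;
};

static void resolve(struct partition *parts, int n, uint64_t master_size)
{
    uint64_t offset = 0;

    for (int i = 0; i < n; i++) {
        if (parts[i].offset == OFFSET_CONTINUOUS)
            parts[i].offset = offset;        /* butt against the previous */
        else
            offset = parts[i].offset;

        if (parts[i].size == SIZE_REMAINING)
            parts[i].size = master_size - offset;  /* take the rest */

        offset += parts[i].size;             /* advance for the next one */
    }
}

int main(void)
{
    struct partition p[2] = {
        { 0, 0x1000 },
        { OFFSET_CONTINUOUS, SIZE_REMAINING },
    };
    resolve(p, 2, 0x4000);
    printf("0x%llx 0x%llx\n", (unsigned long long)p[1].offset,
           (unsigned long long)p[1].size);   /* 0x1000 0x3000 */
    return 0;
}
```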
ofpart.c:156 } *part; in parse_ofoldpart_partitions() local
164 part = of_get_property(dp, "partitions", &plen); in parse_ofoldpart_partitions()
165 if (!part) in parse_ofoldpart_partitions()
171 nr_parts = plen / sizeof(part[0]); in parse_ofoldpart_partitions()
180 parts[i].offset = be32_to_cpu(part->offset); in parse_ofoldpart_partitions()
181 parts[i].size = be32_to_cpu(part->len) & ~1; in parse_ofoldpart_partitions()
183 if (be32_to_cpu(part->len) & 1) in parse_ofoldpart_partitions()
196 part++; in parse_ofoldpart_partitions()
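The legacy device-tree binding parsed in ofpart.c is a flat "partitions" property of big-endian (offset, len) pairs, with nr_parts recovered as plen / sizeof(pair) and bit 0 of len borrowed as a read-only flag, hence the `& ~1` mask. A host-side sketch of the decoding (assumes a little-endian host, so an unconditional byte swap stands in for be32_to_cpu()):

```c
#include <stdint.h>
#include <stdio.h>

struct of_part {            /* matches the anonymous struct in the hit */
    uint32_t offset;        /* big-endian in the property */
    uint32_t len;           /* big-endian; bit 0 = read-only flag */
};

/* Unconditional byte swap; fine for this little-endian-host demo. */
static uint32_t be32_to_cpu(uint32_t v)
{
    return ((v >> 24) & 0xff) | ((v >> 8) & 0xff00) |
           ((v << 8) & 0xff0000) | (v << 24);
}

int main(void)
{
    /* A two-entry property as it would sit in the blob (big-endian),
     * second entry with the read-only bit set in its length. */
    struct of_part prop[] = {
        { be32_to_cpu(0x0),     be32_to_cpu(0x10000) },
        { be32_to_cpu(0x10000), be32_to_cpu(0x30000 | 1) },
    };
    int nr_parts = sizeof(prop) / sizeof(prop[0]);   /* plen / sizeof */

    for (int i = 0; i < nr_parts; i++) {
        uint32_t len = be32_to_cpu(prop[i].len);
        printf("part %d: offset 0x%x size 0x%x%s\n", i,
               be32_to_cpu(prop[i].offset), len & ~1u,
               (len & 1) ? " (read-only)" : "");
    }
    return 0;
}
```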
bcm47xxpart.c:56 static void bcm47xxpart_add_part(struct mtd_partition *part, const char *name, in bcm47xxpart_add_part() argument
59 part->name = name; in bcm47xxpart_add_part()
60 part->offset = offset; in bcm47xxpart_add_part()
61 part->mask_flags = mask_flags; in bcm47xxpart_add_part()
/drivers/misc/sgi-xp/
xpc_main.c:177 struct xpc_partition *part = (struct xpc_partition *)data; in xpc_timeout_partition_disengage() local
179 DBUG_ON(time_is_after_jiffies(part->disengage_timeout)); in xpc_timeout_partition_disengage()
181 (void)xpc_partition_disengaged(part); in xpc_timeout_partition_disengage()
183 DBUG_ON(part->disengage_timeout != 0); in xpc_timeout_partition_disengage()
184 DBUG_ON(xpc_arch_ops.partition_engaged(XPC_PARTID(part))); in xpc_timeout_partition_disengage()
227 struct xpc_partition *part; in xpc_check_remote_hb() local
239 part = &xpc_partitions[partid]; in xpc_check_remote_hb()
241 if (part->act_state == XPC_P_AS_INACTIVE || in xpc_check_remote_hb()
242 part->act_state == XPC_P_AS_DEACTIVATING) { in xpc_check_remote_hb()
246 ret = xpc_arch_ops.get_remote_heartbeat(part); in xpc_check_remote_hb()
[all …]
xpc_partition.c:270 xpc_partition_disengaged(struct xpc_partition *part) in xpc_partition_disengaged() argument
272 short partid = XPC_PARTID(part); in xpc_partition_disengaged()
276 if (part->disengage_timeout) { in xpc_partition_disengaged()
278 if (time_is_after_jiffies(part->disengage_timeout)) { in xpc_partition_disengaged()
294 part->disengage_timeout = 0; in xpc_partition_disengaged()
298 del_singleshot_timer_sync(&part->disengage_timer); in xpc_partition_disengaged()
300 DBUG_ON(part->act_state != XPC_P_AS_DEACTIVATING && in xpc_partition_disengaged()
301 part->act_state != XPC_P_AS_INACTIVE); in xpc_partition_disengaged()
302 if (part->act_state != XPC_P_AS_INACTIVE) in xpc_partition_disengaged()
303 xpc_wakeup_channel_mgr(part); in xpc_partition_disengaged()
[all …]
xpc_uv.c:373 struct xpc_partition *part; in xpc_process_activate_IRQ_rcvd_uv() local
380 part = &xpc_partitions[partid]; in xpc_process_activate_IRQ_rcvd_uv()
382 if (part->sn.uv.act_state_req == 0) in xpc_process_activate_IRQ_rcvd_uv()
388 act_state_req = part->sn.uv.act_state_req; in xpc_process_activate_IRQ_rcvd_uv()
389 part->sn.uv.act_state_req = 0; in xpc_process_activate_IRQ_rcvd_uv()
393 if (part->act_state == XPC_P_AS_INACTIVE) in xpc_process_activate_IRQ_rcvd_uv()
394 xpc_activate_partition(part); in xpc_process_activate_IRQ_rcvd_uv()
395 else if (part->act_state == XPC_P_AS_DEACTIVATING) in xpc_process_activate_IRQ_rcvd_uv()
396 XPC_DEACTIVATE_PARTITION(part, xpReactivating); in xpc_process_activate_IRQ_rcvd_uv()
399 if (part->act_state == XPC_P_AS_INACTIVE) in xpc_process_activate_IRQ_rcvd_uv()
[all …]
xpc_channel.c:82 struct xpc_partition *part = &xpc_partitions[ch->partid]; in xpc_process_disconnect() local
101 if (part->act_state == XPC_P_AS_DEACTIVATING) { in xpc_process_disconnect()
157 atomic_dec(&part->nchannels_active); in xpc_process_disconnect()
168 if (part->act_state != XPC_P_AS_DEACTIVATING) { in xpc_process_disconnect()
170 spin_lock(&part->chctl_lock); in xpc_process_disconnect()
171 part->chctl.flags[ch->number] |= in xpc_process_disconnect()
173 spin_unlock(&part->chctl_lock); in xpc_process_disconnect()
183 xpc_process_openclose_chctl_flags(struct xpc_partition *part, int ch_number, in xpc_process_openclose_chctl_flags() argument
188 &part->remote_openclose_args[ch_number]; in xpc_process_openclose_chctl_flags()
189 struct xpc_channel *ch = &part->channels[ch_number]; in xpc_process_openclose_chctl_flags()
[all …]
xpc_sn2.c:268 xpc_check_for_sent_chctl_flags_sn2(struct xpc_partition *part) in xpc_check_for_sent_chctl_flags_sn2() argument
273 chctl.all_flags = xpc_receive_IRQ_amo_sn2(part->sn.sn2. in xpc_check_for_sent_chctl_flags_sn2()
278 spin_lock_irqsave(&part->chctl_lock, irq_flags); in xpc_check_for_sent_chctl_flags_sn2()
279 part->chctl.all_flags |= chctl.all_flags; in xpc_check_for_sent_chctl_flags_sn2()
280 spin_unlock_irqrestore(&part->chctl_lock, irq_flags); in xpc_check_for_sent_chctl_flags_sn2()
283 "0x%llx\n", XPC_PARTID(part), chctl.all_flags); in xpc_check_for_sent_chctl_flags_sn2()
285 xpc_wakeup_channel_mgr(part); in xpc_check_for_sent_chctl_flags_sn2()
308 struct xpc_partition *part = &xpc_partitions[partid]; in xpc_handle_notify_IRQ_sn2() local
312 if (xpc_part_ref(part)) { in xpc_handle_notify_IRQ_sn2()
313 xpc_check_for_sent_chctl_flags_sn2(part); in xpc_handle_notify_IRQ_sn2()
[all …]
xpc.h:937 xpc_wakeup_channel_mgr(struct xpc_partition *part) in xpc_wakeup_channel_mgr() argument
939 if (atomic_inc_return(&part->channel_mgr_requests) == 1) in xpc_wakeup_channel_mgr()
940 wake_up(&part->channel_mgr_wq); in xpc_wakeup_channel_mgr()
971 xpc_part_deref(struct xpc_partition *part) in xpc_part_deref() argument
973 s32 refs = atomic_dec_return(&part->references); in xpc_part_deref()
976 if (refs == 0 && part->setup_state == XPC_P_SS_WTEARDOWN) in xpc_part_deref()
977 wake_up(&part->teardown_wq); in xpc_part_deref()
981 xpc_part_ref(struct xpc_partition *part) in xpc_part_ref() argument
985 atomic_inc(&part->references); in xpc_part_ref()
986 setup = (part->setup_state == XPC_P_SS_SETUP); in xpc_part_ref()
[all …]
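xpc.h carries the reference counting the rest of the sgi-xp files lean on: a reference is only granted while the partition is in the SETUP state, and the deref that drops the count to zero during teardown wakes whoever is waiting on teardown_wq. A userspace sketch of that handshake with C11 atomics; the two-state enum and the printf stand in for the driver's setup_state values and wait queue:

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum setup_state { SS_SETUP, SS_WTEARDOWN };   /* reduced from XPC_P_SS_* */

struct part {
    atomic_int references;
    enum setup_state setup_state;
};

/* wake_up(&part->teardown_wq) in the driver; a printf will do here. */
static void wake_teardown_waiter(struct part *p)
{
    (void)p;
    printf("teardown can proceed\n");
}

static void part_deref(struct part *p)
{
    int refs = atomic_fetch_sub(&p->references, 1) - 1;
    if (refs == 0 && p->setup_state == SS_WTEARDOWN)
        wake_teardown_waiter(p);       /* last reference gone */
}

static bool part_ref(struct part *p)
{
    atomic_fetch_add(&p->references, 1);
    if (p->setup_state == SS_SETUP)
        return true;                   /* caller may use the partition */
    part_deref(p);                     /* not set up: undo the ref */
    return false;
}

int main(void)
{
    struct part p = { .references = 0, .setup_state = SS_SETUP };

    if (part_ref(&p)) {
        p.setup_state = SS_WTEARDOWN;  /* teardown begins while in use */
        part_deref(&p);                /* last user leaving wakes waiter */
    }
    return 0;
}
```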
/drivers/acpi/acpica/
utmath.c:68 struct uint64_struct part; member
112 ACPI_DIV_64_BY_32(0, dividend_ovl.part.hi, divisor, in acpi_ut_short_divide()
113 quotient.part.hi, remainder32); in acpi_ut_short_divide()
115 ACPI_DIV_64_BY_32(remainder32, dividend_ovl.part.lo, divisor, in acpi_ut_short_divide()
116 quotient.part.lo, remainder32); in acpi_ut_short_divide()
170 if (divisor.part.hi == 0) { in acpi_ut_divide()
175 remainder.part.hi = 0; in acpi_ut_divide()
181 ACPI_DIV_64_BY_32(0, dividend.part.hi, divisor.part.lo, in acpi_ut_divide()
182 quotient.part.hi, partial1); in acpi_ut_divide()
184 ACPI_DIV_64_BY_32(partial1, dividend.part.lo, divisor.part.lo, in acpi_ut_divide()
[all …]
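utmath.c exists for platforms without native 64-bit division: ACPI_DIV_64_BY_32 is applied twice, high half first, with the first step's remainder carried into the second. The same two-step long division in plain C, checked against native division (the carry step here uses a 64-bit divide for brevity; the kernel macro performs it with 32-bit instructions):

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Divide a 64-bit value by a 32-bit divisor hi half first, then the
 * remainder carried into the lo half: the sequencing used twice in
 * acpi_ut_short_divide() above. */
static uint64_t div64_by_32(uint64_t dividend, uint32_t divisor,
                            uint32_t *remainder)
{
    uint32_t hi = (uint32_t)(dividend >> 32);
    uint32_t lo = (uint32_t)dividend;

    uint32_t q_hi  = hi / divisor;                /* first step */
    uint64_t carry = ((uint64_t)(hi % divisor) << 32) | lo;
    uint32_t q_lo  = (uint32_t)(carry / divisor); /* second, with carry */

    *remainder = (uint32_t)(carry % divisor);
    return ((uint64_t)q_hi << 32) | q_lo;         /* q_lo provably fits */
}

int main(void)
{
    uint64_t n = 0x123456789abcdef0ULL;
    uint32_t r, d = 0x12345;
    uint64_t q = div64_by_32(n, d, &r);

    assert(q == n / d && r == n % d);             /* agrees with native */
    printf("q=0x%llx r=0x%x\n", (unsigned long long)q, r);
    return 0;
}
```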
/drivers/irqchip/
irq-partition-percpu.c:36 static bool partition_check_cpu(struct partition_desc *part, in partition_check_cpu() argument
39 return cpumask_test_cpu(cpu, &part->parts[hwirq].mask); in partition_check_cpu()
44 struct partition_desc *part = irq_data_get_irq_chip_data(d); in partition_irq_mask() local
45 struct irq_chip *chip = irq_desc_get_chip(part->chained_desc); in partition_irq_mask()
46 struct irq_data *data = irq_desc_get_irq_data(part->chained_desc); in partition_irq_mask()
48 if (partition_check_cpu(part, smp_processor_id(), d->hwirq) && in partition_irq_mask()
55 struct partition_desc *part = irq_data_get_irq_chip_data(d); in partition_irq_unmask() local
56 struct irq_chip *chip = irq_desc_get_chip(part->chained_desc); in partition_irq_unmask()
57 struct irq_data *data = irq_desc_get_irq_data(part->chained_desc); in partition_irq_unmask()
59 if (partition_check_cpu(part, smp_processor_id(), d->hwirq) && in partition_irq_unmask()
[all …]
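irq-partition-percpu.c gates every mask and unmask on whether the executing CPU belongs to the cpumask of the partition owning the hwirq, which is how one chained parent interrupt gets sliced into per-CPU-set children. The check itself is a plain bitmap test; a reduced sketch with a u64 standing in for struct cpumask:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct partition_affinity {
    uint64_t mask;                  /* stand-in for a struct cpumask */
};

struct partition_desc {
    struct partition_affinity parts[4];
};

/* partition_check_cpu() above: is this CPU in the partition owning hwirq? */
static bool partition_check_cpu(const struct partition_desc *part,
                                unsigned int cpu, unsigned int hwirq)
{
    return part->parts[hwirq].mask & (1ull << cpu);
}

int main(void)
{
    struct partition_desc d = { .parts = { [1] = { .mask = 0x5 } } };

    /* hwirq 1 belongs to CPUs 0 and 2 only. */
    printf("%d %d %d\n",
           partition_check_cpu(&d, 0, 1),
           partition_check_cpu(&d, 1, 1),
           partition_check_cpu(&d, 2, 1));   /* 1 0 1 */
    return 0;
}
```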
/drivers/net/ethernet/sfc/
mtd.c:42 struct efx_mtd_partition *part = to_efx_mtd_partition(mtd); in efx_mtd_sync() local
49 part->name, part->dev_type_name, rc); in efx_mtd_sync()
52 static void efx_mtd_remove_partition(struct efx_mtd_partition *part) in efx_mtd_remove_partition() argument
57 rc = mtd_device_unregister(&part->mtd); in efx_mtd_remove_partition()
63 list_del(&part->node); in efx_mtd_remove_partition()
69 struct efx_mtd_partition *part; in efx_mtd_add() local
73 part = (struct efx_mtd_partition *)((char *)parts + in efx_mtd_add()
76 part->mtd.writesize = 1; in efx_mtd_add()
78 part->mtd.owner = THIS_MODULE; in efx_mtd_add()
79 part->mtd.priv = efx; in efx_mtd_add()
[all …]
mcdi.c:2160 struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd); in efx_mcdi_mtd_read() local
2169 rc = efx_mcdi_nvram_read(efx, part->nvram_type, offset, in efx_mcdi_mtd_read()
2183 struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd); in efx_mcdi_mtd_erase() local
2187 size_t chunk = part->common.mtd.erasesize; in efx_mcdi_mtd_erase()
2190 if (!part->updating) { in efx_mcdi_mtd_erase()
2191 rc = efx_mcdi_nvram_update_start(efx, part->nvram_type); in efx_mcdi_mtd_erase()
2194 part->updating = true; in efx_mcdi_mtd_erase()
2201 rc = efx_mcdi_nvram_erase(efx, part->nvram_type, offset, in efx_mcdi_mtd_erase()
2214 struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd); in efx_mcdi_mtd_write() local
2221 if (!part->updating) { in efx_mcdi_mtd_write()
[all …]
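efx_mcdi_mtd_erase() opens the NVRAM update transaction lazily: the first erase (or write) calls update_start and sets part->updating, so later operations reuse the already-open transaction. That lazy-begin shape, reduced to a sketch with stubbed MCDI calls:

```c
#include <stdbool.h>
#include <stdio.h>

struct mtd_partition {
    bool updating;                 /* has an update transaction begun? */
};

static int nvram_update_start(void) { printf("update start\n"); return 0; }
static int nvram_erase(unsigned off) { printf("erase @%u\n", off); return 0; }

/* First erase opens the NVRAM update; later erases reuse it, as
 * efx_mcdi_mtd_erase() does via part->updating. */
static int mtd_erase(struct mtd_partition *part, unsigned off)
{
    if (!part->updating) {
        int rc = nvram_update_start();
        if (rc)
            return rc;
        part->updating = true;     /* flip only after a successful start */
    }
    return nvram_erase(off);
}

int main(void)
{
    struct mtd_partition p = { .updating = false };
    mtd_erase(&p, 0);              /* update start + erase @0 */
    mtd_erase(&p, 4096);           /* erase @4096 only */
    return 0;
}
```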
/drivers/net/wireless/ti/wlcore/
io.c:71 struct wlcore_partition_set *part = &wl->curr_part; in wlcore_translate_addr() local
83 if ((addr >= part->mem.start) && in wlcore_translate_addr()
84 (addr < part->mem.start + part->mem.size)) in wlcore_translate_addr()
85 return addr - part->mem.start; in wlcore_translate_addr()
86 else if ((addr >= part->reg.start) && in wlcore_translate_addr()
87 (addr < part->reg.start + part->reg.size)) in wlcore_translate_addr()
88 return addr - part->reg.start + part->mem.size; in wlcore_translate_addr()
89 else if ((addr >= part->mem2.start) && in wlcore_translate_addr()
90 (addr < part->mem2.start + part->mem2.size)) in wlcore_translate_addr()
91 return addr - part->mem2.start + part->mem.size + in wlcore_translate_addr()
[all …]
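wlcore's translate routine packs several chip windows back to back into one linear space: an address inside mem maps to [0, mem.size), an address inside reg lands immediately after it, and mem2/mem3 follow the same pattern. A compilable sketch of the first two cases:

```c
#include <stdint.h>
#include <stdio.h>

struct window { uint32_t start, size; };

struct partition_set {
    struct window mem, reg;        /* the driver also has mem2, mem3 */
};

/* Translate a chip address into the packed layout: mem first, reg after. */
static int translate_addr(const struct partition_set *part,
                          uint32_t addr, uint32_t *out)
{
    if (addr >= part->mem.start &&
        addr <  part->mem.start + part->mem.size) {
        *out = addr - part->mem.start;
        return 0;
    }
    if (addr >= part->reg.start &&
        addr <  part->reg.start + part->reg.size) {
        *out = addr - part->reg.start + part->mem.size;
        return 0;
    }
    return -1;                      /* not inside the current windows */
}

int main(void)
{
    struct partition_set p = {
        .mem = { .start = 0x40000, .size = 0x8000 },
        .reg = { .start = 0x80000, .size = 0x1000 },
    };
    uint32_t out;

    translate_addr(&p, 0x80010, &out);
    printf("0x%x\n", out);          /* 0x8010: reg lands right after mem */
    return 0;
}
```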
/drivers/misc/
sram.c:63 struct sram_partition *part; in sram_read() local
65 part = container_of(attr, struct sram_partition, battr); in sram_read()
67 mutex_lock(&part->lock); in sram_read()
68 memcpy_fromio(buf, part->base + pos, count); in sram_read()
69 mutex_unlock(&part->lock); in sram_read()
78 struct sram_partition *part; in sram_write() local
80 part = container_of(attr, struct sram_partition, battr); in sram_write()
82 mutex_lock(&part->lock); in sram_write()
83 memcpy_toio(part->base + pos, buf, count); in sram_write()
84 mutex_unlock(&part->lock); in sram_write()
[all …]
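sram.c exposes each partition as a binary sysfs attribute whose handlers are just a mutex around memcpy_fromio()/memcpy_toio() at base + pos. The same shape in portable C, with a pthread mutex standing in for the kernel mutex and plain memcpy for the MMIO copies:

```c
#include <pthread.h>
#include <stdio.h>
#include <string.h>

struct sram_partition {
    pthread_mutex_t lock;
    unsigned char base[256];       /* stand-in for the ioremapped base */
};

static size_t sram_read(struct sram_partition *part, char *buf,
                        size_t pos, size_t count)
{
    pthread_mutex_lock(&part->lock);
    memcpy(buf, part->base + pos, count);   /* memcpy_fromio() in-kernel */
    pthread_mutex_unlock(&part->lock);
    return count;
}

static size_t sram_write(struct sram_partition *part, const char *buf,
                         size_t pos, size_t count)
{
    pthread_mutex_lock(&part->lock);
    memcpy(part->base + pos, buf, count);   /* memcpy_toio() in-kernel */
    pthread_mutex_unlock(&part->lock);
    return count;
}

int main(void)
{
    struct sram_partition p = { .lock = PTHREAD_MUTEX_INITIALIZER };
    char out[6] = { 0 };

    sram_write(&p, "hello", 16, 5);
    sram_read(&p, out, 16, 5);
    printf("%s\n", out);            /* hello */
    return 0;
}
```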
/drivers/firmware/efi/
efi-pstore.c:42 unsigned int part, int count) in generic_id() argument
44 return ((u64) timestamp * 100 + part) * 1000 + count; in generic_id()
54 unsigned int part; in efi_pstore_read_func() local
64 cb_data->type, &part, &cnt, &time, &data_type) == 5) { in efi_pstore_read_func()
65 *cb_data->id = generic_id(time, part, cnt); in efi_pstore_read_func()
75 cb_data->type, &part, &cnt, &time) == 4) { in efi_pstore_read_func()
76 *cb_data->id = generic_id(time, part, cnt); in efi_pstore_read_func()
83 cb_data->type, &part, &time) == 3) { in efi_pstore_read_func()
89 *cb_data->id = generic_id(time, part, 0); in efi_pstore_read_func()
263 unsigned int part, int count, bool compressed, size_t size, in efi_pstore_write() argument
[all …]
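generic_id() packs the (timestamp, part, count) triple recovered by sscanf into one u64 as nested mixed-radix digits; the encoding is only collision-free while part stays below 100 and count below 1000. Packing and unpacking, spelled out:

```c
#include <stdint.h>
#include <stdio.h>

/* generic_id() above: id = (timestamp * 100 + part) * 1000 + count.
 * Unique only while part < 100 and count < 1000. */
static uint64_t generic_id(uint64_t timestamp, unsigned int part, int count)
{
    return ((uint64_t)timestamp * 100 + part) * 1000 + count;
}

int main(void)
{
    uint64_t id = generic_id(1700000000, 7, 42);

    /* The packing is reversible by peeling the radices back off. */
    printf("count=%llu part=%llu timestamp=%llu\n",
           (unsigned long long)(id % 1000),
           (unsigned long long)(id / 1000 % 100),
           (unsigned long long)(id / 100000));
    return 0;
}
```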
/drivers/gpu/drm/nouveau/nvkm/subdev/fb/
ramgp100.c:97 u64 part, size = 0, comm = ~0ULL; in gp100_ram_new() local
105 part = nvkm_rd32(device, 0x90020c + (fbpa * 0x4000)); in gp100_ram_new()
106 nvkm_debug(subdev, "fbpa %02x: %lld MiB\n", fbpa, part); in gp100_ram_new()
107 part = part << 20; in gp100_ram_new()
108 if (part != comm) { in gp100_ram_new()
111 comm = min(comm, part); in gp100_ram_new()
113 size = size + part; in gp100_ram_new()
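The gp100 probe reads each FBPA's size register, accumulates the total, and tracks the smallest amount in comm so mixed-size configurations can be detected (uniform striping only covers the common portion). A sketch of that min-and-sum scan over made-up register values; how the driver reacts to a mismatch is not shown in the snippet:

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Pretend per-FBPA sizes in MiB, as nvkm_rd32() would report them. */
    const uint64_t fbpa_mib[] = { 1024, 1024, 512 };
    const int nfbpa = sizeof(fbpa_mib) / sizeof(fbpa_mib[0]);

    uint64_t size = 0, comm = ~0ULL;    /* comm = smallest seen so far */
    int mixed = 0;

    for (int i = 0; i < nfbpa; i++) {
        uint64_t part = fbpa_mib[i] << 20;   /* MiB -> bytes */
        if (comm != ~0ULL && part != comm)
            mixed = 1;                       /* FBPAs disagree on size */
        comm = part < comm ? part : comm;
        size += part;
    }

    printf("total %" PRIu64 " MiB, common %" PRIu64 " MiB%s\n",
           size >> 20, comm >> 20, mixed ? " (mixed)" : "");
    return 0;
}
```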
/drivers/gpu/drm/nouveau/nvkm/engine/dma/
usernv50.c:78 u32 user, part, comp, kind; in nv50_dmaobj_new() local
97 args->v0.priv, args->v0.part, args->v0.comp, in nv50_dmaobj_new()
100 part = args->v0.part; in nv50_dmaobj_new()
107 part = NV50_DMA_V0_PART_256; in nv50_dmaobj_new()
112 part = NV50_DMA_V0_PART_VM; in nv50_dmaobj_new()
119 if (user > 2 || part > 2 || comp > 3 || kind > 0x7f) in nv50_dmaobj_new()
123 dmaobj->flags5 = (part << 16); in nv50_dmaobj_new()
/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/
nv44.c:94 u32 part = (cnt > max) ? max : cnt; in nv44_vm_map_sg() local
95 nv44_vm_fill(pgt, mmu->null, list, pte, part); in nv44_vm_map_sg()
96 pte += part; in nv44_vm_map_sg()
97 list += part; in nv44_vm_map_sg()
98 cnt -= part; in nv44_vm_map_sg()
124 u32 part = (cnt > max) ? max : cnt; in nv44_vm_unmap() local
125 nv44_vm_fill(pgt, mmu->null, NULL, pte, part); in nv44_vm_unmap()
126 pte += part; in nv44_vm_unmap()
127 cnt -= part; in nv44_vm_unmap()
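Both nv44 paths chew through page-table entries in bounded chunks: clamp part = min(cnt, max), fill, then advance pte and list and shrink cnt until nothing remains. The clamp-advance loop on its own:

```c
#include <stdio.h>

/* Stand-in for nv44_vm_fill(): handle `part` entries starting at pte. */
static void fill(unsigned pte, unsigned part)
{
    printf("fill %u entries at pte %u\n", part, pte);
}

int main(void)
{
    unsigned pte = 3;       /* starting entry, possibly mid-group */
    unsigned cnt = 10;      /* total entries to process */
    const unsigned max = 4; /* largest chunk the hardware path takes */

    while (cnt) {
        unsigned part = (cnt > max) ? max : cnt;  /* clamp, as above */
        fill(pte, part);
        pte += part;        /* advance the cursor... */
        cnt -= part;        /* ...and shrink the remainder */
    }
    return 0;
}
```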
/drivers/char/
generic_nvram.c:85 int part, offset; in nvram_ioctl() local
89 if (copy_from_user(&part, (void __user*)arg, sizeof(part)) != 0) in nvram_ioctl()
91 if (part < pmac_nvram_OF || part > pmac_nvram_NR) in nvram_ioctl()
93 offset = pmac_get_partition(part); in nvram_ioctl()
/drivers/staging/lustre/lnet/libcfs/linux/
linux-cpu.c:100 struct cfs_cpu_partition *part = &cptab->ctb_parts[i]; in cfs_cpt_table_free() local
102 if (part->cpt_nodemask) { in cfs_cpt_table_free()
103 LIBCFS_FREE(part->cpt_nodemask, in cfs_cpt_table_free()
104 sizeof(*part->cpt_nodemask)); in cfs_cpt_table_free()
107 if (part->cpt_cpumask) in cfs_cpt_table_free()
108 LIBCFS_FREE(part->cpt_cpumask, cpumask_size()); in cfs_cpt_table_free()
156 struct cfs_cpu_partition *part = &cptab->ctb_parts[i]; in cfs_cpt_table_alloc() local
158 LIBCFS_ALLOC(part->cpt_cpumask, cpumask_size()); in cfs_cpt_table_alloc()
159 LIBCFS_ALLOC(part->cpt_nodemask, sizeof(*part->cpt_nodemask)); in cfs_cpt_table_alloc()
160 if (!part->cpt_cpumask || !part->cpt_nodemask) in cfs_cpt_table_alloc()
[all …]
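The libcfs table code allocates two masks per partition, and the free path tolerates either being NULL because setup may have failed halfway through. A reduced sketch of that alloc-both/free-what-exists pairing, with calloc/free in place of LIBCFS_ALLOC/LIBCFS_FREE:

```c
#include <stdio.h>
#include <stdlib.h>

struct cpu_partition {
    unsigned long *cpt_cpumask;    /* stand-ins for the two masks */
    unsigned long *cpt_nodemask;
};

static void part_free(struct cpu_partition *part)
{
    /* Free whichever allocations exist; setup may have stopped midway. */
    free(part->cpt_cpumask);
    free(part->cpt_nodemask);
    part->cpt_cpumask = part->cpt_nodemask = NULL;
}

static int part_alloc(struct cpu_partition *part)
{
    part->cpt_cpumask = calloc(1, sizeof(*part->cpt_cpumask));
    part->cpt_nodemask = calloc(1, sizeof(*part->cpt_nodemask));
    if (!part->cpt_cpumask || !part->cpt_nodemask) {
        part_free(part);           /* unwind the partial state */
        return -1;
    }
    return 0;
}

int main(void)
{
    struct cpu_partition p = { 0 };
    printf("%d\n", part_alloc(&p));    /* 0 on success */
    part_free(&p);
    return 0;
}
```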
/drivers/vhost/
vringh.c:230 size_t part, len = sizeof(struct vring_desc); in slow_copy() local
236 part = len; in slow_copy()
239 if (!rcheck(vrh, addr, &part, range, getrange)) in slow_copy()
242 err = copy(dst, src, part); in slow_copy()
246 dst += part; in slow_copy()
247 src += part; in slow_copy()
248 len -= part; in slow_copy()
420 u16 part = vrh->vring.num - off; in __vringh_complete() local
421 err = putused(&used_ring->ring[off], used, part); in __vringh_complete()
423 err = putused(&used_ring->ring[0], used + part, in __vringh_complete()
[all …]
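The second vringh.c hit is the classic ring wraparound split: when off + count would run past vring.num, write part = num - off entries at ring[off] and the remaining count - part at ring[0]. A sketch of that split copy:

```c
#include <stdio.h>
#include <string.h>

#define RING_NUM 8                     /* vrh->vring.num */

static int ring[RING_NUM];

/* Write `count` entries at logical position `off`, wrapping once if the
 * run crosses the end of the ring, as __vringh_complete() does. */
static void put_used(unsigned off, const int *used, unsigned count)
{
    if (off + count > RING_NUM) {
        unsigned part = RING_NUM - off;           /* entries before wrap */
        memcpy(&ring[off], used, part * sizeof(*used));
        memcpy(&ring[0], used + part, (count - part) * sizeof(*used));
    } else {
        memcpy(&ring[off], used, count * sizeof(*used));
    }
}

int main(void)
{
    const int used[4] = { 1, 2, 3, 4 };

    put_used(6, used, 4);              /* 2 at the tail, 2 wrapped */
    for (int i = 0; i < RING_NUM; i++)
        printf("%d ", ring[i]);        /* 3 4 0 0 0 0 1 2 */
    printf("\n");
    return 0;
}
```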
