
Searched refs:part (Results 1 – 25 of 91) sorted by relevance


/drivers/mtd/
rfd_ftl.c 92 static int build_block_map(struct partition *part, int block_no) in build_block_map() argument
94 struct block *block = &part->blocks[block_no]; in build_block_map()
97 block->offset = part->block_size * block_no; in build_block_map()
99 if (le16_to_cpu(part->header_cache[0]) != RFD_MAGIC) { in build_block_map()
106 for (i=0; i<part->data_sectors_per_block; i++) { in build_block_map()
109 entry = le16_to_cpu(part->header_cache[HEADER_MAP_OFFSET + i]); in build_block_map()
122 if (entry >= part->sector_count) { in build_block_map()
126 part->mbd.mtd->name, block_no, i, entry); in build_block_map()
130 if (part->sector_map[entry] != -1) { in build_block_map()
133 part->mbd.mtd->name, entry); in build_block_map()
[all …]
ftl.c 158 static int scan_header(partition_t *part) in scan_header() argument
164 part->header.FormattedSize = 0; in scan_header()
165 max_offset = (0x100000<part->mbd.mtd->size)?0x100000:part->mbd.mtd->size; in scan_header()
169 offset += part->mbd.mtd->erasesize ? : 0x2000) { in scan_header()
171 err = mtd_read(part->mbd.mtd, offset, sizeof(header), &ret, in scan_header()
190 if ((1 << header.EraseUnitSize) != part->mbd.mtd->erasesize) { in scan_header()
192 1 << header.EraseUnitSize,part->mbd.mtd->erasesize); in scan_header()
195 part->header = header; in scan_header()
199 static int build_maps(partition_t *part) in build_maps() argument
209 part->DataUnits = le16_to_cpu(part->header.NumEraseUnits) - in build_maps()
[all …]
mtdpart.c 63 struct mtd_part *part = PART(mtd); in part_read() local
67 stats = part->master->ecc_stats; in part_read()
68 res = part->master->_read(part->master, from + part->offset, len, in part_read()
72 mtd->ecc_stats.corrected += part->master->ecc_stats.corrected - stats.corrected; in part_read()
74 mtd->ecc_stats.failed += part->master->ecc_stats.failed - stats.failed; in part_read()
82 struct mtd_part *part = PART(mtd); in part_point() local
84 return part->master->_point(part->master, from + part->offset, len, in part_point()
90 struct mtd_part *part = PART(mtd); in part_unpoint() local
92 return part->master->_unpoint(part->master, from + part->offset, len); in part_unpoint()
100 struct mtd_part *part = PART(mtd); in part_get_unmapped_area() local
[all …]
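
Every mtdpart.c hit above is an instance of the same wrapper pattern: a partition operation rebases the request by the partition's start (from + part->offset) and delegates it to the master device, then copies back per-call statistics such as the ECC counters. Below is a minimal userspace sketch of that offset translation; the types and names (fake_master, fake_part, part_read) are hypothetical stand-ins, not kernel API.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct fake_master {
    uint8_t storage[4096];          /* the whole underlying device */
};

struct fake_part {
    struct fake_master *master;
    uint32_t offset;                /* partition start within the master */
    uint32_t size;
};

/* Read from a partition by delegating to the master at from + offset,
 * after a bounds check against the partition size. */
static int part_read(struct fake_part *part, uint32_t from,
                     uint32_t len, uint8_t *buf)
{
    if (from + len > part->size)
        return -1;
    memcpy(buf, part->master->storage + part->offset + from, len);
    return 0;
}

int main(void)
{
    struct fake_master m;
    struct fake_part p = { .master = &m, .offset = 1024, .size = 1024 };
    uint8_t buf[4];

    memcpy(m.storage + 1024, "MTD!", 4);    /* data at partition start */
    if (part_read(&p, 0, 4, buf) == 0)
        printf("%.4s\n", buf);              /* prints MTD! */
    return 0;
}

The design keeps the partition object nearly stateless: everything beyond offset, size, and bookkeeping is deferred to the master device.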
cmdlinepart.c 322 struct cmdline_mtd_partition *part; in parse_cmdline_partitions() local
329 for(part = partitions; part; part = part->next) in parse_cmdline_partitions()
331 if ((!mtd_id) || (!strcmp(part->mtd_id, mtd_id))) in parse_cmdline_partitions()
333 for(i = 0, offset = 0; i < part->num_parts; i++) in parse_cmdline_partitions()
335 if (part->parts[i].offset == OFFSET_CONTINUOUS) in parse_cmdline_partitions()
336 part->parts[i].offset = offset; in parse_cmdline_partitions()
338 offset = part->parts[i].offset; in parse_cmdline_partitions()
339 if (part->parts[i].size == SIZE_REMAINING) in parse_cmdline_partitions()
340 part->parts[i].size = master->size - offset; in parse_cmdline_partitions()
341 if (offset + part->parts[i].size > master->size) in parse_cmdline_partitions()
[all …]
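
parse_cmdline_partitions() above resolves two sentinels from the mtdparts= command line: OFFSET_CONTINUOUS places a partition immediately after the previous one, and SIZE_REMAINING stretches it to the end of the master device (the line-341 comparison then guards against overrunning the device). A minimal sketch of that resolution, assuming simplified sentinel values and struct layout:

#include <stdint.h>
#include <stdio.h>

#define OFFSET_CONTINUOUS UINT64_MAX    /* illustrative sentinel values */
#define SIZE_REMAINING    UINT64_MAX

struct part_def {
    uint64_t offset;
    uint64_t size;
};

static void resolve_parts(struct part_def *parts, int n, uint64_t master_size)
{
    uint64_t offset = 0;                     /* running placement cursor */

    for (int i = 0; i < n; i++) {
        if (parts[i].offset == OFFSET_CONTINUOUS)
            parts[i].offset = offset;        /* append after previous */
        else
            offset = parts[i].offset;        /* jump to explicit start */
        if (parts[i].size == SIZE_REMAINING)
            parts[i].size = master_size - offset;
        offset += parts[i].size;
    }
}

int main(void)
{
    struct part_def parts[] = {
        { 0,                 0x10000        },
        { OFFSET_CONTINUOUS, SIZE_REMAINING },
    };

    resolve_parts(parts, 2, 0x80000);
    printf("part1: off=0x%llx size=0x%llx\n",
           (unsigned long long)parts[1].offset,
           (unsigned long long)parts[1].size);  /* off=0x10000 size=0x70000 */
    return 0;
}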
ofpart.c 104 } *part; in parse_ofoldpart_partitions() local
114 part = of_get_property(dp, "partitions", &plen); in parse_ofoldpart_partitions()
115 if (!part) in parse_ofoldpart_partitions()
121 nr_parts = plen / sizeof(part[0]); in parse_ofoldpart_partitions()
130 (*pparts)[i].offset = be32_to_cpu(part->offset); in parse_ofoldpart_partitions()
131 (*pparts)[i].size = be32_to_cpu(part->len) & ~1; in parse_ofoldpart_partitions()
133 if (be32_to_cpu(part->len) & 1) in parse_ofoldpart_partitions()
146 part++; in parse_ofoldpart_partitions()
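
parse_ofoldpart_partitions() above decodes the legacy device-tree "partitions" property: a flat array of big-endian (offset, len) cell pairs, where bit 0 of len flags the partition read-only and is masked off the size (the & ~1 on line 131). A sketch of that decoding, with a portable stand-in for be32_to_cpu() and made-up property bytes:

#include <stdint.h>
#include <stdio.h>

struct of_old_part {
    uint32_t offset;    /* big-endian cell */
    uint32_t len;       /* big-endian cell; bit 0 = read-only flag */
};

/* Portable stand-in for be32_to_cpu(): reassemble from raw bytes. */
static uint32_t be32_to_host(uint32_t be)
{
    const uint8_t *b = (const uint8_t *)&be;
    return ((uint32_t)b[0] << 24) | ((uint32_t)b[1] << 16) |
           ((uint32_t)b[2] << 8)  |  (uint32_t)b[3];
}

int main(void)
{
    /* Property bytes as they would sit in the tree, big-endian:
     * entry 0: offset 0x0,     len 0x20001 (bit 0 set -> read-only)
     * entry 1: offset 0x20000, len 0x60000 */
    static const uint8_t prop[] = {
        0x00,0x00,0x00,0x00,  0x00,0x02,0x00,0x01,
        0x00,0x02,0x00,0x00,  0x00,0x06,0x00,0x00,
    };
    const struct of_old_part *part = (const void *)prop;
    int nr_parts = sizeof(prop) / sizeof(part[0]);  /* plen / entry size */

    for (int i = 0; i < nr_parts; i++, part++) {
        uint32_t offset = be32_to_host(part->offset);
        uint32_t size   = be32_to_host(part->len) & ~1u;
        int ro          = be32_to_host(part->len) & 1;

        printf("part %d: off=0x%x size=0x%x%s\n",
               i, offset, size, ro ? " (ro)" : "");
    }
    return 0;
}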
/drivers/misc/sgi-xp/
xpc_main.c 177 struct xpc_partition *part = (struct xpc_partition *)data; in xpc_timeout_partition_disengage() local
179 DBUG_ON(time_is_after_jiffies(part->disengage_timeout)); in xpc_timeout_partition_disengage()
181 (void)xpc_partition_disengaged(part); in xpc_timeout_partition_disengage()
183 DBUG_ON(part->disengage_timeout != 0); in xpc_timeout_partition_disengage()
184 DBUG_ON(xpc_arch_ops.partition_engaged(XPC_PARTID(part))); in xpc_timeout_partition_disengage()
227 struct xpc_partition *part; in xpc_check_remote_hb() local
239 part = &xpc_partitions[partid]; in xpc_check_remote_hb()
241 if (part->act_state == XPC_P_AS_INACTIVE || in xpc_check_remote_hb()
242 part->act_state == XPC_P_AS_DEACTIVATING) { in xpc_check_remote_hb()
246 ret = xpc_arch_ops.get_remote_heartbeat(part); in xpc_check_remote_hb()
[all …]
xpc_partition.c 270 xpc_partition_disengaged(struct xpc_partition *part) in xpc_partition_disengaged() argument
272 short partid = XPC_PARTID(part); in xpc_partition_disengaged()
276 if (part->disengage_timeout) { in xpc_partition_disengaged()
278 if (time_is_after_jiffies(part->disengage_timeout)) { in xpc_partition_disengaged()
294 part->disengage_timeout = 0; in xpc_partition_disengaged()
298 del_singleshot_timer_sync(&part->disengage_timer); in xpc_partition_disengaged()
300 DBUG_ON(part->act_state != XPC_P_AS_DEACTIVATING && in xpc_partition_disengaged()
301 part->act_state != XPC_P_AS_INACTIVE); in xpc_partition_disengaged()
302 if (part->act_state != XPC_P_AS_INACTIVE) in xpc_partition_disengaged()
303 xpc_wakeup_channel_mgr(part); in xpc_partition_disengaged()
[all …]
xpc_uv.c 373 struct xpc_partition *part; in xpc_process_activate_IRQ_rcvd_uv() local
380 part = &xpc_partitions[partid]; in xpc_process_activate_IRQ_rcvd_uv()
382 if (part->sn.uv.act_state_req == 0) in xpc_process_activate_IRQ_rcvd_uv()
388 act_state_req = part->sn.uv.act_state_req; in xpc_process_activate_IRQ_rcvd_uv()
389 part->sn.uv.act_state_req = 0; in xpc_process_activate_IRQ_rcvd_uv()
393 if (part->act_state == XPC_P_AS_INACTIVE) in xpc_process_activate_IRQ_rcvd_uv()
394 xpc_activate_partition(part); in xpc_process_activate_IRQ_rcvd_uv()
395 else if (part->act_state == XPC_P_AS_DEACTIVATING) in xpc_process_activate_IRQ_rcvd_uv()
396 XPC_DEACTIVATE_PARTITION(part, xpReactivating); in xpc_process_activate_IRQ_rcvd_uv()
399 if (part->act_state == XPC_P_AS_INACTIVE) in xpc_process_activate_IRQ_rcvd_uv()
[all …]
xpc_channel.c 82 struct xpc_partition *part = &xpc_partitions[ch->partid]; in xpc_process_disconnect() local
101 if (part->act_state == XPC_P_AS_DEACTIVATING) { in xpc_process_disconnect()
157 atomic_dec(&part->nchannels_active); in xpc_process_disconnect()
168 if (part->act_state != XPC_P_AS_DEACTIVATING) { in xpc_process_disconnect()
170 spin_lock(&part->chctl_lock); in xpc_process_disconnect()
171 part->chctl.flags[ch->number] |= in xpc_process_disconnect()
173 spin_unlock(&part->chctl_lock); in xpc_process_disconnect()
183 xpc_process_openclose_chctl_flags(struct xpc_partition *part, int ch_number, in xpc_process_openclose_chctl_flags() argument
188 &part->remote_openclose_args[ch_number]; in xpc_process_openclose_chctl_flags()
189 struct xpc_channel *ch = &part->channels[ch_number]; in xpc_process_openclose_chctl_flags()
[all …]
xpc_sn2.c 268 xpc_check_for_sent_chctl_flags_sn2(struct xpc_partition *part) in xpc_check_for_sent_chctl_flags_sn2() argument
273 chctl.all_flags = xpc_receive_IRQ_amo_sn2(part->sn.sn2. in xpc_check_for_sent_chctl_flags_sn2()
278 spin_lock_irqsave(&part->chctl_lock, irq_flags); in xpc_check_for_sent_chctl_flags_sn2()
279 part->chctl.all_flags |= chctl.all_flags; in xpc_check_for_sent_chctl_flags_sn2()
280 spin_unlock_irqrestore(&part->chctl_lock, irq_flags); in xpc_check_for_sent_chctl_flags_sn2()
283 "0x%llx\n", XPC_PARTID(part), chctl.all_flags); in xpc_check_for_sent_chctl_flags_sn2()
285 xpc_wakeup_channel_mgr(part); in xpc_check_for_sent_chctl_flags_sn2()
308 struct xpc_partition *part = &xpc_partitions[partid]; in xpc_handle_notify_IRQ_sn2() local
312 if (xpc_part_ref(part)) { in xpc_handle_notify_IRQ_sn2()
313 xpc_check_for_sent_chctl_flags_sn2(part); in xpc_handle_notify_IRQ_sn2()
[all …]
xpc.h 937 xpc_wakeup_channel_mgr(struct xpc_partition *part) in xpc_wakeup_channel_mgr() argument
939 if (atomic_inc_return(&part->channel_mgr_requests) == 1) in xpc_wakeup_channel_mgr()
940 wake_up(&part->channel_mgr_wq); in xpc_wakeup_channel_mgr()
971 xpc_part_deref(struct xpc_partition *part) in xpc_part_deref() argument
973 s32 refs = atomic_dec_return(&part->references); in xpc_part_deref()
976 if (refs == 0 && part->setup_state == XPC_P_SS_WTEARDOWN) in xpc_part_deref()
977 wake_up(&part->teardown_wq); in xpc_part_deref()
981 xpc_part_ref(struct xpc_partition *part) in xpc_part_ref() argument
985 atomic_inc(&part->references); in xpc_part_ref()
986 setup = (part->setup_state == XPC_P_SS_SETUP); in xpc_part_ref()
[all …]
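
The xpc.h inlines above form a small lifetime protocol: xpc_part_ref() succeeds only while the partition is in the SETUP state (immediately dropping its reference otherwise), and xpc_part_deref() wakes the teardown waiter once the last reference is gone. A userspace sketch of the same protocol, assuming pthreads in place of kernel wait queues and invented state names:

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum setup_state { SS_SETUP, SS_WTEARDOWN, SS_TORNDOWN };

struct fake_partition {
    atomic_int references;
    enum setup_state setup_state;   /* guarded elsewhere, as in xpc */
    pthread_mutex_t lock;
    pthread_cond_t teardown_wq;     /* stands in for part->teardown_wq */
};

static void part_deref(struct fake_partition *p)
{
    int refs = atomic_fetch_sub(&p->references, 1) - 1;

    /* Last reference dropped while teardown is waiting: wake it up. */
    if (refs == 0 && p->setup_state == SS_WTEARDOWN) {
        pthread_mutex_lock(&p->lock);
        pthread_cond_signal(&p->teardown_wq);
        pthread_mutex_unlock(&p->lock);
    }
}

static bool part_ref(struct fake_partition *p)
{
    atomic_fetch_add(&p->references, 1);
    bool setup = (p->setup_state == SS_SETUP);

    if (!setup)
        part_deref(p);              /* not usable: give the ref back */
    return setup;
}

int main(void)
{
    struct fake_partition p = {
        .references  = 0,
        .setup_state = SS_SETUP,
        .lock        = PTHREAD_MUTEX_INITIALIZER,
        .teardown_wq = PTHREAD_COND_INITIALIZER,
    };

    if (part_ref(&p))
        printf("got a reference\n");
    part_deref(&p);
    return 0;
}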
/drivers/acpi/acpica/
utmath.c 68 struct uint64_struct part; member
112 ACPI_DIV_64_BY_32(0, dividend_ovl.part.hi, divisor, in acpi_ut_short_divide()
113 quotient.part.hi, remainder32); in acpi_ut_short_divide()
114 ACPI_DIV_64_BY_32(remainder32, dividend_ovl.part.lo, divisor, in acpi_ut_short_divide()
115 quotient.part.lo, remainder32); in acpi_ut_short_divide()
169 if (divisor.part.hi == 0) { in acpi_ut_divide()
174 remainder.part.hi = 0; in acpi_ut_divide()
180 ACPI_DIV_64_BY_32(0, dividend.part.hi, divisor.part.lo, in acpi_ut_divide()
181 quotient.part.hi, partial1); in acpi_ut_divide()
182 ACPI_DIV_64_BY_32(partial1, dividend.part.lo, divisor.part.lo, in acpi_ut_divide()
[all …]
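
acpi_ut_short_divide() above chains two ACPI_DIV_64_BY_32 steps: it divides the high 32 bits first, then carries the remainder into the division of the low 32 bits, which is schoolbook long division in base 2^32. A sketch of that step using plain uint64_t arithmetic (div64by32 is an illustrative helper, not the ACPI macro):

#include <stdint.h>
#include <stdio.h>

static void div64by32(uint64_t dividend, uint32_t divisor,
                      uint64_t *quot, uint32_t *rem)
{
    uint32_t hi = (uint32_t)(dividend >> 32);
    uint32_t lo = (uint32_t)dividend;

    uint32_t q_hi = hi / divisor;              /* high-half pass */
    uint32_t r    = hi % divisor;
    uint64_t mid  = ((uint64_t)r << 32) | lo;  /* carry remainder down */
    uint32_t q_lo = (uint32_t)(mid / divisor); /* low-half pass: fits in
                                                  32 bits since r < divisor */

    *quot = ((uint64_t)q_hi << 32) | q_lo;
    *rem  = (uint32_t)(mid % divisor);
}

int main(void)
{
    uint64_t q;
    uint32_t r;

    div64by32(0x123456789abcdef0ULL, 7, &q, &r);
    printf("q=0x%llx r=%u (direct: q=0x%llx r=%llu)\n",
           (unsigned long long)q, r,
           (unsigned long long)(0x123456789abcdef0ULL / 7),
           (unsigned long long)(0x123456789abcdef0ULL % 7));
    return 0;
}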
/drivers/net/ethernet/sfc/
mtd.c 57 struct efx_mtd_partition part[0]; member
60 #define efx_for_each_partition(part, efx_mtd) \ argument
61 for ((part) = &(efx_mtd)->part[0]; \
62 (part) != &(efx_mtd)->part[(efx_mtd)->n_parts]; \
63 (part)++)
74 efx_spi_slow_wait(struct efx_mtd_partition *part, bool uninterruptible) in efx_spi_slow_wait() argument
76 struct efx_mtd *efx_mtd = part->mtd.priv; in efx_spi_slow_wait()
96 pr_err("%s: timed out waiting for %s\n", part->name, efx_mtd->name); in efx_spi_slow_wait()
136 efx_spi_erase(struct efx_mtd_partition *part, loff_t start, size_t len) in efx_spi_erase() argument
138 struct efx_mtd *efx_mtd = part->mtd.priv; in efx_spi_erase()
[all …]
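
efx_for_each_partition() above walks a trailing part[0] array, the old GNU spelling of a flexible array member sized at allocation time. A sketch of the same iteration idiom using the standard C99 part[] form and hypothetical type names:

#include <stdio.h>
#include <stdlib.h>

struct fake_partition {
    const char *name;
};

struct fake_mtd {
    int n_parts;
    struct fake_partition part[];   /* C99 flexible array member */
};

/* Iterate from &part[0] up to (but not including) &part[n_parts],
 * mirroring the efx_for_each_partition() bounds. */
#define for_each_partition(p, m) \
    for ((p) = &(m)->part[0]; (p) != &(m)->part[(m)->n_parts]; (p)++)

int main(void)
{
    struct fake_mtd *m = malloc(sizeof(*m) + 2 * sizeof(m->part[0]));
    struct fake_partition *p;

    m->n_parts = 2;
    m->part[0].name = "example-fw";     /* names are made up */
    m->part[1].name = "example-cfg";

    for_each_partition(p, m)
        printf("%s\n", p->name);
    free(m);
    return 0;
}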
/drivers/gpu/drm/nouveau/
nvc0_vram.c 114 int ret, part; in nvc0_vram_init() local
123 for (part = 0; part < parts; part++) { in nvc0_vram_init()
124 if (!(pmask & (1 << part))) { in nvc0_vram_init()
125 u32 psize = nv_rd32(dev, 0x11020c + (part * 0x1000)); in nvc0_vram_init()
132 NV_DEBUG(dev, "%d: mem_amount 0x%08x\n", part, psize); in nvc0_vram_init()
nouveau_sgdma.c 222 u32 part = (cnt > max) ? max : cnt; in nv44_sgdma_bind() local
223 nv44_sgdma_fill(pgt, list, pte, part); in nv44_sgdma_bind()
224 pte += (part << 2); in nv44_sgdma_bind()
225 list += part; in nv44_sgdma_bind()
226 cnt -= part; in nv44_sgdma_bind()
258 u32 part = (cnt > max) ? max : cnt; in nv44_sgdma_unbind() local
259 nv44_sgdma_fill(pgt, NULL, pte, part); in nv44_sgdma_unbind()
260 pte += (part << 2); in nv44_sgdma_unbind()
261 cnt -= part; in nv44_sgdma_unbind()
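
nv44_sgdma_bind() and nv44_sgdma_unbind() above share one chunking loop: clamp the remaining count to a per-pass maximum, process that many page-table entries, then advance (the driver also scales its cursor by the 4-byte entry size, hence pte += part << 2). A generic sketch of the pattern, with fill_entries as a hypothetical stand-in for nv44_sgdma_fill():

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in: process `count` entries starting at `pte`. */
static void fill_entries(uint32_t pte, uint32_t count)
{
    printf("fill %u entries at pte %u\n", count, pte);
}

static void bind_range(uint32_t pte, uint32_t cnt, uint32_t max)
{
    while (cnt) {
        uint32_t part = (cnt > max) ? max : cnt;  /* clamp the chunk */

        fill_entries(pte, part);
        pte += part;                              /* advance the cursor */
        cnt -= part;                              /* shrink what's left */
    }
}

int main(void)
{
    bind_range(0, 10, 4);   /* 10 entries, 4 per pass: chunks 4, 4, 2 */
    return 0;
}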
/drivers/macintosh/
nvram.c 84 int part, offset; in nvram_ioctl() local
85 if (copy_from_user(&part, (void __user*)arg, sizeof(part)) != 0) in nvram_ioctl()
87 if (part < pmac_nvram_OF || part > pmac_nvram_NR) in nvram_ioctl()
89 offset = pmac_get_partition(part); in nvram_ioctl()
smu.c 1020 const struct smu_sdbp_header *part; in __smu_get_sdb_partition() local
1037 part = of_get_property(smu->of_node, pname, size); in __smu_get_sdb_partition()
1038 if (part == NULL) { in __smu_get_sdb_partition()
1040 part = smu_create_sdb_partition(id); in __smu_get_sdb_partition()
1041 if (part != NULL && size) in __smu_get_sdb_partition()
1042 *size = part->len << 2; in __smu_get_sdb_partition()
1045 return part; in __smu_get_sdb_partition()
1128 const struct smu_sdbp_header *part; in smu_write() local
1129 part = __smu_get_sdb_partition(hdr.cmd, NULL, 1); in smu_write()
1130 if (part == NULL) in smu_write()
[all …]
/drivers/char/
generic_nvram.c 100 int part, offset; in nvram_ioctl() local
104 if (copy_from_user(&part, (void __user*)arg, sizeof(part)) != 0) in nvram_ioctl()
106 if (part < pmac_nvram_OF || part > pmac_nvram_NR) in nvram_ioctl()
108 offset = pmac_get_partition(part); in nvram_ioctl()
/drivers/net/wireless/wl12xx/
io.h 101 if ((addr >= wl->part.reg.start) && in wl1271_translate_addr()
102 (addr < wl->part.reg.start + wl->part.reg.size)) in wl1271_translate_addr()
103 return addr - wl->part.reg.start + wl->part.mem.size; in wl1271_translate_addr()
105 return addr - wl->part.mem.start; in wl1271_translate_addr()
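
wl1271_translate_addr() above implements a two-window mapping: chip addresses inside the register partition are relocated to sit just after the memory partition in the mapped space, and everything else is treated as a memory-partition address. A sketch with invented window values:

#include <stdint.h>
#include <stdio.h>

struct window { uint32_t start, size; };
struct partition { struct window mem, reg; };

static uint32_t translate_addr(const struct partition *p, uint32_t addr)
{
    /* Register window maps to the region right after the memory window. */
    if (addr >= p->reg.start && addr < p->reg.start + p->reg.size)
        return addr - p->reg.start + p->mem.size;
    return addr - p->mem.start;     /* assume memory partition otherwise */
}

int main(void)
{
    struct partition p = {
        .mem = { .start = 0x00000000, .size = 0x4000 },  /* made up */
        .reg = { .start = 0x00300000, .size = 0x1000 },
    };

    printf("0x%x\n", translate_addr(&p, 0x00300010));    /* 0x4010 */
    return 0;
}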
/drivers/mmc/card/
block.c 89 struct list_head part; member
222 list_for_each_entry(part_md, &md->part, part) in power_ro_lock_store()
1525 INIT_LIST_HEAD(&md->part); in mmc_blk_alloc_req()
1627 list_add(&part_md->part, &md->part); in mmc_blk_alloc_part()
1651 if (card->part[idx].size) { in mmc_blk_alloc_parts()
1653 card->part[idx].part_cfg, in mmc_blk_alloc_parts()
1654 card->part[idx].size >> 9, in mmc_blk_alloc_parts()
1655 card->part[idx].force_ro, in mmc_blk_alloc_parts()
1656 card->part[idx].name, in mmc_blk_alloc_parts()
1657 card->part[idx].area_type); in mmc_blk_alloc_parts()
[all …]
/drivers/sn/
ioc3.c 280 unsigned char data[64],part[32]; in read_nic() local
292 part[j++] = data[i+11]; in read_nic()
295 part[j++] = data[i+32]; in read_nic()
296 part[j] = 0; in read_nic()
298 if(!strncmp(part, "060-0035-", 9)) in read_nic()
300 if(!strncmp(part, "060-0038-", 9)) in read_nic()
302 strcpy(idd->nic_part, part); in read_nic()
/drivers/isdn/hisax/
hscx.c 261 inithscxisac(struct IsdnCardState *cs, int part) in inithscxisac() argument
263 if (part & 1) { in inithscxisac()
269 if (part & 2) { in inithscxisac()
/drivers/firmware/
efivars.c 671 unsigned int part, size; in efi_pstore_read() local
680 if (sscanf(name, "dump-type%u-%u-%lu", type, &part, &time) == 3) { in efi_pstore_read()
681 *id = part; in efi_pstore_read()
704 unsigned int part, size_t size, struct pstore_info *psi) in efi_pstore_write() argument
714 sprintf(stub_name, "dump-type%u-%u-", type, part); in efi_pstore_write()
766 *id = part; in efi_pstore_write()
797 unsigned int part, size_t size, struct pstore_info *psi) in efi_pstore_write() argument
/drivers/staging/omapdrm/
TODO 22 . Review DSS vs KMS mismatches. The omap_dss_device is sort of part encoder,
23 part connector. Which results in a bit of duct tape to fwd calls from
/drivers/staging/asus_oled/
README 100 can't be larger than that (actually they can, but only part of them will be displayed ;) )
111 of the display, and the lower half will be empty. After few seconds upper part will
112 stop flashing (but that part of the picture will remain there), and the lower
115 upper part. It is not mine idea, this is just the way Asus' display work ;)
