/arch/x86/kernel/ |
D | resource.c |
    28  static void remove_e820_regions(struct resource *avail)  in remove_e820_regions() argument
    33  struct resource orig = *avail;  in remove_e820_regions()
    43  resource_clip(avail, e820_start, e820_end);  in remove_e820_regions()
    44  if (orig.start != avail->start || orig.end != avail->end) {  in remove_e820_regions()
    47  if (avail->end > avail->start)  in remove_e820_regions()
    54  &avail->start, &avail->end);  in remove_e820_regions()
    55  orig = *avail;  in remove_e820_regions()
    60  void arch_remove_reservations(struct resource *avail)  in arch_remove_reservations() argument
    67  if (avail->flags & IORESOURCE_MEM) {  in arch_remove_reservations()
    68  resource_clip(avail, BIOS_ROM_BASE, BIOS_ROM_END);  in arch_remove_reservations()
    [all …]
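The remove_e820_regions() hits above follow a common trimming loop: clip the candidate window against a reserved range, check whether the clip actually changed anything, and only keep going while the remaining range is non-empty. A minimal user-space sketch of that clip step, with an illustrative struct and helper rather than the kernel's real resource API:

    #include <stdint.h>
    #include <stdio.h>

    struct range { uint64_t start, end; };   /* inclusive bounds, like struct resource */

    /* Shrink *r so it no longer overlaps [rsv_start, rsv_end] (keeps one side only). */
    static void range_clip(struct range *r, uint64_t rsv_start, uint64_t rsv_end)
    {
        if (rsv_end < r->start || rsv_start > r->end)
            return;                         /* no overlap, nothing to do */
        if (rsv_start > r->start)
            r->end = rsv_start - 1;         /* keep the part below the reservation */
        else
            r->start = rsv_end + 1;         /* keep the part above it */
    }

    int main(void)
    {
        struct range avail = { 0x90000, 0xfffff };
        struct range orig = avail;

        range_clip(&avail, 0xa0000, 0xbffff);       /* hypothetical reserved hole */
        if (orig.start != avail.start || orig.end != avail.end)
            printf("clipped to %#llx-%#llx\n",
                   (unsigned long long)avail.start, (unsigned long long)avail.end);
        return avail.end > avail.start ? 0 : 1;     /* anything left? */
    }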
|
/arch/mips/cavium-octeon/crypto/ |
D | octeon-md5.c |
    85  const u32 avail = sizeof(mctx->block) - (mctx->byte_count & 0x3f);  in octeon_md5_update() local
    91  if (avail > len) {  in octeon_md5_update()
    92  memcpy((char *)mctx->block + (sizeof(mctx->block) - avail),  in octeon_md5_update()
    97  memcpy((char *)mctx->block + (sizeof(mctx->block) - avail), data,  in octeon_md5_update()
    98  avail);  in octeon_md5_update()
    104 data += avail;  in octeon_md5_update()
    105 len -= avail;  in octeon_md5_update()
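octeon_md5_update(), like the powerpc md5/sha glue entries further down, uses the standard partial-block bookkeeping: avail is the free space left in the context's block buffer; if the new data fits it is simply appended, otherwise the buffer is topped up, hashed, and the rest of the input processed block by block. A stripped-down sketch of that control flow, with a hypothetical 64-byte transform standing in for the real MD5/SHA core:

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    #define BLOCK_SIZE 64

    struct hash_ctx {
        uint8_t  block[BLOCK_SIZE];   /* partial-block staging buffer */
        uint64_t byte_count;          /* total bytes fed in so far */
    };

    /* Placeholder: the real code would run the MD5/SHA compression rounds here. */
    static void process_block(struct hash_ctx *ctx, const uint8_t *data)
    {
        (void)ctx; (void)data;
    }

    static void hash_update(struct hash_ctx *ctx, const uint8_t *data, size_t len)
    {
        size_t avail = BLOCK_SIZE - (ctx->byte_count & (BLOCK_SIZE - 1));

        ctx->byte_count += len;

        if (avail > len) {                       /* fits in the buffer, nothing to hash yet */
            memcpy(ctx->block + (BLOCK_SIZE - avail), data, len);
            return;
        }

        /* Top up the partial block and run the transform on it. */
        memcpy(ctx->block + (BLOCK_SIZE - avail), data, avail);
        process_block(ctx, ctx->block);
        data += avail;
        len -= avail;

        /* Hash any remaining full blocks straight from the source buffer. */
        while (len >= BLOCK_SIZE) {
            process_block(ctx, data);
            data += BLOCK_SIZE;
            len -= BLOCK_SIZE;
        }

        memcpy(ctx->block, data, len);           /* stash the tail for the next call */
    }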
|
/arch/x86/events/ |
D | probe.c |
    21  unsigned long avail = 0;  in perf_msr_probe() local
    58  avail |= BIT(bit);  in perf_msr_probe()
    61  return avail;  in perf_msr_probe()
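perf_msr_probe() builds avail up as a bitmask, setting one bit per MSR-backed event that turns out to be usable and returning the mask to the caller. A hedged sketch of that accumulate-a-capability-mask idiom; the table size and msr_is_usable() check are invented for illustration:

    #include <stdbool.h>
    #include <stdio.h>

    #define BIT(n)  (1UL << (n))
    #define NR_MSRS 4

    /* Stand-in for the real per-MSR availability test (CPUID/model checks etc.). */
    static bool msr_is_usable(unsigned int idx)
    {
        return idx != 2;   /* pretend MSR 2 is not implemented on this CPU */
    }

    static unsigned long msr_probe(void)
    {
        unsigned long avail = 0;
        unsigned int bit;

        for (bit = 0; bit < NR_MSRS; bit++)
            if (msr_is_usable(bit))
                avail |= BIT(bit);        /* record this MSR as available */

        return avail;
    }

    int main(void)
    {
        printf("available MSR mask: %#lx\n", msr_probe());
        return 0;
    }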
|
/arch/powerpc/crypto/ |
D | md5-glue.c |
    48  unsigned int avail = 64 - offset;  in ppc_md5_update() local
    53  if (avail > len) {  in ppc_md5_update()
    59  memcpy((char *)sctx->block + offset, src, avail);  in ppc_md5_update()
    61  len -= avail;  in ppc_md5_update()
    62  src += avail;  in ppc_md5_update()
|
D | sha1-spe-glue.c |
    64  const unsigned int avail = 64 - offset;  in ppc_spe_sha1_update() local
    68  if (avail > len) {  in ppc_spe_sha1_update()
    77  memcpy((char *)sctx->buffer + offset, src, avail);  in ppc_spe_sha1_update()
    83  len -= avail;  in ppc_spe_sha1_update()
    84  src += avail;  in ppc_spe_sha1_update()
|
D | sha256-spe-glue.c |
    65  const unsigned int avail = 64 - offset;  in ppc_spe_sha256_update() local
    69  if (avail > len) {  in ppc_spe_sha256_update()
    78  memcpy((char *)sctx->buf + offset, src, avail);  in ppc_spe_sha256_update()
    84  len -= avail;  in ppc_spe_sha256_update()
    85  src += avail;  in ppc_spe_sha256_update()
|
/arch/ia64/kernel/ |
D | palinfo.c |
    519 static void feature_set_info(struct seq_file *m, u64 avail, u64 status, u64 control,  in feature_set_info() argument
    526 for(i=0; i < 64; i++, avail >>=1, status >>=1, control >>=1) {  in feature_set_info()
    530 if (!(avail & 0x1)) /* Print only bits that are available */  in feature_set_info()
    536 avail & 0x1 ? (status & 0x1 ?  in feature_set_info()
    538 avail & 0x1 ? (control & 0x1 ?  in feature_set_info()
    544 avail & 0x1 ? (status & 0x1 ?  in feature_set_info()
    546 avail & 0x1 ? (control & 0x1 ?  in feature_set_info()
    554 u64 avail=1, status=1, control=1, feature_set=0;  in processor_info() local
    558 ret = ia64_pal_proc_get_features(&avail, &status, &control,  in processor_info()
    568 feature_set_info(m, avail, status, control, feature_set);  in processor_info()
    [all …]
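feature_set_info() walks 64 feature bits in lockstep, shifting the avail, status and control words right by one per iteration and reporting only the features the PAL firmware marked as available. The same walk, reduced to a small self-contained sketch (the masks and labels are placeholders, not real PAL data):

    #include <stdint.h>
    #include <stdio.h>

    static void print_features(uint64_t avail, uint64_t status, uint64_t control)
    {
        for (int i = 0; i < 64; i++, avail >>= 1, status >>= 1, control >>= 1) {
            if (!(avail & 0x1))
                continue;                      /* print only bits that are available */
            printf("feature %2d: status=%s, control=%s\n", i,
                   (status & 0x1) ? "on" : "off",
                   (control & 0x1) ? "adjustable" : "fixed");
        }
    }

    int main(void)
    {
        /* On ia64 these three words would come from ia64_pal_proc_get_features(). */
        print_features(0x0b, 0x09, 0x02);
        return 0;
    }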
|
/arch/powerpc/platforms/powernv/ |
D | opal-msglog.c |
    39  uint32_t out_pos, avail;  in memcons_copy() local
    58  avail = be32_to_cpu(mc->obuf_size) - out_pos;  in memcons_copy()
    61  conbuf + out_pos, avail);  in memcons_copy()
    69  pos -= avail;  in memcons_copy()
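In memcons_copy() avail is the number of bytes between the current output position and the end of the OPAL console ring buffer, so a read is served in at most two chunks: the tail of the buffer first, then whatever wrapped around to the front. A wrap-aware copy sketch under that assumption (names and layout are illustrative, and count is assumed to be at most one buffer's worth):

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    /* Copy count bytes out of a ring buffer starting at out_pos, handling wrap-around. */
    size_t ring_copy(char *dst, const char *ring, uint32_t ring_size,
                     uint32_t out_pos, size_t count)
    {
        size_t avail = ring_size - out_pos;        /* bytes left until the end of the ring */
        size_t first = count < avail ? count : avail;

        memcpy(dst, ring + out_pos, first);
        if (count > first)                         /* wrapped: continue from the start */
            memcpy(dst + first, ring, count - first);
        return count;
    }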
|
D | opal-core.c |
    166 ssize_t tsz, avail;  in read_opalcore() local
    173 avail = oc_conf->opalcore_size - pos;  in read_opalcore()
    174 if (count > avail)  in read_opalcore()
    175 count = avail;  in read_opalcore()
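read_opalcore() uses avail only to clamp a read so it cannot run past the end of the dump: avail is the object size minus the current offset, and count is trimmed down to it. A one-function sketch of that bounds check:

    #include <stddef.h>

    /* Clamp a read of count bytes at offset pos within an object of size bytes. */
    size_t clamp_read(size_t size, size_t pos, size_t count)
    {
        size_t avail;

        if (pos >= size)
            return 0;                  /* already at or past the end */
        avail = size - pos;
        if (count > avail)
            count = avail;
        return count;
    }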
|
/arch/um/kernel/ |
D | um_arch.c |
    309 unsigned long avail, diff;  in linux_main() local
    394 avail = stack - start_vm;  in linux_main()
    395 if (physmem_size > avail)  in linux_main()
    396 virtmem_size = avail;  in linux_main()
|
/arch/powerpc/kernel/ |
D | setup-common.c |
    488 bool avail;  in smp_setup_cpu_maps() local
    493 avail = of_device_is_available(dn);  in smp_setup_cpu_maps()
    494 if (!avail)  in smp_setup_cpu_maps()
    495 avail = !of_property_match_string(dn,  in smp_setup_cpu_maps()
    498 set_cpu_present(cpu, avail);  in smp_setup_cpu_maps()
|
/arch/powerpc/platforms/pseries/ |
D | vio.c |
    283 size_t avail, delta, tmp;  in vio_cmo_entitlement_update() local
    309 avail = vio_cmo.excess.free;  in vio_cmo_entitlement_update()
    316 if (avail >= delta)  in vio_cmo_entitlement_update()
    322 avail += viodev->cmo.entitled -  in vio_cmo_entitlement_update()
    327 if (delta <= avail) {  in vio_cmo_entitlement_update()
    392 size_t avail = 0, level, chunk, need;  in vio_cmo_balance() local
    410 avail = cmo->entitled - cmo->spare;  in vio_cmo_balance()
    416 avail -= max_t(size_t, viodev->cmo.allocated, VIO_CMO_MIN_ENT);  in vio_cmo_balance()
    425 while (avail) {  in vio_cmo_balance()
    440 chunk = min_t(size_t, avail, VIO_CMO_BALANCE_CHUNK);  in vio_cmo_balance()
    [all …]
|
/arch/s390/kernel/ |
D | smp.c |
    770 static int smp_add_core(struct sclp_core_entry *core, cpumask_t *avail,  in smp_add_core() argument
    780 cpu = cpumask_first(avail);  in smp_add_core()
    797 cpumask_clear_cpu(cpu, avail);  in smp_add_core()
    798 cpu = cpumask_next(cpu, avail);  in smp_add_core()
    806 static cpumask_t avail;  in __smp_rescan_cpus() local
    814 cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);  in __smp_rescan_cpus()
    824 nr += smp_add_core(core, &avail, true, early);  in __smp_rescan_cpus()
    831 nr += smp_add_core(&info->core[i], &avail, configured, early);  in __smp_rescan_cpus()
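__smp_rescan_cpus() computes avail as the set of logical CPUs that are possible but not yet present (the XOR of the two masks) and then hands cores out of that pool one at a time, clearing each bit as it is consumed. A user-space sketch of that claim-from-a-bitmask pattern, using plain integer bit operations instead of the kernel's cpumask API:

    #include <stdint.h>
    #include <stdio.h>

    #define NR_CPUS 16

    /* Take the lowest set bit out of *avail and return it; -1 once the pool is empty. */
    static int claim_cpu(uint32_t *avail)
    {
        for (int cpu = 0; cpu < NR_CPUS; cpu++) {
            if (*avail & (1u << cpu)) {
                *avail &= ~(1u << cpu);
                return cpu;
            }
        }
        return -1;
    }

    int main(void)
    {
        uint32_t possible = 0x00ff, present = 0x000f;
        uint32_t avail = possible ^ present;       /* possible but not yet present */
        int cpu;

        while ((cpu = claim_cpu(&avail)) >= 0)
            printf("bringing up cpu %d\n", cpu);
        return 0;
    }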
|
/arch/um/drivers/ |
D | vhost_user.h |
    92  u64 desc, used, avail, log;  member
|
D | virtio_uml.c |
    716 u32 index, u64 desc, u64 used, u64 avail,  in vhost_user_set_vring_addr() argument
    725 .payload.vring_addr.avail = avail,  in vhost_user_set_vring_addr()
|
/arch/s390/include/asm/ |
D | airq.h |
    34  unsigned long *avail; /* Allocation bit mask for the bit vector */  member
|
/arch/powerpc/platforms/cell/spufs/ |
D | backing_ops.c |
    146 int avail = (ctx->csa.prob.mb_stat_R & 0x00ff00) >> 8;  in spu_backing_wbox_write() local
    152 BUG_ON(avail != (4 - slot));  in spu_backing_wbox_write()
|
/arch/mips/include/asm/octeon/ |
D | cvmx-npi-defs.h |
    1969 uint64_t avail:32;  member
    1971 uint64_t avail:32;
    1984 uint64_t avail:32;  member
    1986 uint64_t avail:32;
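The duplicated avail:32 members in cvmx-npi-defs.h are the usual Octeon register-description pattern: the same 64-bit register is declared twice, once with big-endian and once with little-endian bitfield ordering, and the right layout is picked at compile time. A sketch of that layout trick with an invented register; only the mechanism is taken from the header, not the field names:

    #include <stdint.h>

    /* One hardware register, two field orderings depending on bitfield endianness. */
    union example_reg {
        uint64_t u64;
        struct {
    #ifdef __BIG_ENDIAN_BITFIELD
            uint64_t avail:32;     /* high half when bitfields are laid out big-endian */
            uint64_t count:32;
    #else
            uint64_t count:32;     /* fields reversed for little-endian layouts */
            uint64_t avail:32;
    #endif
        } s;
    };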
|
/arch/x86/kvm/mmu/ |
D | mmu.c |
    2710 unsigned long avail = kvm_mmu_available_pages(vcpu->kvm);  in make_mmu_pages_available() local
    2712 if (likely(avail >= KVM_MIN_FREE_MMU_PAGES))  in make_mmu_pages_available()
    2715 kvm_mmu_zap_oldest_mmu_pages(vcpu->kvm, KVM_REFILL_PAGES - avail);  in make_mmu_pages_available()
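make_mmu_pages_available() is a classic low-watermark check: as long as the number of free MMU pages (avail) stays at or above KVM_MIN_FREE_MMU_PAGES nothing happens; otherwise enough of the oldest pages are zapped to bring the pool back up toward KVM_REFILL_PAGES. A generic sketch of that refill-to-watermark logic; the numbers and helpers are illustrative, not KVM's:

    #include <stdio.h>

    #define MIN_FREE_PAGES  5UL    /* don't bother while at least this many are free */
    #define REFILL_PAGES   25UL    /* top the pool back up to this level */

    static unsigned long free_pages_in_pool(void) { return 3; }   /* stand-in counter */

    static void reclaim_pages(unsigned long n)
    {
        printf("reclaiming %lu pages\n", n);       /* real code would zap old pages */
    }

    static void make_pages_available(void)
    {
        unsigned long avail = free_pages_in_pool();

        if (avail >= MIN_FREE_PAGES)
            return;                                /* plenty left, fast path */

        reclaim_pages(REFILL_PAGES - avail);       /* free just enough to refill */
    }

    int main(void)
    {
        make_pages_available();
        return 0;
    }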
|