/kernel/irq/
timings.c
   282  for (i = irqts->count < IRQ_TIMINGS_SIZE ? \
   283  0 : irqts->count & IRQ_TIMINGS_MASK, \
   284  irqts->count = min(IRQ_TIMINGS_SIZE, \
   285  irqts->count); \
   286  irqts->count > 0; irqts->count--, \
   294  int count;  member
   384  int index, i, period_max, count, start, min = INT_MAX;  in __irq_timings_next_event() local
   387  irqs->count = irqs->last_ts = 0;  in __irq_timings_next_event()
   396  period_max = irqs->count > (3 * PREDICTION_PERIOD_MAX) ?  in __irq_timings_next_event()
   397  PREDICTION_PERIOD_MAX : irqs->count / 3;  in __irq_timings_next_event()
   [all …]
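The for-loop at lines 282-286 is the body of an iterator macro that walks a power-of-two circular buffer of IRQ timestamps: once the buffer has wrapped, masking the write count with IRQ_TIMINGS_MASK locates the oldest slot. A stand-alone sketch of the same masking pattern; the names and sizes here are illustrative, not the kernel's:

    /*
     * Power-of-two ring-buffer indexing: overwrite the oldest slot on
     * wrap, find the oldest entry by masking the write count, and walk
     * at most RB_SIZE entries. Illustrative stand-in for timings.c.
     */
    #include <stdio.h>

    #define RB_SIZE 8                    /* must be a power of two */
    #define RB_MASK (RB_SIZE - 1)

    struct ring { unsigned int values[RB_SIZE]; unsigned int count; };

    static void rb_push(struct ring *rb, unsigned int v)
    {
            rb->values[rb->count & RB_MASK] = v;  /* overwrite oldest */
            rb->count++;
    }

    int main(void)
    {
            struct ring rb = { { 0 }, 0 };
            unsigned int i, start, n;

            for (i = 0; i < 11; i++)
                    rb_push(&rb, i);

            /* slot 0 until the buffer wraps, then the oldest slot */
            start = rb.count < RB_SIZE ? 0 : rb.count & RB_MASK;
            n = rb.count < RB_SIZE ? rb.count : RB_SIZE;

            for (i = 0; i < n; i++)
                    printf("%u ", rb.values[(start + i) & RB_MASK]);
            printf("\n");                /* prints 3..10, oldest first */
            return 0;
    }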
proc.c
   138  const char __user *buffer, size_t count, loff_t *pos)  in write_irq_affinity() argument
   151  err = cpumask_parselist_user(buffer, count, new_value);  in write_irq_affinity()
   153  err = cpumask_parse_user(buffer, count, new_value);  in write_irq_affinity()
   167  err = irq_select_affinity_usr(irq) ? -EINVAL : count;  in write_irq_affinity()
   171  err = count;  in write_irq_affinity()
   180  const char __user *buffer, size_t count, loff_t *pos)  in irq_affinity_proc_write() argument
   182  return write_irq_affinity(0, file, buffer, count, pos);  in irq_affinity_proc_write()
   186  const char __user *buffer, size_t count, loff_t *pos)  in irq_affinity_list_proc_write() argument
   188  return write_irq_affinity(1, file, buffer, count, pos);  in irq_affinity_list_proc_write()
   236  const char __user *buffer, size_t count, loff_t *ppos)  in default_affinity_write() argument
   [all …]
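write_irq_affinity() shows the /proc write convention: parse the user buffer into a cpumask (list format via cpumask_parselist_user(), mask format via cpumask_parse_user()) and return count on success so the caller sees the whole write consumed. A hedged kernel-style fragment of the same shape; the demo_ name and the empty-mask check are illustrative, not the file's own code:

    /* Sketch of a /proc cpumask write handler, assuming the usual
     * proc_write signature; not the kernel's actual implementation. */
    #include <linux/cpumask.h>
    #include <linux/fs.h>
    #include <linux/gfp.h>

    static ssize_t demo_affinity_write(struct file *file,
                                       const char __user *buffer,
                                       size_t count, loff_t *pos)
    {
            cpumask_var_t new_value;
            int err;

            if (!zalloc_cpumask_var(&new_value, GFP_KERNEL))
                    return -ENOMEM;

            /* list syntax like "3,5-7" would use cpumask_parselist_user() */
            err = cpumask_parse_user(buffer, count, new_value);
            if (!err && cpumask_empty(new_value))
                    err = -EINVAL;          /* reject an empty mask */

            free_cpumask_var(new_value);
            return err ? err : count;       /* success: all bytes consumed */
    }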
/kernel/locking/
rwsem.c
   110  #c, atomic_long_read(&(sem)->count), \
   229  long count = atomic_long_read(&sem->count);  in is_rwsem_reader_owned() local
   231  if (count & RWSEM_WRITER_MASK)  in is_rwsem_reader_owned()
   279  long cnt = atomic_long_add_return_acquire(RWSEM_READER_BIAS, &sem->count);  in rwsem_read_trylock()
   344  atomic_long_set(&sem->count, RWSEM_UNLOCKED_VALUE);  in __init_rwsem()
   432  if (unlikely(atomic_long_read(&sem->count) < 0))  in rwsem_mark_wake()
   444  oldcount = atomic_long_fetch_add(adjustment, &sem->count);  in rwsem_mark_wake()
   457  atomic_long_add(-adjustment, &sem->count);  in rwsem_mark_wake()
   524  if (woken && (atomic_long_read(&sem->count) & RWSEM_FLAG_HANDOFF))  in rwsem_mark_wake()
   528  atomic_long_add(adjustment, &sem->count);  in rwsem_mark_wake()
   [all …]
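rwsem_read_trylock() at line 279 optimistically adds RWSEM_READER_BIAS to the count word and only then checks for a writer, backing the bias out on failure. A user-space sketch of that bias encoding using C11 atomics; the constants approximate the idea, not the kernel's exact bit layout:

    /*
     * Reader-bias encoding: readers add a large bias so the low bits
     * stay free for writer/flag state. Constants are illustrative.
     */
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define WRITER_LOCKED  0x01L          /* low bits: writer/flags */
    #define WRITER_MASK    0x07L
    #define READER_BIAS    0x100L         /* each reader adds this */

    static atomic_long count = 0;         /* 0 == unlocked */

    static bool read_trylock(void)
    {
            /* optimistically bias up, like rwsem_read_trylock() */
            long cnt = atomic_fetch_add_explicit(&count, READER_BIAS,
                            memory_order_acquire) + READER_BIAS;

            if (!(cnt & WRITER_MASK))
                    return true;          /* no writer: read lock held */
            atomic_fetch_sub(&count, READER_BIAS);  /* back out the bias */
            return false;
    }

    int main(void)
    {
            printf("reader 1: %s\n", read_trylock() ? "ok" : "blocked");
            atomic_fetch_or(&count, WRITER_LOCKED); /* writer arrives */
            printf("reader 2: %s\n", read_trylock() ? "ok" : "blocked");
            return 0;
    }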
semaphore.c
    58  if (likely(sem->count > 0))  in down()
    59  sem->count--;  in down()
    81  if (likely(sem->count > 0))  in down_interruptible()
    82  sem->count--;  in down_interruptible()
   107  if (likely(sem->count > 0))  in down_killable()
   108  sem->count--;  in down_killable()
   133  int count;  in down_trylock() local
   136  count = sem->count - 1;  in down_trylock()
   137  if (likely(count >= 0))  in down_trylock()
   138  sem->count = count;  in down_trylock()
   [all …]
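down(), down_interruptible(), and down_killable() all share one fast path: if count > 0 while holding the semaphore's spinlock, consume a unit; down_trylock() makes the same test without ever sleeping. A stand-alone sketch with a pthread mutex standing in for the kernel's raw spinlock (note the kernel's down_trylock() returns 0 on success, the opposite sense of this sketch):

    /* Counting-semaphore fast path: count is only touched under a lock.
     * Illustrative user-space stand-in for semaphore.c. */
    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct semaphore {
            pthread_mutex_t lock;
            int count;
    };

    static bool sem_trylock(struct semaphore *sem)
    {
            bool taken = false;

            pthread_mutex_lock(&sem->lock);
            if (sem->count > 0) {         /* the test down() fast-paths on */
                    sem->count--;
                    taken = true;
            }
            pthread_mutex_unlock(&sem->lock);
            return taken;
    }

    int main(void)
    {
            struct semaphore sem = { PTHREAD_MUTEX_INITIALIZER, 1 };

            printf("first:  %s\n", sem_trylock(&sem) ? "acquired" : "busy");
            printf("second: %s\n", sem_trylock(&sem) ? "acquired" : "busy");
            return 0;
    }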
lock_events.c
    62  size_t count, loff_t *ppos)  in lockevent_read() argument
    80  return simple_read_from_buffer(user_buf, count, ppos, buf, len);  in lockevent_read()
    89  size_t count, loff_t *ppos)  in lockevent_write() argument
    97  return count;  in lockevent_write()
   106  return count;  in lockevent_write()
/kernel/
ksysfs.c
    47  const char *buf, size_t count)  in uevent_helper_store() argument
    49  if (count+1 > UEVENT_HELPER_PATH_LEN)  in uevent_helper_store()
    51  memcpy(uevent_helper, buf, count);  in uevent_helper_store()
    52  uevent_helper[count] = '\0';  in uevent_helper_store()
    53  if (count && uevent_helper[count-1] == '\n')  in uevent_helper_store()
    54  uevent_helper[count-1] = '\0';  in uevent_helper_store()
    55  return count;  in uevent_helper_store()
    68  const char *buf, size_t count)  in profiling_store() argument
    86  return count;  in profiling_store()
   118  const char *buf, size_t count)  in kexec_crash_size_store() argument
   [all …]
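uevent_helper_store() is the canonical sysfs store pattern: bound-check the write, copy it, NUL-terminate, strip the trailing newline that `echo` appends, and return count. A runnable user-space sketch of the same steps; buffer names and the error value are illustrative:

    /* The store idiom from ksysfs.c, as plain C so it runs stand-alone. */
    #include <stdio.h>
    #include <string.h>

    static char helper_path[256];

    static long helper_store(const char *buf, size_t count)
    {
            if (count + 1 > sizeof(helper_path))
                    return -1;                       /* kernel: -ENOENT */

            memcpy(helper_path, buf, count);
            helper_path[count] = '\0';
            if (count && helper_path[count - 1] == '\n')
                    helper_path[count - 1] = '\0';   /* drop echo's newline */
            return count;                            /* whole write consumed */
    }

    int main(void)
    {
            const char *input = "/sbin/hotplug\n";

            helper_store(input, strlen(input));
            printf("stored: '%s'\n", helper_path);   /* no trailing newline */
            return 0;
    }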
user_namespace.c
   115  atomic_set(&ns->count, 1);  in create_user_ns()
   201  } while (atomic_dec_and_test(&parent->count));  in free_user_ns()
   217  u32 count; /* == 0 unless used with map_id_range_down() */  member
   230  id2 = key->id + key->count - 1;  in cmp_map_id()
   238  last = first + el->count - 1;  in cmp_map_id()
   255  map_id_range_down_max(unsigned extents, struct uid_gid_map *map, u32 id, u32 count)  in map_id_range_down_max() argument
   260  key.count = count;  in map_id_range_down_max()
   273  map_id_range_down_base(unsigned extents, struct uid_gid_map *map, u32 id, u32 count)  in map_id_range_down_base() argument
   278  id2 = id + count - 1;  in map_id_range_down_base()
   283  last = first + map->extent[idx].count - 1;  in map_id_range_down_base()
   [all …]
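The cmp_map_id() and map_id_range_down_base() lines compute whether the queried ID range [id, id + count - 1] lies wholly inside an extent [first, first + extent->count - 1]. A stand-alone sketch of that containment test, with a simplified stand-in for the kernel's struct uid_gid_map extent:

    /* Extent-range containment, as in the uid/gid map lookups. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct extent { uint32_t first; uint32_t lower_first; uint32_t count; };

    static bool range_in_extent(const struct extent *el,
                                uint32_t id, uint32_t count)
    {
            uint32_t id2 = id + count - 1;            /* last ID looked up */
            uint32_t first = el->first;
            uint32_t last = first + el->count - 1;    /* last mapped ID */

            return id >= first && id <= last &&
                   id2 >= first && id2 <= last;
    }

    int main(void)
    {
            /* map namespace IDs 1000..1999 onto parent IDs 0..999 */
            struct extent el = { .first = 1000, .lower_first = 0,
                                 .count = 1000 };

            printf("%d\n", range_in_extent(&el, 1500, 10));  /* 1: fits */
            printf("%d\n", range_in_extent(&el, 1995, 10));  /* 0: spills */
            return 0;
    }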
profile.c
   436  const char __user *buffer, size_t count, loff_t *pos)  in prof_cpu_mask_proc_write() argument
   444  err = cpumask_parse_user(buffer, count, new_value);  in prof_cpu_mask_proc_write()
   447  err = count;  in prof_cpu_mask_proc_write()
   474  read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)  in read_profile() argument
   484  if (count > (prof_len+1)*sizeof(unsigned int) - p)  in read_profile()
   485  count = (prof_len+1)*sizeof(unsigned int) - p;  in read_profile()
   488  while (p < sizeof(unsigned int) && count > 0) {  in read_profile()
   491  buf++; p++; count--; read++;  in read_profile()
   494  if (copy_to_user(buf, (void *)pnt, count))  in read_profile()
   496  read += count;  in read_profile()
   [all …]
fail_function.c
   237  size_t count, loff_t *ppos)  in fei_write() argument
   245  if (count > KSYM_NAME_LEN)  in fei_write()
   246  count = KSYM_NAME_LEN;  in fei_write()
   247  buf = kmalloc(count + 1, GFP_KERNEL);  in fei_write()
   251  if (copy_from_user(buf, buffer, count)) {  in fei_write()
   255  buf[count] = '\0';  in fei_write()
   263  ret = count;  in fei_write()
   274  ret = count;  in fei_write()
   304  ret = count;  in fei_write()
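fei_write() follows the standard bounded debugfs-write recipe: clamp count, allocate count + 1 bytes, copy_from_user(), NUL-terminate, parse, and return count. A hedged kernel-style fragment of the same recipe; the demo_ names and DEMO_MAX_LEN are illustrative stand-ins, not the file's code:

    /* Sketch of the bounded write-handler pattern fei_write() uses. */
    #include <linux/fs.h>
    #include <linux/slab.h>
    #include <linux/uaccess.h>

    #define DEMO_MAX_LEN 512                /* stand-in for KSYM_NAME_LEN */

    static ssize_t demo_write(struct file *file, const char __user *buffer,
                              size_t count, loff_t *ppos)
    {
            char *buf;
            ssize_t ret;

            if (count > DEMO_MAX_LEN)
                    count = DEMO_MAX_LEN;   /* never trust the length */

            buf = kmalloc(count + 1, GFP_KERNEL);
            if (!buf)
                    return -ENOMEM;

            if (copy_from_user(buf, buffer, count)) {
                    kfree(buf);
                    return -EFAULT;
            }
            buf[count] = '\0';              /* now safe as a C string */

            ret = count;                    /* parse buf, report bytes used */
            kfree(buf);
            return ret;
    }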
audit_tree.c
    14  refcount_t count;  member
    30  int count;  member
    99  refcount_set(&tree->count, 1);  in alloc_tree()
   113  refcount_inc(&tree->count);  in get_tree()
   118  if (refcount_dec_and_test(&tree->count))  in put_tree()
   132  for (i = 0; i < chunk->count; i++) {  in free_chunk()
   188  static struct audit_chunk *alloc_chunk(int count)  in alloc_chunk() argument
   193  chunk = kzalloc(struct_size(chunk, owners, count), GFP_KERNEL);  in alloc_chunk()
   199  chunk->count = count;  in alloc_chunk()
   201  for (i = 0; i < count; i++) {  in alloc_chunk()
   [all …]
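Two patterns meet here: trees live and die by a reference count (get_tree()/put_tree(), freed on the 1 -> 0 transition), and chunks are allocated with struct_size() to make room for count trailing owner slots. A plain-C sketch of both; the refcount here is non-atomic, unlike the kernel's refcount_t:

    /* Refcount lifecycle plus flexible-array allocation, stand-alone. */
    #include <stdio.h>
    #include <stdlib.h>

    struct chunk {
            int refs;                     /* kernel uses refcount_t */
            int count;
            int owners[];                 /* flexible array member */
    };

    static struct chunk *alloc_chunk(int count)
    {
            /* the size struct_size(chunk, owners, count) would compute */
            struct chunk *c = calloc(1, sizeof(*c) + sizeof(int) * count);

            if (c) {
                    c->refs = 1;          /* creator holds a reference */
                    c->count = count;
            }
            return c;
    }

    static void get_chunk(struct chunk *c) { c->refs++; }

    static void put_chunk(struct chunk *c)
    {
            if (--c->refs == 0) {         /* refcount_dec_and_test() analogue */
                    printf("freeing chunk with %d owners\n", c->count);
                    free(c);
            }
    }

    int main(void)
    {
            struct chunk *c = alloc_chunk(4);

            get_chunk(c);                 /* second user takes a reference */
            put_chunk(c);                 /* still alive: refs 2 -> 1 */
            put_chunk(c);                 /* last put frees it */
            return 0;
    }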
latencytop.c
   118  latency_record[i].count++;  in account_global_scheduler_latency()
   167  lat.count = 1;  in __account_scheduler_latency()
   195  mylat->count++;  in __account_scheduler_latency()
   229  lr->count, lr->time, lr->max);  in lstats_show()
   245  lstats_write(struct file *file, const char __user *buf, size_t count,  in lstats_write() argument
   250  return count;  in lstats_write()
delayacct.c
    46  u32 *count)  in delayacct_end() argument
    54  (*count)++;  in delayacct_end()
    72  u32 *count;  in __delayacct_blkio_end() local
    76  count = &delays->swapin_count;  in __delayacct_blkio_end()
    79  count = &delays->blkio_count;  in __delayacct_blkio_end()
    82  delayacct_end(&delays->lock, &delays->blkio_start, total, count);  in __delayacct_blkio_end()
/kernel/trace/
trace_selftest.c
    63  static int __maybe_unused trace_test_buffer(struct array_buffer *buf, unsigned long *count)  in trace_test_buffer() argument
    91  if (count)  in trace_test_buffer()
    92  *count = cnt;  in trace_test_buffer()
   330  unsigned long count;  in trace_selftest_startup_dynamic_tracing() local
   365  ret = trace_test_buffer(&tr->array_buffer, &count);  in trace_selftest_startup_dynamic_tracing()
   369  if (count) {  in trace_selftest_startup_dynamic_tracing()
   386  ret = trace_test_buffer(&tr->array_buffer, &count);  in trace_selftest_startup_dynamic_tracing()
   392  if (!ret && count != 1) {  in trace_selftest_startup_dynamic_tracing()
   394  printk(KERN_CONT ".. filter failed count=%ld ..", count);  in trace_selftest_startup_dynamic_tracing()
   661  unsigned long count;  in trace_selftest_startup_function() local
   [all …]
trace_functions.c
   293  long *count;  in update_traceon_count() local
   325  count = (long *)ftrace_func_mapper_find_ip(mapper, ip);  in update_traceon_count()
   326  old_count = *count;  in update_traceon_count()
   345  *count = old_count - 1;  in update_traceon_count()
   433  long *count;  in ftrace_stacktrace_count() local
   446  count = (long *)ftrace_func_mapper_find_ip(mapper, ip);  in ftrace_stacktrace_count()
   453  old_count = *count;  in ftrace_stacktrace_count()
   459  new_count = cmpxchg(count, old_count, new_count);  in ftrace_stacktrace_count()
   473  long *count = NULL;  in update_count() local
   476  count = (long *)ftrace_func_mapper_find_ip(mapper, ip);  in update_count()
   [all …]
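ftrace_stacktrace_count() decrements its per-IP budget with cmpxchg() so concurrent probes never lose an update: read the old value, compute the new one, and retry if another CPU got there first. A user-space sketch with C11 compare-exchange standing in for the kernel's cmpxchg() (the kernel additionally treats -1 as "unlimited", omitted here):

    /* Lock-free budget decrement via a compare-exchange retry loop. */
    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_long remaining = 3;     /* e.g. a ":3" trigger budget */

    /* returns 1 if a unit was consumed, 0 once the budget is exhausted */
    static int consume_one(void)
    {
            long old = atomic_load(&remaining);

            do {
                    if (old <= 0)
                            return 0;     /* nothing left to consume */
            } while (!atomic_compare_exchange_weak(&remaining, &old,
                                                   old - 1));
            return 1;                     /* swap succeeded: we own a unit */
    }

    int main(void)
    {
            for (int i = 0; i < 5; i++)
                    printf("attempt %d: %s\n", i,
                           consume_one() ? "fired" : "exhausted");
            return 0;
    }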
trace_hwlat.c
    86  int count; /* # of iteratons over threash */  member
    94  u64 count; /* total since reset */  member
   128  entry->count = sample->count;  in trace_hwlat_sample()
   179  unsigned int count = 0;  in get_sample() local
   226  if (!count)  in get_sample()
   228  count++;  in get_sample()
   258  hwlat_data.count++;  in get_sample()
   259  s.seqnum = hwlat_data.count;  in get_sample()
   264  s.count = count;  in get_sample()
   598  hwlat_data.count = 0;  in hwlat_tracer_init()
trace_events_trigger.c
   389  long count = (long)data;  in event_trigger_print() local
   393  if (count == -1)  in event_trigger_print()
   396  seq_printf(m, ":count=%ld", count);  in event_trigger_print()
   655  trigger_data->count = -1;  in event_trigger_callback()
   680  ret = kstrtoul(number, 0, &trigger_data->count);  in event_trigger_callback()
   973  if (!data->count)  in traceon_count_trigger()
   976  if (data->count != -1)  in traceon_count_trigger()
   977  (data->count)--;  in traceon_count_trigger()
  1019  if (!data->count)  in traceoff_count_trigger()
  1022  if (data->count != -1)  in traceoff_count_trigger()
   [all …]
trace_probe.c
   581  if (kstrtouint(t2, 0, &parg->count) || !parg->count) {  in traceprobe_parse_probe_arg_body()
   586  if (parg->count > MAX_ARRAY_LEN) {  in traceprobe_parse_probe_arg_body()
   601  if (parg->count || (t && strcmp(t, "string")))  in traceprobe_parse_probe_arg_body()
   611  *size += parg->type->size * (parg->count ?: 1);  in traceprobe_parse_probe_arg_body()
   613  if (parg->count) {  in traceprobe_parse_probe_arg_body()
   619  parg->count);  in traceprobe_parse_probe_arg_body()
   644  code->op == FETCH_OP_DATA) || parg->count) {  in traceprobe_parse_probe_arg_body()
   692  if (parg->count) {  in traceprobe_parse_probe_arg_body()
   708  code->param = parg->count;  in traceprobe_parse_probe_arg_body()
   863  if (parg->count) {  in __set_print_fmt()
   [all …]
/kernel/debug/
gdbstub.c
    95  int count;  in get_packet() local
   110  count = 0;  in get_packet()
   115  while (count < (BUFMAX - 1)) {  in get_packet()
   120  buffer[count] = ch;  in get_packet()
   121  count = count + 1;  in get_packet()
   137  buffer[count] = 0;  in get_packet()
   148  int count;  in put_packet() local
   157  count = 0;  in put_packet()
   159  while ((ch = buffer[count])) {  in put_packet()
   162  count++;  in put_packet()
   [all …]
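get_packet() and put_packet() implement GDB remote-serial-protocol framing: a packet is $&lt;payload&gt;#&lt;checksum&gt;, the checksum being the payload bytes summed modulo 256 and printed as two hex digits. A stand-alone sketch of the transmit side; the BUFMAX value and helper shape are illustrative:

    /* GDB RSP framing: sum the payload mod 256, append as two hex digits. */
    #include <stdio.h>

    #define BUFMAX 1024

    static void put_packet(char *out, const char *payload)
    {
            unsigned char checksum = 0;
            int count = 0;
            char ch;

            while ((ch = payload[count])) {       /* sum payload mod 256 */
                    checksum += ch;
                    count++;
            }
            snprintf(out, BUFMAX, "$%s#%02x", payload, checksum);
    }

    int main(void)
    {
            char packet[BUFMAX];

            put_packet(packet, "g");              /* "read registers" */
            printf("%s\n", packet);               /* prints $g#67 */
            return 0;
    }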
/kernel/cgroup/
debug.c
    79  u64 count;  in current_css_set_refcount_read() local
    82  count = refcount_read(&task_css_set(current)->refcount);  in current_css_set_refcount_read()
    84  return count;  in current_css_set_refcount_read()
   125  int count = 0;  in cgroup_css_links_read() local
   165  if (count++ <= MAX_TASKS_SHOWN_PER_CSS)  in cgroup_css_links_read()
   171  if (count++ <= MAX_TASKS_SHOWN_PER_CSS)  in cgroup_css_links_read()
   176  if (count > MAX_TASKS_SHOWN_PER_CSS)  in cgroup_css_links_read()
   178  count - MAX_TASKS_SHOWN_PER_CSS);  in cgroup_css_links_read()
   185  WARN_ON(count != cset->nr_tasks);  in cgroup_css_links_read()
/kernel/dma/
contiguous.c
   259  struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,  in dma_alloc_from_contiguous() argument
   265  return cma_alloc(dev_get_cma_area(dev), count, align, GFP_KERNEL |  in dma_alloc_from_contiguous()
   280  int count)  in dma_release_from_contiguous() argument
   282  return cma_release(dev_get_cma_area(dev), pages, count);  in dma_release_from_contiguous()
   358  unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;  in dma_free_contiguous() local
   362  if (cma_release(dev->cma_area, page, count))  in dma_free_contiguous()
   370  page, count))  in dma_free_contiguous()
   373  if (cma_release(dma_contiguous_default_area, page, count))  in dma_free_contiguous()
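dma_free_contiguous() converts a byte size into the page count that CMA works in with PAGE_ALIGN(size) >> PAGE_SHIFT: round up to a page boundary, then shift. A stand-alone sketch of that conversion, assuming a fixed 4 KiB page for illustration:

    /* Byte size to CMA page count, with a fixed 4 KiB page assumed. */
    #include <stddef.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)               /* 4096 */
    #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

    static unsigned long size_to_pages(size_t size)
    {
            return PAGE_ALIGN(size) >> PAGE_SHIFT;       /* pages for CMA */
    }

    int main(void)
    {
            printf("%zu bytes -> %lu pages\n", (size_t)1,     size_to_pages(1));
            printf("%zu bytes -> %lu pages\n", (size_t)4096,  size_to_pages(4096));
            printf("%zu bytes -> %lu pages\n", (size_t)10000, size_to_pages(10000));
            return 0;                                    /* 1, 1, 3 pages */
    }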
remap.c
    41  int count = PAGE_ALIGN(size) >> PAGE_SHIFT;  in dma_common_contiguous_remap() local
    46  pages = kvmalloc_array(count, sizeof(struct page *), GFP_KERNEL);  in dma_common_contiguous_remap()
    49  for (i = 0; i < count; i++)  in dma_common_contiguous_remap()
    51  vaddr = vmap(pages, count, VM_DMA_COHERENT, prot);  in dma_common_contiguous_remap()
/kernel/bpf/
bpf_iter.c
    99  if (seq->count) {  in bpf_seq_read()
   100  n = min(seq->count, size);  in bpf_seq_read()
   106  seq->count -= n;  in bpf_seq_read()
   119  seq->count = 0;  in bpf_seq_read()
   129  seq->count = 0;  in bpf_seq_read()
   134  seq->count = 0;  in bpf_seq_read()
   142  offs = seq->count;  in bpf_seq_read()
   157  if (seq->count >= size)  in bpf_seq_read()
   172  seq->count = offs;  in bpf_seq_read()
   174  seq->count = offs;  in bpf_seq_read()
   [all …]
/kernel/time/
clocksource.c
  1153  ssize_t count = 0;  in current_clocksource_show() local
  1156  count = snprintf(buf, PAGE_SIZE, "%s\n", curr_clocksource->name);  in current_clocksource_show()
  1159  return count;  in current_clocksource_show()
  1191  const char *buf, size_t count)  in current_clocksource_store() argument
  1197  ret = sysfs_get_uname(buf, override_name, count);  in current_clocksource_store()
  1218  const char *buf, size_t count)  in unbind_clocksource_store() argument
  1224  ret = sysfs_get_uname(buf, name, count);  in unbind_clocksource_store()
  1238  return ret ? ret : count;  in unbind_clocksource_store()
  1255  ssize_t count = 0;  in available_clocksource_show() local
  1265  count += snprintf(buf + count,  in available_clocksource_show()
   [all …]
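available_clocksource_show() builds its output with repeated snprintf() calls appended at offset count into one PAGE_SIZE buffer, returning the accumulated count as the show() result. A simplified stand-alone sketch of that accumulation idiom (the kernel's truncation clamping is more careful than this):

    /* Append-at-offset snprintf accumulation into a bounded buffer. */
    #include <stdio.h>

    #define PAGE_SIZE 4096

    static long list_names(char *buf, const char **names, int n)
    {
            long count = 0;

            for (int i = 0; i < n; i++) {
                    long avail = PAGE_SIZE - count;

                    if (avail <= 0)
                            break;                 /* buffer is full */
                    count += snprintf(buf + count, avail, "%s ", names[i]);
            }
            return count;                          /* the show() result */
    }

    int main(void)
    {
            const char *names[] = { "tsc", "hpet", "acpi_pm" };
            char buf[PAGE_SIZE] = "";

            long n = list_names(buf, names, 3);
            printf("%ld bytes: '%s'\n", n, buf);
            return 0;
    }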
test_udelay.c
   101  size_t count, loff_t *pos)  in udelay_test_write() argument
   108  if (count >= sizeof(lbuf))  in udelay_test_write()
   111  if (copy_from_user(lbuf, buf, count))  in udelay_test_write()
   113  lbuf[count] = '\0';  in udelay_test_write()
   126  return count;  in udelay_test_write()
/kernel/debug/kdb/
kdb_io.c
   208  int count;  in kdb_read() local
   330  count = kallsyms_symbol_complete(p_tmp, buf_size);  in kdb_read()
   331  if (tab == 2 && count > 0) {  in kdb_read()
   332  kdb_printf("\n%d symbols are found.", count);  in kdb_read()
   333  if (count > dtab_count) {  in kdb_read()
   334  count = dtab_count;  in kdb_read()
   338  count);  in kdb_read()
   341  for (i = 0; i < count; i++) {  in kdb_read()
   356  } else if (tab != 2 && count > 0) {  in kdb_read()