| /kernel/irq/ |
| D | timings.c |
    282  for (i = irqts->count < IRQ_TIMINGS_SIZE ? \
    283           0 : irqts->count & IRQ_TIMINGS_MASK, \
    284       irqts->count = min(IRQ_TIMINGS_SIZE, \
    285                          irqts->count); \
    286       irqts->count > 0; irqts->count--, \
    294  int count;    member
    384  int index, i, period_max, count, start, min = INT_MAX;    in __irq_timings_next_event() local
    387  irqs->count = irqs->last_ts = 0;    in __irq_timings_next_event()
    396  period_max = irqs->count > (3 * PREDICTION_PERIOD_MAX) ?    in __irq_timings_next_event()
    397               PREDICTION_PERIOD_MAX : irqs->count / 3;    in __irq_timings_next_event()
    [all …]
|
| D | proc.c |
    136  const char __user *buffer, size_t count, loff_t *pos)    in write_irq_affinity() argument
    149  err = cpumask_parselist_user(buffer, count, new_value);    in write_irq_affinity()
    151  err = cpumask_parse_user(buffer, count, new_value);    in write_irq_affinity()
    165  err = irq_select_affinity_usr(irq) ? -EINVAL : count;    in write_irq_affinity()
    169  err = count;    in write_irq_affinity()
    178  const char __user *buffer, size_t count, loff_t *pos)    in irq_affinity_proc_write() argument
    180  return write_irq_affinity(0, file, buffer, count, pos);    in irq_affinity_proc_write()
    184  const char __user *buffer, size_t count, loff_t *pos)    in irq_affinity_list_proc_write() argument
    186  return write_irq_affinity(1, file, buffer, count, pos);    in irq_affinity_list_proc_write()
    234  const char __user *buffer, size_t count, loff_t *ppos)    in default_affinity_write() argument
    [all …]
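
The write_irq_affinity() lines trace the usual procfs write shape: one worker parses the buffer as either a CPU list or a hex mask depending on a type flag, both proc entry points delegate to it, and success is reported by returning the full count. A minimal sketch under simplified assumptions; demo_write_affinity and is_list are illustrative names, and the real worker also looks up the IRQ descriptor and applies the mask:

#include <linux/cpumask.h>
#include <linux/uaccess.h>

/*
 * One worker, two input formats: a CPU list ("0-3,8") or a hex
 * mask ("10f"). On success the whole count is claimed so userspace
 * sees a complete write.
 */
static ssize_t demo_write_affinity(int is_list, const char __user *buffer,
                                   size_t count, struct cpumask *new_value)
{
        int err;

        if (is_list)
                err = cpumask_parselist_user(buffer, count, new_value);
        else
                err = cpumask_parse_user(buffer, count, new_value);

        return err ? err : count;
}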
|
| /kernel/locking/ |
| D | rwsem.c |
    73   #c, atomic_long_read(&(sem)->count), \
    207  long count = atomic_long_read(&sem->count);    in is_rwsem_reader_owned() local
    209  if (count & RWSEM_WRITER_MASK)    in is_rwsem_reader_owned()
    259  *cntp = atomic_long_add_return_acquire(RWSEM_READER_BIAS, &sem->count);    in rwsem_read_trylock()
    284  if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp, RWSEM_WRITER_LOCKED)) {    in rwsem_write_trylock()
    339  atomic_long_set(&sem->count, RWSEM_UNLOCKED_VALUE);    in __init_rwsem()
    412  atomic_long_andnot(RWSEM_FLAG_HANDOFF | RWSEM_FLAG_WAITERS, &sem->count);    in rwsem_del_waiter()
    465  if (unlikely(atomic_long_read(&sem->count) < 0))    in rwsem_mark_wake()
    477  oldcount = atomic_long_fetch_add(adjustment, &sem->count);    in rwsem_mark_wake()
    492  atomic_long_add(-adjustment, &sem->count);    in rwsem_mark_wake()
    [all …]
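
All of the rwsem.c hits read or update a single atomic_long_t count that packs the writer-lock bit, waiter and handoff flags, and a biased reader count into one word. A hedged sketch of the decode side; the DEMO_* constants mirror the scheme rwsem.c uses (flag bits at the bottom, each reader adding 1 << 8) but are spelled here as assumptions, not the kernel's definitions:

#include <linux/atomic.h>
#include <linux/types.h>

/* Assumed layout, modeled on kernel/locking/rwsem.c: bit 0 is the
 * writer lock, bits 1-2 are waiter/handoff flags, readers count in
 * units of 1 << 8 above the flag bits. */
#define DEMO_WRITER_LOCKED      (1UL << 0)
#define DEMO_FLAG_WAITERS       (1UL << 1)
#define DEMO_FLAG_HANDOFF       (1UL << 2)
#define DEMO_READER_SHIFT       8
#define DEMO_READER_BIAS        (1UL << DEMO_READER_SHIFT)

static long demo_reader_count(atomic_long_t *count)
{
        /* readers live above the flag bits, so shift them away */
        return atomic_long_read(count) >> DEMO_READER_SHIFT;
}

static bool demo_writer_locked(atomic_long_t *count)
{
        return atomic_long_read(count) & DEMO_WRITER_LOCKED;
}

Packing everything into one word is what lets rwsem_read_trylock() take the lock with a single atomic_long_add_return_acquire() of the reader bias (line 259) and then inspect the flags in the returned value.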
|
| D | semaphore.c |
    61   if (likely(sem->count > 0))    in down()
    62   sem->count--;    in down()
    85   if (likely(sem->count > 0))    in down_interruptible()
    86   sem->count--;    in down_interruptible()
    112  if (likely(sem->count > 0))    in down_killable()
    113  sem->count--;    in down_killable()
    138  int count;    in down_trylock() local
    141  count = sem->count - 1;    in down_trylock()
    142  if (likely(count >= 0))    in down_trylock()
    143  sem->count = count;    in down_trylock()
    [all …]
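
The semaphore.c hits all obey one invariant: count is inspected and decremented only under the semaphore's internal spinlock, and a would-be-negative decrement means the caller must sleep (or, for down_trylock(), fail immediately). A sketch of the trylock logic on a simplified stand-in structure, not the kernel's struct semaphore:

#include <linux/spinlock.h>

struct demo_sem {
        raw_spinlock_t  lock;
        unsigned int    count;
};

/* Returns 0 if the semaphore was acquired, 1 if it was busy. */
static int demo_down_trylock(struct demo_sem *sem)
{
        unsigned long flags;
        int busy;

        raw_spin_lock_irqsave(&sem->lock, flags);
        busy = (sem->count == 0);
        if (!busy)
                sem->count--;   /* consume one unit */
        raw_spin_unlock_irqrestore(&sem->lock, flags);

        return busy;
}

down() and its _interruptible/_killable variants differ only in what happens on the busy path: instead of returning, they queue the task on the wait list and sleep until up() replenishes count.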
|
| D | lock_events.c |
    62   size_t count, loff_t *ppos)    in lockevent_read() argument
    80   return simple_read_from_buffer(user_buf, count, ppos, buf, len);    in lockevent_read()
    89   size_t count, loff_t *ppos)    in lockevent_write() argument
    97   return count;    in lockevent_write()
    106  return count;    in lockevent_write()
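
lockevent_read() is the stock debugfs read recipe: format the value into a small kernel buffer, then let simple_read_from_buffer() clamp to the user's count and advance *ppos so short and repeated reads just work. A sketch with a hypothetical counter:

#include <linux/fs.h>
#include <linux/uaccess.h>

static unsigned long demo_event_count;  /* hypothetical counter */

static ssize_t demo_read(struct file *file, char __user *user_buf,
                         size_t count, loff_t *ppos)
{
        char buf[32];
        int len = scnprintf(buf, sizeof(buf), "%lu\n", demo_event_count);

        /* handles offset, clamping, and copy_to_user in one call */
        return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}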
|
| /kernel/ |
| D | ksysfs.c |
    71   const char *buf, size_t count)    in uevent_helper_store() argument
    73   if (count+1 > UEVENT_HELPER_PATH_LEN)    in uevent_helper_store()
    75   memcpy(uevent_helper, buf, count);    in uevent_helper_store()
    76   uevent_helper[count] = '\0';    in uevent_helper_store()
    77   if (count && uevent_helper[count-1] == '\n')    in uevent_helper_store()
    78   uevent_helper[count-1] = '\0';    in uevent_helper_store()
    79   return count;    in uevent_helper_store()
    92   const char *buf, size_t count)    in profiling_store() argument
    117  return count;    in profiling_store()
    150  const char *buf, size_t count)    in kexec_crash_size_store() argument
    [all …]
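
uevent_helper_store() shows the canonical sysfs store shape: bounds-check against the destination, copy, NUL-terminate, strip the trailing newline that echo appends, and return count to consume the write. A sketch against a hypothetical destination buffer:

#include <linux/errno.h>
#include <linux/kobject.h>
#include <linux/string.h>
#include <linux/sysfs.h>

#define DEMO_PATH_LEN 256
static char demo_path[DEMO_PATH_LEN];  /* hypothetical destination */

static ssize_t demo_store(struct kobject *kobj, struct kobj_attribute *attr,
                          const char *buf, size_t count)
{
        if (count + 1 > DEMO_PATH_LEN)
                return -EINVAL; /* reject before touching the buffer */

        memcpy(demo_path, buf, count);
        demo_path[count] = '\0';
        /* "echo foo > file" appends '\n'; strip it from the stored string */
        if (count && demo_path[count - 1] == '\n')
                demo_path[count - 1] = '\0';
        return count;
}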
|
| D | user_namespace.c |
    132  refcount_set(&ns->ns.count, 1);    in create_user_ns()
    225  } while (refcount_dec_and_test(&parent->ns.count));    in free_user_ns()
    241  u32 count; /* == 0 unless used with map_id_range_down() */    member
    254  id2 = key->id + key->count - 1;    in cmp_map_id()
    262  last = first + el->count - 1;    in cmp_map_id()
    279  map_id_range_down_max(unsigned extents, struct uid_gid_map *map, u32 id, u32 count)    in map_id_range_down_max() argument
    284  key.count = count;    in map_id_range_down_max()
    297  map_id_range_down_base(unsigned extents, struct uid_gid_map *map, u32 id, u32 count)    in map_id_range_down_base() argument
    302  id2 = id + count - 1;    in map_id_range_down_base()
    307  last = first + map->extent[idx].count - 1;    in map_id_range_down_base()
    [all …]
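
In user_namespace.c, count turns point lookups into range lookups: an extent spans [first, first + count - 1], and a request for [id, id + count - 1] matches only if it sits entirely inside one extent. A sketch of that containment test over a linear extent array, mirroring map_id_range_down_base() with simplified type names:

#include <linux/types.h>

struct demo_extent {
        u32 first;       /* start of the mapped ID range */
        u32 lower_first; /* start of the range it maps to */
        u32 count;       /* number of consecutive IDs covered */
};

/* Map [id, id + count - 1] down through one extent, or (u32)-1. */
static u32 demo_map_range(const struct demo_extent *ext, unsigned int nr,
                          u32 id, u32 count)
{
        u32 id2 = id + count - 1;
        unsigned int i;

        for (i = 0; i < nr; i++) {
                u32 first = ext[i].first;
                u32 last = first + ext[i].count - 1;

                /* the whole requested range must sit inside the extent */
                if (id >= first && id <= last &&
                    id2 >= first && id2 <= last)
                        return ext[i].lower_first + (id - first);
        }
        return (u32)-1;
}

The map_id_range_down_max() variant does the same containment check via binary search, which is why cmp_map_id() (lines 254, 262) compares whole ranges rather than single IDs.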
|
| D | profile.c |
    207  read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)    in read_profile() argument
    216  if (count > (prof_len+1)*sizeof(unsigned int) - p)    in read_profile()
    217  count = (prof_len+1)*sizeof(unsigned int) - p;    in read_profile()
    220  while (p < sizeof(unsigned int) && count > 0) {    in read_profile()
    223  buf++; p++; count--; read++;    in read_profile()
    226  if (copy_to_user(buf, (void *)pnt, count))    in read_profile()
    228  read += count;    in read_profile()
    246  size_t count, loff_t *ppos)    in write_profile() argument
    249  if (count == sizeof(int)) {    in write_profile()
    260  return count;    in write_profile()
|
| D | audit_tree.c |
    14   refcount_t count;    member
    30   int count;    member
    37   } owners[] __counted_by(count);
    99   refcount_set(&tree->count, 1);    in alloc_tree()
    113  refcount_inc(&tree->count);    in get_tree()
    118  if (refcount_dec_and_test(&tree->count))    in put_tree()
    132  for (i = 0; i < chunk->count; i++) {    in free_chunk()
    188  static struct audit_chunk *alloc_chunk(int count)    in alloc_chunk() argument
    193  chunk = kzalloc(struct_size(chunk, owners, count), GFP_KERNEL);    in alloc_chunk()
    199  chunk->count = count;    in alloc_chunk()
    [all …]
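
alloc_chunk() pairs struct_size(), which computes the size of a struct plus a count-element flexible array with overflow saturation, with the __counted_by(count) annotation, which tells the compiler and fortify checks which field bounds that array. The same pattern in a sketch:

#include <linux/overflow.h>
#include <linux/slab.h>

struct demo_chunk {
        int count;                      /* number of owners[] entries */
        struct {
                int index;
        } owners[] __counted_by(count);
};

static struct demo_chunk *demo_alloc_chunk(int count)
{
        struct demo_chunk *chunk;

        /* struct_size() = sizeof(*chunk) + count * sizeof(owners[0]),
         * saturating to SIZE_MAX on overflow so kzalloc() fails cleanly */
        chunk = kzalloc(struct_size(chunk, owners, count), GFP_KERNEL);
        if (!chunk)
                return NULL;

        chunk->count = count;   /* set the bound before touching owners[] */
        return chunk;
}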
|
| D | latencytop.c |
    143  latency_record[i].count++;    in account_global_scheduler_latency()
    192  lat.count = 1;    in __account_scheduler_latency()
    220  mylat->count++;    in __account_scheduler_latency()
    254  lr->count, lr->time, lr->max);    in lstats_show()
    270  lstats_write(struct file *file, const char __user *buf, size_t count,    in lstats_write() argument
    275  return count;    in lstats_write()
|
| D | fail_function.c |
    237  size_t count, loff_t *ppos)    in fei_write() argument
    245  if (count > KSYM_NAME_LEN)    in fei_write()
    246  count = KSYM_NAME_LEN;    in fei_write()
    248  buf = memdup_user_nul(buffer, count);    in fei_write()
    259  ret = count;    in fei_write()
    270  ret = count;    in fei_write()
    300  ret = count;    in fei_write()
|
| /kernel/trace/ |
| D | trace_selftest.c |
    63   static int __maybe_unused trace_test_buffer(struct array_buffer *buf, unsigned long *count)    in trace_test_buffer() argument
    91   if (count)    in trace_test_buffer()
    92   *count = cnt;    in trace_test_buffer()
    361  unsigned long count;    in trace_selftest_startup_dynamic_tracing() local
    396  ret = trace_test_buffer(&tr->array_buffer, &count);    in trace_selftest_startup_dynamic_tracing()
    400  if (count) {    in trace_selftest_startup_dynamic_tracing()
    417  ret = trace_test_buffer(&tr->array_buffer, &count);    in trace_selftest_startup_dynamic_tracing()
    423  if (!ret && count != 1) {    in trace_selftest_startup_dynamic_tracing()
    425  printk(KERN_CONT ".. filter failed count=%ld ..", count);    in trace_selftest_startup_dynamic_tracing()
    694  unsigned long count;    in trace_selftest_startup_function() local
    [all …]
|
| D | trace_functions.c |
    268  last_info->count < U16_MAX) {    in is_repeat_check()
    271  last_info->count++;    in is_repeat_check()
    283  if (last_info->count) {    in process_repeats()
    285  last_info->count = 0;    in process_repeats()
    460  long *count;    in update_traceon_count() local
    492  count = (long *)ftrace_func_mapper_find_ip(mapper, ip);    in update_traceon_count()
    493  old_count = *count;    in update_traceon_count()
    512  *count = old_count - 1;    in update_traceon_count()
    594  long *count;    in ftrace_stacktrace_count() local
    607  count = (long *)ftrace_func_mapper_find_ip(mapper, ip);    in ftrace_stacktrace_count()
    [all …]
|
| D | trace_osnoise.c |
    169  u64 count;    member
    177  u64 count;    member
    189  u64 count;    member
    198  u64 count;    member
    246  u64 count;    member
    911  osn_var->nmi.count++;    in trace_osnoise_callback()
    935  osn_var->irq.count++;    in osnoise_trace_irq_entry()
    1066 osn_var->softirq.count++;    in trace_softirq_entry_callback()
    1164 osn_var->thread.count++;    in thread_entry()
    1344 s->nmi_count = osn_var->nmi.count;    in save_osn_sample_stats()
    [all …]
|
| D | trace_events_trigger.c |
    419  long count = (long)data;    in event_trigger_print() local
    423  if (count == -1)    in event_trigger_print()
    426  seq_printf(m, ":count=%ld", count);    in event_trigger_print()
    857  trigger_data->count = -1;    in trigger_data_alloc()
    895  ret = kstrtoul(number, 0, &trigger_data->count);    in event_trigger_parse_num()
    1317 if (!data->count)    in traceon_count_trigger()
    1320 if (data->count != -1)    in traceon_count_trigger()
    1321 (data->count)--;    in traceon_count_trigger()
    1365 if (!data->count)    in traceoff_count_trigger()
    1368 if (data->count != -1)    in traceoff_count_trigger()
    [all …]
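
The trigger code overloads count as a firing budget: -1 (the default set at line 857) means unlimited, 0 means exhausted, and a positive value parsed by kstrtoul at line 895 is decremented once per hit. The gate the *_count_trigger() lines implement, in isolation:

#include <linux/types.h>

/* Returns true if the trigger should fire this time. */
static bool demo_trigger_should_fire(long *count)
{
        if (!*count)
                return false;   /* budget exhausted */
        if (*count != -1)
                (*count)--;     /* -1 is the "unlimited" sentinel */
        return true;
}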
|
| D | trace_hwlat.c |
    99   int count; /* # of iterations over thresh */    member
    107  u64 count; /* total since reset */    member
    149  entry->count = sample->count;    in trace_hwlat_sample()
    212  unsigned int count = 0;    in get_sample() local
    258  if (!count)    in get_sample()
    260  count++;    in get_sample()
    290  hwlat_data.count++;    in get_sample()
    291  s.seqnum = hwlat_data.count;    in get_sample()
    296  s.count = count;    in get_sample()
    840  hwlat_data.count = 0;    in hwlat_tracer_init()
|
| /kernel/events/ |
| D | hw_breakpoint.c |
    44   atomic_t count[hw_breakpoint_slots(0)];    member
    46   atomic_t *count;
    188  hist->count = kcalloc(hw_breakpoint_slots_cached(type), sizeof(*hist->count), GFP_KERNEL);    in bp_slots_histogram_alloc()
    189  return hist->count;    in bp_slots_histogram_alloc()
    194  kfree(hist->count);    in bp_slots_histogram_free()
    243  WARN_ON(atomic_dec_return_relaxed(&hist->count[old_idx]) < 0);    in bp_slots_histogram_add()
    245  WARN_ON(atomic_inc_return_relaxed(&hist->count[new_idx]) < 0);    in bp_slots_histogram_add()
    252  const int count = atomic_read(&hist->count[i]);    in bp_slots_histogram_max() local
    255  ASSERT_EXCLUSIVE_WRITER(hist->count[i]);    in bp_slots_histogram_max()
    256  if (count > 0)    in bp_slots_histogram_max()
    [all …]
|
| /kernel/trace/rv/ |
| D | rv.c |
    212  static ssize_t monitor_enable_read_data(struct file *filp, char __user *user_buf, size_t count,    in monitor_enable_read_data() argument
    220  return simple_read_from_buffer(user_buf, count, ppos, buff, strlen(buff)+1);    in monitor_enable_read_data()
    285  size_t count, loff_t *ppos)    in monitor_enable_write_data() argument
    291  retval = kstrtobool_from_user(user_buf, count, &val);    in monitor_enable_write_data()
    304  return retval ? : count;    in monitor_enable_write_data()
    316  static ssize_t monitor_desc_read_data(struct file *filp, char __user *user_buf, size_t count,    in monitor_desc_read_data() argument
    326  return simple_read_from_buffer(user_buf, count, ppos, buff, strlen(buff) + 1);    in monitor_desc_read_data()
    511  size_t count, loff_t *ppos)    in enabled_monitors_write() argument
    520  if (count < 1 || count > MAX_RV_MONITOR_NAME_SIZE + 1)    in enabled_monitors_write()
    525  retval = simple_write_to_buffer(buff, sizeof(buff) - 1, ppos, user_buf, count);    in enabled_monitors_write()
    [all …]
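
monitor_enable_write_data() condenses the boolean-knob write handler to two steps: kstrtobool_from_user() parses the buffer, and the GNU "retval ? : count" idiom at line 304 returns the errno when parsing failed and claims the whole count otherwise. A sketch with a hypothetical switch:

#include <linux/fs.h>
#include <linux/kstrtox.h>
#include <linux/uaccess.h>

static bool demo_enabled;       /* hypothetical monitor switch */

static ssize_t demo_enable_write(struct file *filp,
                                 const char __user *user_buf,
                                 size_t count, loff_t *ppos)
{
        bool val;
        int retval;

        retval = kstrtobool_from_user(user_buf, count, &val);
        if (!retval)
                demo_enabled = val;

        /* GNU "a ?: b": a nonzero errno propagates, else claim count */
        return retval ? : count;
}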
|
| D | rv_reactors.c |
    188  size_t count, loff_t *ppos)    in monitor_reactors_write() argument
    199  if (count < 1 || count > MAX_RV_REACTOR_NAME_SIZE + 1)    in monitor_reactors_write()
    204  retval = simple_write_to_buffer(buff, sizeof(buff) - 1, ppos, user_buf, count);    in monitor_reactors_write()
    212  return count;    in monitor_reactors_write()
    235  retval = count;    in monitor_reactors_write()
    376  size_t count, loff_t *ppos)    in reacting_on_read_data() argument
    382  return simple_read_from_buffer(user_buf, count, ppos, buff, strlen(buff)+1);    in reacting_on_read_data()
    400  size_t count, loff_t *ppos)    in reacting_on_write_data() argument
    405  retval = kstrtobool_from_user(user_buf, count, &val);    in reacting_on_write_data()
    424  return count;    in reacting_on_write_data()
|
| /kernel/power/ |
| D | main.c |
    155  ssize_t count = 0;    in mem_sleep_show() local
    165  count += sysfs_emit_at(buf, count, "[%s] ", label);    in mem_sleep_show()
    167  count += sysfs_emit_at(buf, count, "%s ", label);    in mem_sleep_show()
    172  if (count > 0)    in mem_sleep_show()
    173  buf[count - 1] = '\n';    in mem_sleep_show()
    175  return count;    in mem_sleep_show()
    273  ssize_t count = 0;    in pm_test_show() local
    279  count += sysfs_emit_at(buf, count, "[%s] ", pm_tests[level]);    in pm_test_show()
    281  count += sysfs_emit_at(buf, count, "%s ", pm_tests[level]);    in pm_test_show()
    285  if (count > 0)    in pm_test_show()
    [all …]
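
mem_sleep_show() and pm_test_show() build their output with the same accumulator: each option is appended with sysfs_emit_at() at offset count, the active one in brackets, and the final separator space is overwritten with the newline sysfs expects. The loop in isolation, with illustrative parameters:

#include <linux/sysfs.h>
#include <linux/types.h>

static ssize_t demo_options_show(char *buf, const char * const *names,
                                 int n, int active)
{
        ssize_t count = 0;
        int i;

        for (i = 0; i < n; i++) {
                if (i == active)
                        count += sysfs_emit_at(buf, count, "[%s] ", names[i]);
                else
                        count += sysfs_emit_at(buf, count, "%s ", names[i]);
        }
        if (count > 0)
                buf[count - 1] = '\n';  /* replace the trailing space */
        return count;
}

sysfs_emit_at() rather than raw snprintf() is the point: it refuses to write past the one page sysfs guarantees for buf, so the accumulation cannot overrun.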
|
| /kernel/dma/ |
| D | contiguous.c |
    103  int nid, count = 0;    in early_numa_cma() local
    108  if (sscanf(s, "%lu%n", &tmp, &count) != 1)    in early_numa_cma()
    111  if (s[count] == ':') {    in early_numa_cma()
    116  s += count + 1;    in early_numa_cma()
    309  struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,    in dma_alloc_from_contiguous() argument
    315  return cma_alloc(dev_get_cma_area(dev), count, align, no_warn);    in dma_alloc_from_contiguous()
    329  int count)    in dma_release_from_contiguous() argument
    331  return cma_release(dev_get_cma_area(dev), pages, count);    in dma_release_from_contiguous()
    408  unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;    in dma_free_contiguous() local
    412  if (cma_release(dev->cma_area, page, count))    in dma_free_contiguous()
    [all …]
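
Here count means pages, not bytes: cma_alloc() and cma_release() both take a page count, so dma_free_contiguous() (line 408) first converts the byte size with the standard rounding idiom, shown in isolation:

#include <linux/mm.h>

static unsigned int demo_size_to_pages(size_t size)
{
        /* round up to a whole page, then divide by the page size */
        return PAGE_ALIGN(size) >> PAGE_SHIFT;
}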
|
| /kernel/debug/ |
| D | gdbstub.c |
    92   int count;    in get_packet() local
    107  count = 0;    in get_packet()
    112  while (count < (BUFMAX - 1)) {    in get_packet()
    117  buffer[count] = ch;    in get_packet()
    118  count = count + 1;    in get_packet()
    134  buffer[count] = 0;    in get_packet()
    145  int count;    in put_packet() local
    154  count = 0;    in put_packet()
    156  while ((ch = buffer[count])) {    in put_packet()
    159  count++;    in put_packet()
    [all …]
|
| /kernel/futex/ |
| D | waitwake.c |
    393  int futex_unqueue_multiple(struct futex_vector *v, int count)    in futex_unqueue_multiple() argument
    397  for (i = 0; i < count; i++) {    in futex_unqueue_multiple()
    421  int futex_wait_multiple_setup(struct futex_vector *vs, int count, int *woken)    in futex_wait_multiple_setup() argument
    443  for (i = 0; i < count; i++) {    in futex_wait_multiple_setup()
    457  for (i = 0; i < count; i++) {    in futex_wait_multiple_setup()
    518  static void futex_sleep_multiple(struct futex_vector *vs, unsigned int count,    in futex_sleep_multiple() argument
    524  for (; count; count--, vs++) {    in futex_sleep_multiple()
    546  int futex_wait_multiple(struct futex_vector *vs, unsigned int count,    in futex_wait_multiple() argument
    555  ret = futex_wait_multiple_setup(vs, count, &hint);    in futex_wait_multiple()
    564  futex_sleep_multiple(vs, count, to);    in futex_wait_multiple()
    [all …]
|
| /kernel/cgroup/ |
| D | debug.c |
    79   u64 count;    in current_css_set_refcount_read() local
    82   count = refcount_read(&task_css_set(current)->refcount);    in current_css_set_refcount_read()
    84   return count;    in current_css_set_refcount_read()
    125  int count = 0;    in cgroup_css_links_read() local
    165  if (count++ <= MAX_TASKS_SHOWN_PER_CSS)    in cgroup_css_links_read()
    171  if (count++ <= MAX_TASKS_SHOWN_PER_CSS)    in cgroup_css_links_read()
    176  if (count > MAX_TASKS_SHOWN_PER_CSS)    in cgroup_css_links_read()
    178  count - MAX_TASKS_SHOWN_PER_CSS);    in cgroup_css_links_read()
    185  WARN_ON(count != cset->nr_tasks);    in cgroup_css_links_read()
|
| /kernel/bpf/ |
| D | mprog.c |
    404  u32 id, count = 0;    in bpf_mprog_query() local
    412  count = bpf_mprog_total(entry);    in bpf_mprog_query()
    418  if (copy_to_user(&uattr->query.count, &count, sizeof(count)))    in bpf_mprog_query()
    424  if (attr->query.count == 0 || !uprog_id || !count)    in bpf_mprog_query()
    426  if (attr->query.count < count) {    in bpf_mprog_query()
    427  count = attr->query.count;    in bpf_mprog_query()
    448  if (i + 1 == count)    in bpf_mprog_query()
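
bpf_mprog_query() follows the classic two-call query handshake: the kernel always writes the real total back to userspace (line 418), fills at most the capacity the caller supplied (the clamp at lines 426-427), and flags truncation. A sketch with illustrative parameter names, not the real uapi layout:

#include <linux/errno.h>
#include <linux/minmax.h>
#include <linux/types.h>
#include <linux/uaccess.h>

static int demo_query(u32 __user *ucount, u32 __user *uids, u32 capacity,
                      const u32 *ids, u32 total)
{
        u32 i, n = min(capacity, total);

        /* always report the full total so the caller can size a retry */
        if (copy_to_user(ucount, &total, sizeof(total)))
                return -EFAULT;
        for (i = 0; i < n; i++) {
                if (copy_to_user(&uids[i], &ids[i], sizeof(u32)))
                        return -EFAULT;
        }
        /* a clamped copy is flagged; mprog.c uses -ENOSPC for this */
        return n < total ? -ENOSPC : 0;
}

Userspace typically calls once with capacity 0 to learn the total, allocates, then calls again, which is exactly what the early-exit at line 424 supports.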
|