
Searched refs:count (Results 1 – 25 of 133) sorted by relevance


/kernel/irq/
timings.c
282 for (i = irqts->count < IRQ_TIMINGS_SIZE ? \
283 0 : irqts->count & IRQ_TIMINGS_MASK, \
284 irqts->count = min(IRQ_TIMINGS_SIZE, \
285 irqts->count); \
286 irqts->count > 0; irqts->count--, \
294 int count; member
384 int index, i, period_max, count, start, min = INT_MAX; in __irq_timings_next_event() local
387 irqs->count = irqs->last_ts = 0; in __irq_timings_next_event()
396 period_max = irqs->count > (3 * PREDICTION_PERIOD_MAX) ? in __irq_timings_next_event()
397 PREDICTION_PERIOD_MAX : irqs->count / 3; in __irq_timings_next_event()
[all …]
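
The loop header above is walking a power-of-two ring buffer: once count has exceeded IRQ_TIMINGS_SIZE, the oldest retained entry sits at count & IRQ_TIMINGS_MASK, and count is clamped so at most one buffer's worth is visited. A minimal userspace sketch of the same indexing scheme (RING_SIZE, ring_walk and the driver in main() are illustrative names, not kernel API):

    #include <stdio.h>

    #define RING_SIZE 32              /* must be a power of two */
    #define RING_MASK (RING_SIZE - 1)

    /* Visit the last min(count, RING_SIZE) values, oldest first. */
    static void ring_walk(const int *buf, unsigned int count)
    {
        unsigned int n = count < RING_SIZE ? count : RING_SIZE;
        /* If the buffer wrapped, the oldest slot is count & RING_MASK. */
        unsigned int i = count < RING_SIZE ? 0 : count & RING_MASK;

        for (; n > 0; n--, i = (i + 1) & RING_MASK)
            printf("%d\n", buf[i]);
    }

    int main(void)
    {
        int buf[RING_SIZE];
        unsigned int count = 0;

        for (int v = 0; v < 40; v++)      /* write 40 values, wrapping */
            buf[count++ & RING_MASK] = v;

        ring_walk(buf, count);            /* prints 8..39, oldest first */
        return 0;
    }
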
proc.c
138 const char __user *buffer, size_t count, loff_t *pos) in write_irq_affinity() argument
151 err = cpumask_parselist_user(buffer, count, new_value); in write_irq_affinity()
153 err = cpumask_parse_user(buffer, count, new_value); in write_irq_affinity()
167 err = irq_select_affinity_usr(irq) ? -EINVAL : count; in write_irq_affinity()
171 err = count; in write_irq_affinity()
180 const char __user *buffer, size_t count, loff_t *pos) in irq_affinity_proc_write() argument
182 return write_irq_affinity(0, file, buffer, count, pos); in irq_affinity_proc_write()
186 const char __user *buffer, size_t count, loff_t *pos) in irq_affinity_list_proc_write() argument
188 return write_irq_affinity(1, file, buffer, count, pos); in irq_affinity_list_proc_write()
236 const char __user *buffer, size_t count, loff_t *ppos) in default_affinity_write() argument
[all …]
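
write_irq_affinity() follows the usual write-handler contract: parse the user buffer, and on success return the full count so the caller sees the whole write consumed, otherwise a negative errno. A compact userspace illustration of that contract (parse_mask and mask_write are stand-ins, not the kernel's cpumask helpers):

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/types.h>

    /* Stand-in for cpumask_parse_user(): accept a hex mask like "f0". */
    static int parse_mask(const char *buf, unsigned long *mask)
    {
        char *end;
        *mask = strtoul(buf, &end, 16);
        return (end == buf || (*end && *end != '\n')) ? -EINVAL : 0;
    }

    /* Write handlers return the byte count they consumed, or -errno. */
    static ssize_t mask_write(const char *buffer, size_t count)
    {
        unsigned long mask;
        int err = parse_mask(buffer, &mask);

        if (err)
            return err;           /* e.g. -EINVAL: nothing consumed */
        printf("new mask: %#lx\n", mask);
        return count;             /* success: claim the whole write */
    }

    int main(void)
    {
        printf("ret=%zd\n", mask_write("f0\n", 3));   /* ret=3 */
        printf("ret=%zd\n", mask_write("oops", 4));   /* ret=-22 */
        return 0;
    }
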
/kernel/
ksysfs.c
46 const char *buf, size_t count) in uevent_helper_store() argument
48 if (count+1 > UEVENT_HELPER_PATH_LEN) in uevent_helper_store()
50 memcpy(uevent_helper, buf, count); in uevent_helper_store()
51 uevent_helper[count] = '\0'; in uevent_helper_store()
52 if (count && uevent_helper[count-1] == '\n') in uevent_helper_store()
53 uevent_helper[count-1] = '\0'; in uevent_helper_store()
54 return count; in uevent_helper_store()
67 const char *buf, size_t count) in profiling_store() argument
85 return count; in profiling_store()
117 const char *buf, size_t count) in kexec_crash_size_store() argument
[all …]
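
uevent_helper_store() is the standard sysfs store idiom: bounds-check count against the destination, copy, NUL-terminate, strip a single trailing newline (echo appends one), and return count. The same steps in a self-contained sketch (PATH_LEN and helper_store are made-up names, and the errno on overflow is this sketch's choice):

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/types.h>

    #define PATH_LEN 256
    static char helper[PATH_LEN];

    static ssize_t helper_store(const char *buf, size_t count)
    {
        if (count + 1 > PATH_LEN)        /* leave room for the NUL */
            return -EINVAL;
        memcpy(helper, buf, count);
        helper[count] = '\0';
        if (count && helper[count - 1] == '\n')  /* echo appends one */
            helper[count - 1] = '\0';
        return count;
    }

    int main(void)
    {
        helper_store("/sbin/hotplug\n", 14);
        printf("helper=\"%s\"\n", helper);       /* newline stripped */
        return 0;
    }
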
user_namespace.c
132 refcount_set(&ns->ns.count, 1); in create_user_ns()
222 } while (refcount_dec_and_test(&parent->ns.count)); in free_user_ns()
238 u32 count; /* == 0 unless used with map_id_range_down() */ member
251 id2 = key->id + key->count - 1; in cmp_map_id()
259 last = first + el->count - 1; in cmp_map_id()
276 map_id_range_down_max(unsigned extents, struct uid_gid_map *map, u32 id, u32 count) in map_id_range_down_max() argument
281 key.count = count; in map_id_range_down_max()
294 map_id_range_down_base(unsigned extents, struct uid_gid_map *map, u32 id, u32 count) in map_id_range_down_base() argument
299 id2 = id + count - 1; in map_id_range_down_base()
304 last = first + map->extent[idx].count - 1; in map_id_range_down_base()
[all …]
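
map_id_range_down_max() feeds a range key into bsearch(): cmp_map_id() returns 0 only when the whole [id, id + count - 1] interval falls inside an extent. A self-contained version of that comparator (struct extent is a simplified stand-in for the kernel's struct uid_gid_extent, and the straddling/overflow cases are omitted):

    #include <stdio.h>
    #include <stdlib.h>

    struct extent { unsigned first, lower_first, count; };
    struct key    { unsigned id, count; };

    /* 0 when the whole key range fits inside the element's range. */
    static int cmp_range(const void *k, const void *e)
    {
        const struct key *key = k;
        const struct extent *el = e;
        unsigned id2 = key->id + key->count - 1;
        unsigned first = el->first, last = first + el->count - 1;

        if (key->id >= first && key->id <= last &&
            id2 >= first && id2 <= last)
            return 0;
        return key->id < first ? -1 : 1;
    }

    int main(void)
    {
        /* extents must be sorted by .first for bsearch() */
        struct extent map[] = { {0, 100000, 1000}, {1000, 200000, 1000} };
        struct key key = { 1500, 10 };
        struct extent *el = bsearch(&key, map, 2, sizeof(*map), cmp_range);

        if (el)     /* prints "maps to 200500" */
            printf("maps to %u\n", el->lower_first + (key.id - el->first));
        return 0;
    }
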
profile.c
412 const char __user *buffer, size_t count, loff_t *pos) in prof_cpu_mask_proc_write() argument
420 err = cpumask_parse_user(buffer, count, new_value); in prof_cpu_mask_proc_write()
423 err = count; in prof_cpu_mask_proc_write()
450 read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos) in read_profile() argument
460 if (count > (prof_len+1)*sizeof(unsigned int) - p) in read_profile()
461 count = (prof_len+1)*sizeof(unsigned int) - p; in read_profile()
464 while (p < sizeof(unsigned int) && count > 0) { in read_profile()
467 buf++; p++; count--; read++; in read_profile()
470 if (copy_to_user(buf, (void *)pnt, count)) in read_profile()
472 read += count; in read_profile()
[all …]
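
read_profile() clamps the requested count so the copy never runs past the end of the profile buffer, then advances the file position by what it actually copied. The same clamp-and-advance in a userspace read handler (buf_read and the static data are illustrative):

    #include <stdio.h>
    #include <string.h>
    #include <sys/types.h>

    static const char data[] = "0123456789";

    /* Copy at most count bytes starting at *ppos; advance *ppos. */
    static ssize_t buf_read(char *out, size_t count, off_t *ppos)
    {
        size_t len = sizeof(data) - 1;

        if (*ppos >= (off_t)len)
            return 0;                 /* EOF */
        if (count > len - *ppos)
            count = len - *ppos;      /* clamp to what is left */
        memcpy(out, data + *ppos, count);
        *ppos += count;
        return count;
    }

    int main(void)
    {
        char out[8];
        off_t pos = 6;
        ssize_t n = buf_read(out, sizeof(out), &pos);

        printf("read %zd bytes: %.*s\n", n, (int)n, out);  /* 4: 6789 */
        return 0;
    }
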
audit_tree.c
14 refcount_t count; member
30 int count; member
99 refcount_set(&tree->count, 1); in alloc_tree()
113 refcount_inc(&tree->count); in get_tree()
118 if (refcount_dec_and_test(&tree->count)) in put_tree()
132 for (i = 0; i < chunk->count; i++) { in free_chunk()
188 static struct audit_chunk *alloc_chunk(int count) in alloc_chunk() argument
193 chunk = kzalloc(struct_size(chunk, owners, count), GFP_KERNEL); in alloc_chunk()
199 chunk->count = count; in alloc_chunk()
201 for (i = 0; i < count; i++) { in alloc_chunk()
[all …]
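
alloc_tree(), get_tree() and put_tree() are the textbook refcount lifecycle: the object starts at 1, every new reference increments, and whoever drops the count to zero frees it. A userspace equivalent using C11 atomics (a sketch of the pattern, not of the kernel's refcount_t, which additionally saturates on overflow):

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct tree {
        atomic_int count;
        /* ... payload ... */
    };

    static struct tree *alloc_tree(void)
    {
        struct tree *t = calloc(1, sizeof(*t));
        if (t)
            atomic_init(&t->count, 1);  /* creator holds the first ref */
        return t;
    }

    static void get_tree(struct tree *t)
    {
        atomic_fetch_add(&t->count, 1);
    }

    static void put_tree(struct tree *t)
    {
        /* fetch_sub returns the old value; old == 1 means we were last */
        if (atomic_fetch_sub(&t->count, 1) == 1) {
            puts("last reference dropped, freeing");
            free(t);
        }
    }

    int main(void)
    {
        struct tree *t = alloc_tree();
        get_tree(t);   /* second user */
        put_tree(t);   /* count 2 -> 1 */
        put_tree(t);   /* count 1 -> 0: freed */
        return 0;
    }
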
latencytop.c
144 latency_record[i].count++; in account_global_scheduler_latency()
193 lat.count = 1; in __account_scheduler_latency()
221 mylat->count++; in __account_scheduler_latency()
255 lr->count, lr->time, lr->max); in lstats_show()
271 lstats_write(struct file *file, const char __user *buf, size_t count, in lstats_write() argument
276 return count; in lstats_write()
fail_function.c
237 size_t count, loff_t *ppos) in fei_write() argument
245 if (count > KSYM_NAME_LEN) in fei_write()
246 count = KSYM_NAME_LEN; in fei_write()
248 buf = memdup_user_nul(buffer, count); in fei_write()
259 ret = count; in fei_write()
270 ret = count; in fei_write()
300 ret = count; in fei_write()
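
fei_write() caps count at KSYM_NAME_LEN before duplicating the user buffer, so an oversized write cannot trigger an unbounded allocation, and the copy comes back NUL-terminated for string parsing. A sketch of that cap-then-duplicate step (memdup_nul is a userspace stand-in for memdup_user_nul(), and NAME_MAX_LEN is an arbitrary small cap):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/types.h>

    #define NAME_MAX_LEN 16   /* stand-in for KSYM_NAME_LEN */

    /* Userspace stand-in for memdup_user_nul(). */
    static char *memdup_nul(const char *src, size_t len)
    {
        char *p = malloc(len + 1);
        if (p) {
            memcpy(p, src, len);
            p[len] = '\0';
        }
        return p;
    }

    static ssize_t name_write(const char *buffer, size_t count)
    {
        if (count > NAME_MAX_LEN)
            count = NAME_MAX_LEN;    /* cap before allocating */

        char *buf = memdup_nul(buffer, count);
        if (!buf)
            return -1;
        printf("symbol: %s\n", buf); /* truncated to the cap */
        free(buf);
        return count;                /* claim the (capped) write */
    }

    int main(void)
    {
        name_write("some_very_long_symbol_name_here", 31);
        return 0;
    }
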
/kernel/locking/
rwsem.c
73 #c, atomic_long_read(&(sem)->count), \
195 long count = atomic_long_read(&sem->count); in is_rwsem_reader_owned() local
197 if (count & RWSEM_WRITER_MASK) in is_rwsem_reader_owned()
245 *cntp = atomic_long_add_return_acquire(RWSEM_READER_BIAS, &sem->count); in rwsem_read_trylock()
265 if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp, RWSEM_WRITER_LOCKED)) { in rwsem_write_trylock()
330 atomic_long_set(&sem->count, RWSEM_UNLOCKED_VALUE); in __init_rwsem()
401 atomic_long_andnot(RWSEM_FLAG_HANDOFF | RWSEM_FLAG_WAITERS, &sem->count); in rwsem_del_waiter()
454 if (unlikely(atomic_long_read(&sem->count) < 0)) in rwsem_mark_wake()
466 oldcount = atomic_long_fetch_add(adjustment, &sem->count); in rwsem_mark_wake()
481 atomic_long_add(-adjustment, &sem->count); in rwsem_mark_wake()
[all …]
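
rwsem.c packs the whole lock state into one atomic long: each reader adds RWSEM_READER_BIAS, a writer claims the low bits with a cmpxchg, and the flag bits drive the slow paths seen above. A deliberately tiny userspace model of that encoding (two constants and the trylock paths only; the real slow paths, handoff and waiter flags are omitted):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define WRITER_LOCKED 1L       /* low bit: a writer holds it */
    #define READER_BIAS   256L     /* one reader = +256 */

    static atomic_long count = 0;  /* the whole rwsem state word */

    static bool read_trylock(void)
    {
        long c = atomic_fetch_add(&count, READER_BIAS) + READER_BIAS;
        if (c & WRITER_LOCKED) {   /* writer got there first: undo */
            atomic_fetch_sub(&count, READER_BIAS);
            return false;
        }
        return true;
    }

    static bool write_trylock(void)
    {
        long expected = 0;         /* only lockable when fully idle */
        return atomic_compare_exchange_strong(&count, &expected,
                                              WRITER_LOCKED);
    }

    int main(void)
    {
        printf("reader: %d\n", read_trylock());   /* 1 */
        printf("writer: %d\n", write_trylock());  /* 0: reader bias set */
        atomic_fetch_sub(&count, READER_BIAS);    /* read_unlock */
        printf("writer: %d\n", write_trylock());  /* 1 */
        return 0;
    }
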
semaphore.c
60 if (likely(sem->count > 0)) in down()
61 sem->count--; in down()
84 if (likely(sem->count > 0)) in down_interruptible()
85 sem->count--; in down_interruptible()
111 if (likely(sem->count > 0)) in down_killable()
112 sem->count--; in down_killable()
137 int count; in down_trylock() local
140 count = sem->count - 1; in down_trylock()
141 if (likely(count >= 0)) in down_trylock()
142 sem->count = count; in down_trylock()
[all …]
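
semaphore.c keeps a plain int count protected by the semaphore's spinlock; down_trylock() computes count - 1 first and only stores it back when the result stays non-negative. The same logic under a pthread mutex (try_down()/up() are illustrative names; note the kernel's down_trylock() returns 0 on success, the opposite sense of this sketch):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct sem {
        pthread_mutex_t lock;
        int count;
    };

    /* Mirrors down_trylock(): commit count-1 only if it stays >= 0. */
    static bool try_down(struct sem *s)
    {
        bool ok;

        pthread_mutex_lock(&s->lock);
        int c = s->count - 1;
        ok = c >= 0;
        if (ok)
            s->count = c;
        pthread_mutex_unlock(&s->lock);
        return ok;
    }

    static void up(struct sem *s)
    {
        pthread_mutex_lock(&s->lock);
        s->count++;    /* the real up() also wakes a sleeper if any */
        pthread_mutex_unlock(&s->lock);
    }

    int main(void)
    {
        struct sem s = { PTHREAD_MUTEX_INITIALIZER, 1 };

        printf("%d\n", try_down(&s));  /* 1: count 1 -> 0 */
        printf("%d\n", try_down(&s));  /* 0: would go negative */
        up(&s);
        printf("%d\n", try_down(&s));  /* 1 again */
        return 0;
    }
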
lock_events.c
62 size_t count, loff_t *ppos) in lockevent_read() argument
80 return simple_read_from_buffer(user_buf, count, ppos, buf, len); in lockevent_read()
89 size_t count, loff_t *ppos) in lockevent_write() argument
97 return count; in lockevent_write()
106 return count; in lockevent_write()
/kernel/trace/
trace_selftest.c
63 static int __maybe_unused trace_test_buffer(struct array_buffer *buf, unsigned long *count) in trace_test_buffer() argument
91 if (count) in trace_test_buffer()
92 *count = cnt; in trace_test_buffer()
361 unsigned long count; in trace_selftest_startup_dynamic_tracing() local
396 ret = trace_test_buffer(&tr->array_buffer, &count); in trace_selftest_startup_dynamic_tracing()
400 if (count) { in trace_selftest_startup_dynamic_tracing()
417 ret = trace_test_buffer(&tr->array_buffer, &count); in trace_selftest_startup_dynamic_tracing()
423 if (!ret && count != 1) { in trace_selftest_startup_dynamic_tracing()
425 printk(KERN_CONT ".. filter failed count=%ld ..", count); in trace_selftest_startup_dynamic_tracing()
694 unsigned long count; in trace_selftest_startup_function() local
[all …]
trace_functions.c
255 last_info->count < U16_MAX) { in is_repeat_check()
258 last_info->count++; in is_repeat_check()
270 if (last_info->count) { in process_repeats()
272 last_info->count = 0; in process_repeats()
449 long *count; in update_traceon_count() local
481 count = (long *)ftrace_func_mapper_find_ip(mapper, ip); in update_traceon_count()
482 old_count = *count; in update_traceon_count()
501 *count = old_count - 1; in update_traceon_count()
587 long *count; in ftrace_stacktrace_count() local
600 count = (long *)ftrace_func_mapper_find_ip(mapper, ip); in ftrace_stacktrace_count()
[all …]
trace_osnoise.c
147 u64 count; member
155 u64 count; member
166 u64 count; member
175 u64 count; member
218 u64 count; member
881 osn_var->nmi.count++; in trace_osnoise_callback()
905 osn_var->irq.count++; in osnoise_trace_irq_entry()
1036 osn_var->softirq.count++; in trace_softirq_entry_callback()
1134 osn_var->thread.count++; in thread_entry()
1221 s->nmi_count = osn_var->nmi.count; in save_osn_sample_stats()
[all …]
trace_events_trigger.c
417 long count = (long)data; in event_trigger_print() local
421 if (count == -1) in event_trigger_print()
424 seq_printf(m, ":count=%ld", count); in event_trigger_print()
839 trigger_data->count = -1; in event_trigger_alloc()
877 ret = kstrtoul(number, 0, &trigger_data->count); in event_trigger_parse_num()
1286 if (!data->count) in traceon_count_trigger()
1289 if (data->count != -1) in traceon_count_trigger()
1290 (data->count)--; in traceon_count_trigger()
1334 if (!data->count) in traceoff_count_trigger()
1337 if (data->count != -1) in traceoff_count_trigger()
[all …]
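
The trigger snippets encode how many times a trigger may still fire: count == -1 means unlimited, 0 means exhausted, and anything else is decremented per hit. A tiny model of that policy (trigger_fire is an illustrative name):

    #include <stdbool.h>
    #include <stdio.h>

    struct trigger { long count; };   /* -1: unlimited, 0: spent */

    static bool trigger_fire(struct trigger *t)
    {
        if (!t->count)
            return false;             /* already exhausted */
        if (t->count != -1)
            t->count--;               /* consume one shot */
        return true;
    }

    int main(void)
    {
        struct trigger limited = { 2 }, unlimited = { -1 };

        for (int i = 0; i < 4; i++)
            printf("limited: %d  unlimited: %d\n",
                   trigger_fire(&limited), trigger_fire(&unlimited));
        /* limited fires twice, unlimited fires every time */
        return 0;
    }
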
trace_hwlat.c
99 int count; /* # of iterations over thresh */ member
107 u64 count; /* total since reset */ member
149 entry->count = sample->count; in trace_hwlat_sample()
212 unsigned int count = 0; in get_sample() local
258 if (!count) in get_sample()
260 count++; in get_sample()
290 hwlat_data.count++; in get_sample()
291 s.seqnum = hwlat_data.count; in get_sample()
296 s.count = count; in get_sample()
838 hwlat_data.count = 0; in hwlat_tracer_init()
/kernel/trace/rv/
rv.c
212 static ssize_t monitor_enable_read_data(struct file *filp, char __user *user_buf, size_t count, in monitor_enable_read_data() argument
220 return simple_read_from_buffer(user_buf, count, ppos, buff, strlen(buff)+1); in monitor_enable_read_data()
283 size_t count, loff_t *ppos) in monitor_enable_write_data() argument
289 retval = kstrtobool_from_user(user_buf, count, &val); in monitor_enable_write_data()
293 retval = count; in monitor_enable_write_data()
304 return retval ? : count; in monitor_enable_write_data()
317 static ssize_t monitor_desc_read_data(struct file *filp, char __user *user_buf, size_t count, in monitor_desc_read_data() argument
327 return simple_read_from_buffer(user_buf, count, ppos, buff, strlen(buff) + 1); in monitor_desc_read_data()
513 size_t count, loff_t *ppos) in enabled_monitors_write() argument
522 if (count < 1 || count > MAX_RV_MONITOR_NAME_SIZE + 1) in enabled_monitors_write()
[all …]
rv_reactors.c
188 size_t count, loff_t *ppos) in monitor_reactors_write() argument
199 if (count < 1 || count > MAX_RV_REACTOR_NAME_SIZE + 1) in monitor_reactors_write()
204 retval = simple_write_to_buffer(buff, sizeof(buff) - 1, ppos, user_buf, count); in monitor_reactors_write()
212 return count; in monitor_reactors_write()
235 retval = count; in monitor_reactors_write()
376 size_t count, loff_t *ppos) in reacting_on_read_data() argument
382 return simple_read_from_buffer(user_buf, count, ppos, buff, strlen(buff)+1); in reacting_on_read_data()
400 size_t count, loff_t *ppos) in reacting_on_write_data() argument
405 retval = kstrtobool_from_user(user_buf, count, &val); in reacting_on_write_data()
424 return count; in reacting_on_write_data()
/kernel/events/
hw_breakpoint.c
44 atomic_t count[hw_breakpoint_slots(0)]; member
46 atomic_t *count;
188 hist->count = kcalloc(hw_breakpoint_slots_cached(type), sizeof(*hist->count), GFP_KERNEL); in bp_slots_histogram_alloc()
189 return hist->count; in bp_slots_histogram_alloc()
194 kfree(hist->count); in bp_slots_histogram_free()
243 WARN_ON(atomic_dec_return_relaxed(&hist->count[old_idx]) < 0); in bp_slots_histogram_add()
245 WARN_ON(atomic_inc_return_relaxed(&hist->count[new_idx]) < 0); in bp_slots_histogram_add()
252 const int count = atomic_read(&hist->count[i]); in bp_slots_histogram_max() local
255 ASSERT_EXCLUSIVE_WRITER(hist->count[i]); in bp_slots_histogram_max()
256 if (count > 0) in bp_slots_histogram_max()
[all …]
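
bp_slots_histogram_add() moves one unit between two buckets of an atomic histogram (the WARN_ONs above catch underflow), and bp_slots_histogram_max() scans from the top for the highest non-empty bucket. A compact userspace version (hist_add/hist_max and the bucket meaning are illustrative):

    #include <assert.h>
    #include <stdatomic.h>
    #include <stdio.h>

    #define SLOTS 4
    static atomic_int hist[SLOTS];    /* hist[i]: users of i+1 slots */

    /* Move one user from bucket old_idx to bucket new_idx. */
    static void hist_add(int old_idx, int new_idx)
    {
        if (old_idx >= 0)
            assert(atomic_fetch_sub(&hist[old_idx], 1) > 0);
        if (new_idx >= 0)
            atomic_fetch_add(&hist[new_idx], 1);
    }

    /* Highest bucket that still has entries, or -1 if all empty. */
    static int hist_max(void)
    {
        for (int i = SLOTS - 1; i >= 0; i--)
            if (atomic_load(&hist[i]) > 0)
                return i;
        return -1;
    }

    int main(void)
    {
        hist_add(-1, 0);          /* a task starts using 1 slot */
        hist_add(0, 2);           /* it grows to 3 slots */
        printf("max bucket: %d\n", hist_max());   /* 2 */
        return 0;
    }
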
/kernel/debug/
gdbstub.c
92 int count; in get_packet() local
107 count = 0; in get_packet()
112 while (count < (BUFMAX - 1)) { in get_packet()
117 buffer[count] = ch; in get_packet()
118 count = count + 1; in get_packet()
134 buffer[count] = 0; in get_packet()
145 int count; in put_packet() local
154 count = 0; in put_packet()
156 while ((ch = buffer[count])) { in put_packet()
159 count++; in put_packet()
[all …]
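
get_packet() accumulates payload bytes into buffer[count] until the buffer is nearly full, then NUL-terminates at buffer[count]. A sketch of that bounded accumulation, reading from a string instead of the serial line and stopping at the '#' checksum marker (BUFMAX kept from the snippet; checksum handling is omitted):

    #include <stdio.h>

    #define BUFMAX 64

    /* Copy payload chars until '#' (end marker) or the buffer fills. */
    static int get_packet(const char *in, char *buffer)
    {
        int count = 0;
        char ch;

        while (count < BUFMAX - 1 && (ch = *in++) != '\0' && ch != '#') {
            buffer[count] = ch;
            count = count + 1;
        }
        buffer[count] = 0;        /* always NUL-terminated */
        return count;
    }

    int main(void)
    {
        char buf[BUFMAX];
        int n = get_packet("m8048000,40#2a", buf);

        printf("%d bytes: %s\n", n, buf);   /* 11 bytes: m8048000,40 */
        return 0;
    }
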
/kernel/futex/
waitwake.c
378 static int unqueue_multiple(struct futex_vector *v, int count) in unqueue_multiple() argument
382 for (i = 0; i < count; i++) { in unqueue_multiple()
406 static int futex_wait_multiple_setup(struct futex_vector *vs, int count, int *woken) in futex_wait_multiple_setup() argument
428 for (i = 0; i < count; i++) { in futex_wait_multiple_setup()
442 for (i = 0; i < count; i++) { in futex_wait_multiple_setup()
503 static void futex_sleep_multiple(struct futex_vector *vs, unsigned int count, in futex_sleep_multiple() argument
509 for (; count; count--, vs++) { in futex_sleep_multiple()
531 int futex_wait_multiple(struct futex_vector *vs, unsigned int count, in futex_wait_multiple() argument
540 ret = futex_wait_multiple_setup(vs, count, &hint); in futex_wait_multiple()
549 futex_sleep_multiple(vs, count, to); in futex_wait_multiple()
[all …]
/kernel/cgroup/
debug.c
79 u64 count; in current_css_set_refcount_read() local
82 count = refcount_read(&task_css_set(current)->refcount); in current_css_set_refcount_read()
84 return count; in current_css_set_refcount_read()
125 int count = 0; in cgroup_css_links_read() local
165 if (count++ <= MAX_TASKS_SHOWN_PER_CSS) in cgroup_css_links_read()
171 if (count++ <= MAX_TASKS_SHOWN_PER_CSS) in cgroup_css_links_read()
176 if (count > MAX_TASKS_SHOWN_PER_CSS) in cgroup_css_links_read()
178 count - MAX_TASKS_SHOWN_PER_CSS); in cgroup_css_links_read()
185 WARN_ON(count != cset->nr_tasks); in cgroup_css_links_read()
/kernel/dma/
contiguous.c
258 struct page *dma_alloc_from_contiguous(struct device *dev, size_t count, in dma_alloc_from_contiguous() argument
264 return cma_alloc(dev_get_cma_area(dev), count, align, no_warn); in dma_alloc_from_contiguous()
278 int count) in dma_release_from_contiguous() argument
280 return cma_release(dev_get_cma_area(dev), pages, count); in dma_release_from_contiguous()
350 unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; in dma_free_contiguous() local
354 if (cma_release(dev->cma_area, page, count)) in dma_free_contiguous()
362 page, count)) in dma_free_contiguous()
365 if (cma_release(dma_contiguous_default_area, page, count)) in dma_free_contiguous()
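
dma_free_contiguous() derives a page count from a byte size with PAGE_ALIGN(size) >> PAGE_SHIFT, rounding partial pages up. The arithmetic in isolation (a 4 KiB PAGE_SIZE is assumed):

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)               /* 4096 */
    #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

    int main(void)
    {
        unsigned long sizes[] = { 1, 4096, 4097, 10000 };

        for (int i = 0; i < 4; i++)
            printf("size %5lu -> %lu page(s)\n",
                   sizes[i], PAGE_ALIGN(sizes[i]) >> PAGE_SHIFT);
        /* 1 -> 1, 4096 -> 1, 4097 -> 2, 10000 -> 3 */
        return 0;
    }
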
remap.c
41 int count = PAGE_ALIGN(size) >> PAGE_SHIFT; in dma_common_contiguous_remap() local
46 pages = kvmalloc_array(count, sizeof(struct page *), GFP_KERNEL); in dma_common_contiguous_remap()
49 for (i = 0; i < count; i++) in dma_common_contiguous_remap()
51 vaddr = vmap(pages, count, VM_DMA_COHERENT, prot); in dma_common_contiguous_remap()
/kernel/bpf/
bpf_iter.c
114 if (seq->count) { in bpf_seq_read()
115 n = min(seq->count, size); in bpf_seq_read()
121 seq->count -= n; in bpf_seq_read()
134 seq->count = 0; in bpf_seq_read()
144 seq->count = 0; in bpf_seq_read()
149 seq->count = 0; in bpf_seq_read()
158 offs = seq->count; in bpf_seq_read()
173 if (seq->count >= size) in bpf_seq_read()
188 seq->count = offs; in bpf_seq_read()
190 seq->count = offs; in bpf_seq_read()
[all …]
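
bpf_seq_read() first drains whatever is still buffered (seq->count bytes) before asking the iterator for more, and resets count to 0 on the error paths. A reduced model of that drain-first step (struct seqbuf and seq_read_step are illustrative, with a canned record standing in for the iterator callback):

    #include <stdio.h>
    #include <string.h>

    struct seqbuf {
        char buf[32];
        size_t count;     /* bytes produced but not yet handed out */
        size_t from;      /* read offset into buf */
    };

    /* Serve leftover bytes first; refill only when drained. */
    static size_t seq_read_step(struct seqbuf *s, char *out, size_t size)
    {
        if (s->count) {
            size_t n = s->count < size ? s->count : size;
            memcpy(out, s->buf + s->from, n);
            s->count -= n;
            s->from += n;
            return n;
        }
        /* refill: a real iterator calls its show() callback here */
        strcpy(s->buf, "record\n");
        s->count = 7;
        s->from = 0;
        return seq_read_step(s, out, size);
    }

    int main(void)
    {
        struct seqbuf s = { .count = 0 };
        char out[4];
        size_t n = seq_read_step(&s, out, sizeof(out));

        printf("%zu bytes: %.*s\n", n, (int)n, out);  /* 4: reco */
        n = seq_read_step(&s, out, sizeof(out));
        printf("%zu bytes: %.*s\n", n, (int)n, out);  /* 3: rd\n */
        return 0;
    }
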
