
Searched refs:swap (Results 1 – 18 of 18) sorted by relevance

/kernel/power/
user.c
33 int swap; member
74 data->swap = swap_type_of(swsusp_resume_device, 0); in snapshot_open()
85 data->swap = -1; in snapshot_open()
117 free_all_swap_pages(data->swap); in snapshot_release()
242 data->swap = swap_type_of(swdev, offset); in snapshot_set_swap_area()
243 if (data->swap < 0) in snapshot_set_swap_area()
359 size = count_swap_pages(data->swap, 1); in snapshot_ioctl()
365 if (data->swap < 0 || data->swap >= MAX_SWAPFILES) { in snapshot_ioctl()
369 offset = alloc_swapdev_block(data->swap); in snapshot_ioctl()
379 if (data->swap < 0 || data->swap >= MAX_SWAPFILES) { in snapshot_ioctl()
[all …]
power.h
163 extern sector_t alloc_swapdev_block(int swap);
164 extern void free_all_swap_pages(int swap);
Kconfig
63 It creates an image which is saved in your active swap. Upon the next
69 need to run mkswap against the swap partition used for the suspend.
71 It also works with swap files to a limited extent (for details see
72 <file:Documentation/power/swsusp-and-swap-files.rst>).
75 meantime you cannot use the swap partition(s)/file(s) involved in
104 It should be a valid swap partition (at least for now) that is turned
114 suspended image to. It will simply pick the first available swap
swap.c
179 sector_t alloc_swapdev_block(int swap) in alloc_swapdev_block() argument
183 offset = swp_offset(get_swap_page_of_type(swap)); in alloc_swapdev_block()
186 swap_free(swp_entry(swap, offset)); in alloc_swapdev_block()
188 return swapdev_block(swap, offset); in alloc_swapdev_block()
200 void free_all_swap_pages(int swap) in free_all_swap_pages() argument
211 swap_free(swp_entry(swap, offset)); in free_all_swap_pages()
Makefile
17 obj-$(CONFIG_HIBERNATION) += hibernate.o snapshot.o swap.o
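
The /kernel/power/ group is hibernation's use of swap space: user.c resolves the resume device into a swap type with swap_type_of(), swap.c hands out image blocks one at a time through alloc_swapdev_block(), and free_all_swap_pages() returns everything when the snapshot device is released. As a rough guide to how the swap.c calls above fit together (simplified; the offset tracking the real function does between allocation and return is reduced to a comment):

#include <linux/swap.h>		/* get_swap_page_of_type(), swap_free() */
#include <linux/swapops.h>	/* swp_entry(), swp_offset() */

/*
 * Sketch only: the real alloc_swapdev_block() also records each
 * offset so free_all_swap_pages(swap) can release every slot later,
 * and hands the slot back with swap_free(swp_entry(swap, offset)) if
 * that bookkeeping fails.
 */
static sector_t alloc_swapdev_block_sketch(int swap)
{
	unsigned long offset;

	/* Reserve one slot on the swap device selected for hibernation. */
	offset = swp_offset(get_swap_page_of_type(swap));
	if (!offset)
		return 0;

	/* ...remember 'offset' so free_all_swap_pages(swap) can find it... */

	/* Translate (swap type, offset) into a sector for the image writer. */
	return swapdev_block(swap, offset);
}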
/kernel/
kcmp.c
85 swap(l1, l2); in kcmp_lock()
stop_machine.c
359 swap(cpu1, cpu2); in stop_two_cpus()
fork.c
3353 swap(current->files, new_fd); in ksys_unshare()
workqueue.c
4052 swap(ctx->wq->dfl_pwq, ctx->dfl_pwq); in apply_wqattrs_commit()
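
Most of the remaining matches are not swap space at all but the kernel's generic swap() helper, a macro in the core headers that is roughly: do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0). The kcmp.c and stop_machine.c hits above, and double_lock_hb()/double_lock() in the futex/ and sched/ groups below, use it for lock ordering: put the two pointers into a fixed order before taking both locks, so concurrent callers cannot deadlock. A minimal sketch of that idiom (the function name is made up; the shape mirrors double_lock() and double_lock_hb()):

#include <linux/kernel.h>	/* swap() */
#include <linux/spinlock.h>
#include <linux/lockdep.h>	/* SINGLE_DEPTH_NESTING */

/* Illustrative only; callers are assumed to pass two distinct locks. */
static void double_lock_example(spinlock_t *l1, spinlock_t *l2)
{
	if (l1 > l2)
		swap(l1, l2);	/* canonical order: lower address first */

	spin_lock(l1);
	spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
}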
/kernel/futex/
futex.h
242 swap(hb1, hb2); in double_lock_hb()
/kernel/trace/
bpf_trace.c
2568 swap(*addr_a, *addr_b); in bpf_kprobe_multi_cookie_swap()
2569 swap(*cookie_a, *cookie_b); in bpf_kprobe_multi_cookie_swap()
2673 swap(*name_a, *name_b); in symbols_swap_r()
2681 swap(*cookie_a, *cookie_b); in symbols_swap_r()
Kconfig
515 bool "Allow snapshot to swap per CPU"
520 full swap (all buffers). If this is set, then the following is
532 or irq latency tracers are enabled, as those need to swap as well
trace.c
1850 swap(tr->array_buffer.buffer, tr->max_buffer.buffer); in update_max_tr()
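
The trace/ hits are the tracer's snapshot mechanism: update_max_tr() snapshots by exchanging the live ring-buffer pointer with the spare "max" buffer, and the Kconfig option decides whether a single CPU's buffer may be swapped instead of all of them. A rough sketch of that pointer-swap snapshot idea, with made-up types and names:

#include <linux/kernel.h>	/* swap() */
#include <linux/spinlock.h>

/*
 * Illustrative: instead of copying the live buffer, exchange the live
 * and spare buffer pointers under the lock that writers respect. The
 * real tracer swaps the struct trace_buffer pointers shown in the hit,
 * and can swap a single CPU's buffer when the Kconfig option allows it.
 */
struct snap_example {
	spinlock_t lock;
	void *live;	/* buffer currently being written to */
	void *spare;	/* pre-allocated buffer of the same size */
};

static void take_snapshot(struct snap_example *s)
{
	spin_lock(&s->lock);
	swap(s->live, s->spare);	/* O(1): old live buffer is now readable via ->spare */
	spin_unlock(&s->lock);
}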
/kernel/sched/
sched.h
2706 swap(l1, l2); in double_lock()
2715 swap(l1, l2); in double_lock_irq()
2724 swap(l1, l2); in double_raw_lock()
fair.c
4040 swap(cfs_rq->removed.util_avg, removed_util); in update_cfs_rq_load_avg()
4041 swap(cfs_rq->removed.load_avg, removed_load); in update_cfs_rq_load_avg()
4042 swap(cfs_rq->removed.runnable_avg, removed_runnable); in update_cfs_rq_load_avg()
6422 swap(master, slave); in wake_wide()
12033 swap(curr->vruntime, se->vruntime); in task_fork_fair()
core.c
610 swap(rq1, rq2); in double_rq_lock()
2851 swap(dst->user_cpus_ptr, user_mask); in dup_user_cpus_ptr()
2866 swap(p->user_cpus_ptr, user_mask); in clear_user_cpus_ptr()
/kernel/events/
core.c
904 swap(cpuctx->heap, storage); in perf_cgroup_ensure_storage()
3408 swap(event->total_time_enabled, next_event->total_time_enabled); in __perf_event_sync_stat()
3409 swap(event->total_time_running, next_event->total_time_running); in __perf_event_sync_stat()
3517 swap(ctx->task_ctx_data, next_ctx->task_ctx_data); in perf_event_context_sched_out()
3683 swap(*lp, *rp); in swap_ptr()
12068 swap(a, b); in mutex_lock_double()
/kernel/bpf/
syscall.c
933 swap(*a, *b); in map_off_arr_swap()
934 swap(*sz_a, *sz_b); in map_off_arr_swap()
1764 swap(prev_key, key); in generic_map_lookup_batch()
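
The bpf_trace.c and bpf/syscall.c hits are swap callbacks for the kernel's sort_r(): when two parallel arrays must stay in lockstep (kprobe addresses and their cookies, or the map offset and size arrays), the sort is given a custom swap callback that also moves the matching element of the second array. A sketch of that pattern with made-up names, using the same sort_r() calling convention as those files:

#include <linux/kernel.h>	/* swap() */
#include <linux/sort.h>
#include <linux/types.h>

struct pair_sort_ctx {
	u64 *keys;	/* array the comparison looks at */
	u64 *vals;	/* array kept in lockstep with the keys */
};

static int pair_cmp(const void *a, const void *b, const void *priv)
{
	const u64 *ka = a, *kb = b;

	return *ka < *kb ? -1 : *ka > *kb;
}

static void pair_swap(void *a, void *b, int size, const void *priv)
{
	const struct pair_sort_ctx *ctx = priv;
	u64 *ka = a, *kb = b;
	/* The key's index gives the matching slot in ->vals. */
	u64 *va = ctx->vals + (ka - ctx->keys);
	u64 *vb = ctx->vals + (kb - ctx->keys);

	swap(*ka, *kb);
	swap(*va, *vb);
}

static void sort_pairs(struct pair_sort_ctx *ctx, size_t n)
{
	sort_r(ctx->keys, n, sizeof(*ctx->keys), pair_cmp, pair_swap, ctx);
}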