
Lines matching +full:cpu +full:-nr in mm/swap_slots.c (Linux kernel swap slots cache)

// SPDX-License-Identifier: GPL-2.0
...
 * it into local per cpu caches. This has the advantage
...
#include <linux/cpu.h>
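
Every fragment below operates on one per-CPU cache object (per_cpu(swp_slots, cpu)). Pieced together from the fields these lines touch, the cache looks roughly like the sketch below; the authoritative definition is struct swap_slots_cache in include/linux/swap_slots.h, and the exact layout there may differ:

	struct swap_slots_cache {
		bool		lock_initialized;
		struct mutex	alloc_lock;	/* protects slots, cur, nr (allocation side) */
		swp_entry_t	*slots;		/* entries ready to hand out */
		int		nr;		/* unused entries left in slots[] */
		int		cur;		/* index of the next entry to hand out */
		spinlock_t	free_lock;	/* protects slots_ret, n_ret (free side) */
		swp_entry_t	*slots_ret;	/* entries queued for return to the global pool */
		int		n_ret;		/* number of entries queued in slots_ret[] */
	};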

/* Must not be called with cpu hot plug lock */
/* in disable_swap_slots_cache_lock(): */
	/* serialize with cpu hotplug operations */

static int alloc_swap_slot_cache(unsigned int cpu)
	...
		return -ENOMEM;		/* slots allocation failed */
	...
		return -ENOMEM;		/* slots_ret allocation failed */
	...
	cache = &per_cpu(swp_slots, cpu);
	if (cache->slots || cache->slots_ret) {
	...
	if (!cache->lock_initialized) {
		mutex_init(&cache->alloc_lock);
		spin_lock_init(&cache->free_lock);
		cache->lock_initialized = true;
	}
	cache->nr = 0;
	cache->cur = 0;
	cache->n_ret = 0;
	...
	 * !cache->slots or !cache->slots_ret to know if it is safe to acquire
	...
	cache->slots = slots;
	cache->slots_ret = slots_ret;
	...
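
The tail of alloc_swap_slot_cache() encodes the file's publication rule: locks and counters are set up first, and the slots/slots_ret pointers are assigned last, so other paths can treat "pointer is non-NULL" as "cache is safe to lock" (that is what the truncated comment about !cache->slots / !cache->slots_ret is saying). A minimal sketch of the pairing, assuming the memory barrier the real file places between setup and publication:

	/* writer (cpu-online path): initialize, then publish */
	mutex_init(&cache->alloc_lock);
	spin_lock_init(&cache->free_lock);
	cache->nr = cache->cur = cache->n_ret = 0;
	mb();				/* assumed: order setup before publication */
	cache->slots = slots;		/* readers may now take alloc_lock */
	cache->slots_ret = slots_ret;	/* readers may now take free_lock */

	/* reader (e.g. the free path shown further down) */
	if (cache->slots_ret) {			/* unlocked "is it enabled?" probe */
		spin_lock_irq(&cache->free_lock);
		if (cache->slots_ret)		/* re-check: may have been drained */
			cache->slots_ret[cache->n_ret++] = entry;
		spin_unlock_irq(&cache->free_lock);
	}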

static void drain_slots_cache_cpu(unsigned int cpu, unsigned int type,
				  bool free_slots)
	...
	cache = &per_cpu(swp_slots, cpu);
	if ((type & SLOTS_CACHE) && cache->slots) {
		mutex_lock(&cache->alloc_lock);
		swapcache_free_entries(cache->slots + cache->cur, cache->nr);
		cache->cur = 0;
		cache->nr = 0;
		if (free_slots && cache->slots) {
			kvfree(cache->slots);
			cache->slots = NULL;
		}
		mutex_unlock(&cache->alloc_lock);
	}
	if ((type & SLOTS_CACHE_RET) && cache->slots_ret) {
		spin_lock_irq(&cache->free_lock);
		swapcache_free_entries(cache->slots_ret, cache->n_ret);
		cache->n_ret = 0;
		if (free_slots && cache->slots_ret) {
			slots = cache->slots_ret;
			cache->slots_ret = NULL;
		}
		spin_unlock_irq(&cache->free_lock);
	...

/* in __drain_swap_slots_cache(): */
	unsigned int cpu;
	...
	/*
	 * We cannot acquire cpu hot plug lock here as
	 * this function can be invoked in the cpu
	 * hot plug path:
	 * cpu_up -> lock cpu_hotplug -> cpu hotplug state callback
	 *	  -> memory allocation -> direct reclaim -> get_swap_page
	 *	  -> drain_swap_slots_cache
	 *
	 * Hence the loop over current online cpu below could miss cpu that
	 * ...
	 * cpu before it has been marked online. Hence, we will not
	 * fill any swap slots in slots cache of such cpu.
	 * There are no slots on such cpu that need to be drained.
	 */
	for_each_online_cpu(cpu)
		drain_slots_cache_cpu(cpu, type, false);
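
Since __drain_swap_slots_cache() deliberately avoids the hotplug lock, serialization has to come from its callers, which hold the file's cache mutex instead. The wrapper below is an assumed reconstruction from memory of mm/swap_slots.c; the swap_slots_cache_mutex and swap_slot_cache_active names are assumptions here and may differ by kernel version:

	/* assumed caller: disable the cache, then drain every CPU */
	static void deactivate_swap_slots_cache(void)
	{
		mutex_lock(&swap_slots_cache_mutex);
		swap_slot_cache_active = false;		/* stop new fills first */
		__drain_swap_slots_cache(SLOTS_CACHE | SLOTS_CACHE_RET);
		mutex_unlock(&swap_slots_cache_mutex);
	}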

static int free_slot_cache(unsigned int cpu)
	...
	drain_slots_cache_cpu(cpu, SLOTS_CACHE | SLOTS_CACHE_RET, true);
	...
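
alloc_swap_slot_cache() and free_slot_cache() have the int (*)(unsigned int cpu) shape of CPU-hotplug state callbacks, which is how each CPU gains a cache on the way up and loses it on the way down. A sketch of the registration; the real file uses cpuhp_setup_state() with its own dedicated CPUHP state constant, so CPUHP_AP_ONLINE_DYN and the init function name below are stand-ins:

	static int swap_slots_cache_hotplug_init(void)	/* hypothetical name */
	{
		/* negative return means registration failed */
		return cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "swap_slots_cache",
					 alloc_swap_slot_cache,	/* cpu coming online */
					 free_slot_cache);	/* cpu going down */
	}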

/* in refill_swap_slots_cache(): */
	if (!use_swap_slot_cache || cache->nr)
	...
	cache->cur = 0;
	...
		cache->nr = get_swap_pages(SWAP_SLOTS_CACHE_SIZE,
					   cache->slots, 1);
	...
	return cache->nr;
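
Joining the matched lines, refill_swap_slots_cache() plausibly reads as below: do nothing if caching is off or entries remain, otherwise rewind the cursor and batch-allocate up to SWAP_SLOTS_CACHE_SIZE entries from the global pool in one call. This is a reconstruction; the swap_slot_cache_active guard is an assumption:

	static int refill_swap_slots_cache(struct swap_slots_cache *cache)
	{
		if (!use_swap_slot_cache || cache->nr)
			return 0;		/* disabled, or no refill needed */

		cache->cur = 0;			/* restart consumption at slot 0 */
		if (swap_slot_cache_active)	/* assumed guard */
			cache->nr = get_swap_pages(SWAP_SLOTS_CACHE_SIZE,
						   cache->slots, 1);
		return cache->nr;		/* number of entries obtained */
	}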

/* in free_swap_slot(): */
	if (likely(use_swap_slot_cache && cache->slots_ret)) {
		spin_lock_irq(&cache->free_lock);
		...
		if (!use_swap_slot_cache || !cache->slots_ret) {
			spin_unlock_irq(&cache->free_lock);
			...
		}
		if (cache->n_ret >= SWAP_SLOTS_CACHE_SIZE) {
			...
			swapcache_free_entries(cache->slots_ret, cache->n_ret);
			cache->n_ret = 0;
		}
		cache->slots_ret[cache->n_ret++] = entry;
		spin_unlock_irq(&cache->free_lock);
	...
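
When the per-CPU return cache is unusable (caching disabled, or slots_ret torn down between the unlocked probe and the locked re-check), the entry must go straight back to the global pool. A sketch of the assumed overall shape of free_swap_slot(); the direct_free label is an assumption:

	int free_swap_slot(swp_entry_t entry)
	{
		struct swap_slots_cache *cache = raw_cpu_ptr(&swp_slots);

		if (likely(use_swap_slot_cache && cache->slots_ret)) {
			spin_lock_irq(&cache->free_lock);
			if (!use_swap_slot_cache || !cache->slots_ret) {
				spin_unlock_irq(&cache->free_lock);
				goto direct_free;	/* cache vanished under us */
			}
			/* ... batching path shown in the matched lines ... */
			spin_unlock_irq(&cache->free_lock);
		} else {
	direct_free:
			swapcache_free_entries(&entry, 1);	/* bypass the cache */
		}
		return 0;
	}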

/* in get_swap_page(): */
	/*
	 * ...
	 * accesses to the per-CPU data structure are protected by the
	 * mutex cache->alloc_lock.
	 *
	 * The alloc path here does not touch cache->slots_ret
	 * so cache->free_lock is not taken.
	 */
	...
	if (likely(check_cache_active() && cache->slots)) {
		mutex_lock(&cache->alloc_lock);
		if (cache->slots) {
			...
			if (cache->nr) {
				entry = cache->slots[cache->cur];
				cache->slots[cache->cur++].val = 0;
				cache->nr--;
			...
		mutex_unlock(&cache->alloc_lock);
	...
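
The lines elided after cache->nr-- are where the allocation fast path meets the refill path: on an empty cache, get_swap_page() refills and retries. A sketch of the assumed control flow around the matched lines; the repeat label is an assumption:

		if (cache->slots) {
	repeat:
			if (cache->nr) {
				/* fast path: hand out the next cached entry */
				entry = cache->slots[cache->cur];
				cache->slots[cache->cur++].val = 0;
				cache->nr--;
			} else if (refill_swap_slots_cache(cache)) {
				goto repeat;	/* refilled, try the fast path again */
			}
		}
		mutex_unlock(&cache->alloc_lock);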