
Searched full:zone (Results 1 – 25 of 1077) sorted by relevance


/kernel/linux/linux-5.10/fs/pstore/
zone.c
26 * struct psz_head - header of zone to flush to storage
31 * @data: zone data.
66 * @off: zone offset of storage
67 * @type: front-end type for this zone
68 * @name: front-end name for this zone
69 * @buffer: pointer to data buffer managed by this zone
72 * @should_recover: whether this zone should recover from storage
75 * zone structure in memory.
90 * struct psz_context - all about running state of pstore/zone
93 * @ppsz: pmsg storage zone
[all …]
/kernel/linux/linux-5.10/drivers/block/
null_blk_zoned.c
27 pr_err("Zone size larger than device capacity\n"); in null_init_zoned_dev()
35 pr_err("null_blk: zone capacity (%lu MB) larger than zone size (%lu MB)\n", in null_init_zoned_dev()
54 * released to avoid scheduling in atomic context. To guarantee zone in null_init_zoned_dev()
77 pr_info("zone_max_active limit disabled, limit >= zone count\n"); in null_init_zoned_dev()
87 pr_info("zone_max_open limit disabled, limit >= zone count\n"); in null_init_zoned_dev()
91 struct blk_zone *zone = &dev->zones[i]; in null_init_zoned_dev() local
93 zone->start = sector; in null_init_zoned_dev()
94 zone->len = dev->zone_size_sects; in null_init_zoned_dev()
95 zone->capacity = zone->len; in null_init_zoned_dev()
96 zone->wp = zone->start + zone->len; in null_init_zoned_dev()
[all …]
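The null_blk excerpt above fills in one zone descriptor per zone. Below is a minimal userspace sketch of the same layout arithmetic, assuming a hypothetical zone_desc type and made-up geometry (eight zones, two of them conventional); as in the snippet, a conventional zone's write pointer is parked at start + len, while sequential zones start empty:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct zone_desc {
        uint64_t start;    /* first sector of the zone */
        uint64_t len;      /* zone size in sectors */
        uint64_t capacity; /* usable sectors, here equal to len */
        uint64_t wp;       /* write pointer */
    };

    int main(void)
    {
        const uint64_t zone_size = 256 * 2048; /* 256 MiB in 512-byte sectors */
        const unsigned nr_zones = 8, nr_conv = 2;
        struct zone_desc *zones = calloc(nr_zones, sizeof(*zones));
        uint64_t sector = 0;

        if (!zones)
            return 1;
        for (unsigned i = 0; i < nr_zones; i++) {
            zones[i].start = sector;
            zones[i].len = zone_size;
            zones[i].capacity = zones[i].len;
            /* conventional zones have no write pointer, parked at the end;
             * sequential zones start empty, wp at the zone start */
            zones[i].wp = (i < nr_conv) ? sector + zone_size : sector;
            sector += zone_size;
        }
        printf("zone 0 wp=%llu zone %u wp=%llu\n",
               (unsigned long long)zones[0].wp, nr_zones - 1,
               (unsigned long long)zones[nr_zones - 1].wp);
        free(zones);
        return 0;
    }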
/kernel/linux/linux-5.10/drivers/gpu/drm/ttm/
ttm_memory.c
81 struct ttm_mem_zone *zone = in ttm_mem_zone_kobj_release() local
84 pr_info("Zone %7s: Used memory at exit: %llu KiB\n", in ttm_mem_zone_kobj_release()
85 zone->name, (unsigned long long)zone->used_mem >> 10); in ttm_mem_zone_kobj_release()
86 kfree(zone); in ttm_mem_zone_kobj_release()
93 struct ttm_mem_zone *zone = in ttm_mem_zone_show() local
97 spin_lock(&zone->glob->lock); in ttm_mem_zone_show()
99 val = zone->zone_mem; in ttm_mem_zone_show()
101 val = zone->emer_mem; in ttm_mem_zone_show()
103 val = zone->max_mem; in ttm_mem_zone_show()
105 val = zone->swap_limit; in ttm_mem_zone_show()
[all …]
/kernel/linux/linux-4.19/drivers/gpu/drm/ttm/
ttm_memory.c
78 struct ttm_mem_zone *zone = in ttm_mem_zone_kobj_release() local
81 pr_info("Zone %7s: Used memory at exit: %llu kiB\n", in ttm_mem_zone_kobj_release()
82 zone->name, (unsigned long long)zone->used_mem >> 10); in ttm_mem_zone_kobj_release()
83 kfree(zone); in ttm_mem_zone_kobj_release()
90 struct ttm_mem_zone *zone = in ttm_mem_zone_show() local
94 spin_lock(&zone->glob->lock); in ttm_mem_zone_show()
96 val = zone->zone_mem; in ttm_mem_zone_show()
98 val = zone->emer_mem; in ttm_mem_zone_show()
100 val = zone->max_mem; in ttm_mem_zone_show()
102 val = zone->swap_limit; in ttm_mem_zone_show()
[all …]
/kernel/linux/linux-5.10/include/uapi/linux/
blkzoned.h
25 * @BLK_ZONE_TYPE_CONVENTIONAL: The zone has no write pointer and can be written
26 * randomly. Zone reset has no effect on the zone.
27 * @BLK_ZONE_TYPE_SEQWRITE_REQ: The zone must be written sequentially
28 * @BLK_ZONE_TYPE_SEQWRITE_PREF: The zone can be written non-sequentially
39 * enum blk_zone_cond - Condition [state] of a zone in a zoned device.
41 * @BLK_ZONE_COND_NOT_WP: The zone has no write pointer, it is conventional.
42 * @BLK_ZONE_COND_EMPTY: The zone is empty.
43 * @BLK_ZONE_COND_IMP_OPEN: The zone is open, but not explicitly opened.
45 * OPEN ZONE command.
46 * @BLK_ZONE_COND_CLOSED: The zone was [explicitly] closed after writing.
[all …]
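blkzoned.h is the UAPI header behind the BLKREPORTZONE ioctl, so the zone types and conditions documented above can be inspected from userspace. A minimal sketch, assuming a zoned device at the example path /dev/nullb0 and abbreviated error handling:

    #include <fcntl.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/blkzoned.h>

    int main(void)
    {
        struct blk_zone_report *rep;
        int fd = open("/dev/nullb0", O_RDONLY);

        if (fd < 0)
            return 1;
        rep = calloc(1, sizeof(*rep) + sizeof(struct blk_zone));
        if (!rep)
            return 1;
        rep->sector = 0;   /* report from the start of the device */
        rep->nr_zones = 1; /* room for one zone descriptor */
        if (ioctl(fd, BLKREPORTZONE, rep) == 0 && rep->nr_zones == 1)
            printf("zone 0: type=%u cond=%u wp=%llu\n",
                   (unsigned)rep->zones[0].type,
                   (unsigned)rep->zones[0].cond,
                   (unsigned long long)rep->zones[0].wp);
        free(rep);
        close(fd);
        return 0;
    }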
/kernel/linux/linux-4.19/include/uapi/linux/
blkzoned.h
25 * @BLK_ZONE_TYPE_CONVENTIONAL: The zone has no write pointer and can be written
26 * randomly. Zone reset has no effect on the zone.
27 * @BLK_ZONE_TYPE_SEQWRITE_REQ: The zone must be written sequentially
28 * @BLK_ZONE_TYPE_SEQWRITE_PREF: The zone can be written non-sequentially
39 * enum blk_zone_cond - Condition [state] of a zone in a zoned device.
41 * @BLK_ZONE_COND_NOT_WP: The zone has no write pointer, it is conventional.
42 * @BLK_ZONE_COND_EMPTY: The zone is empty.
43 * @BLK_ZONE_COND_IMP_OPEN: The zone is open, but not explicitly opened.
45 * OPEN ZONE command.
46 * @BLK_ZONE_COND_CLOSED: The zone was [explicitly] closed after writing.
[all …]
/kernel/linux/linux-5.10/Documentation/filesystems/
zonefs.rst
4 ZoneFS - Zone filesystem for Zoned block devices
10 zonefs is a very simple file system exposing each zone of a zoned block device
24 by allowing SSTables to be stored in a zone file similarly to a regular file
26 of the higher level construct "one file is one zone" can help reduce the
34 space that is divided into zones. A zone is a group of consecutive LBAs and all
41 sequentially. Each sequential zone has a write pointer maintained by the
43 to the device. As a result of this write constraint, LBAs in a sequential zone
45 command (zone reset) before rewriting.
61 representing zones are grouped by zone type, which are themselves represented
62 by sub-directories. This file structure is built entirely using zone information
[all …]
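A hedged sketch of the zonefs write rule described above: a sequential zone file may only be appended at its write pointer, which zonefs exposes as the file size, and such writes must use direct I/O. The mount point /mnt/zonefs and the 4096-byte block size are assumptions:

    #define _GNU_SOURCE            /* for O_DIRECT */
    #include <fcntl.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/stat.h>
    #include <unistd.h>

    int main(void)
    {
        struct stat st;
        void *buf;
        int fd = open("/mnt/zonefs/seq/0", O_WRONLY | O_DIRECT);

        if (fd < 0 || fstat(fd, &st) < 0)
            return 1;
        if (posix_memalign(&buf, 4096, 4096)) /* O_DIRECT wants alignment */
            return 1;
        memset(buf, 0xab, 4096);
        /* the write pointer position is exposed as the file size */
        if (pwrite(fd, buf, 4096, st.st_size) != 4096)
            perror("pwrite");
        free(buf);
        close(fd);
        return 0;
    }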
/kernel/linux/linux-4.19/include/linux/
mmzone.h
104 * zone->lock and the zone lru_lock are two of the hottest locks in the kernel.
106 * cachelines. There are very few zone structures in the machine, so space
123 NUMA_INTERLEAVE_HIT, /* interleaver preferred this zone */
359 struct zone { struct
362 /* zone watermarks, access with *_wmark_pages(zone) macros */
370 * wasting several GB of ram we must reserve some of the lower zone argument
396 * spanned_pages is the total pages spanned by the zone, including argument
400 * present_pages is physical pages existing within the zone, which
418 * It is a seqlock because it has to be read outside of zone->lock,
422 * The span_seq lock is declared along with zone->lock because it is
[all …]
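The watermarks mentioned in the struct zone excerpt drive the allocator's reclaim decisions. An illustrative userspace analogue, with made-up thresholds standing in for the real *_wmark_pages(zone) values:

    #include <stdio.h>

    enum { WMARK_MIN = 1024, WMARK_LOW = 1536, WMARK_HIGH = 2048 };

    static const char *alloc_path(unsigned long free_pages)
    {
        if (free_pages > WMARK_LOW)
            return "fast path";              /* plenty of free memory */
        if (free_pages > WMARK_MIN)
            return "allocate, wake kswapd";  /* background reclaim */
        return "direct reclaim";             /* caller must reclaim first */
    }

    int main(void)
    {
        printf("%s\n", alloc_path(3000));
        printf("%s\n", alloc_path(1200));
        printf("%s\n", alloc_path(500));
        return 0;
    }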
vmstat.h
129 * Zone and node-based page accounting with per cpu differentials.
136 static inline void zone_numa_state_add(long x, struct zone *zone, in zone_numa_state_add() argument
139 atomic_long_add(x, &zone->vm_numa_stat[item]); in zone_numa_state_add()
150 static inline unsigned long zone_numa_state_snapshot(struct zone *zone, in zone_numa_state_snapshot() argument
153 long x = atomic_long_read(&zone->vm_numa_stat[item]); in zone_numa_state_snapshot()
157 x += per_cpu_ptr(zone->pageset, cpu)->vm_numa_stat_diff[item]; in zone_numa_state_snapshot()
163 static inline void zone_page_state_add(long x, struct zone *zone, in zone_page_state_add() argument
166 atomic_long_add(x, &zone->vm_stat[item]); in zone_page_state_add()
197 static inline unsigned long zone_page_state(struct zone *zone, in zone_page_state() argument
200 long x = atomic_long_read(&zone->vm_stat[item]); in zone_page_state()
[all …]
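The vmstat.h snippets show the per-cpu differential scheme: a folded global counter plus small per-cpu deltas, summed on demand by the *_snapshot() helpers. A runnable userspace analogue, with an ordinary array standing in for the per-cpu data:

    #include <stdatomic.h>
    #include <stdio.h>

    #define NR_CPUS 4

    static atomic_long global_count;     /* folded value, cf. vm_numa_stat */
    static long cpu_diff[NR_CPUS];       /* per-"CPU" unfolded deltas */

    static long snapshot(void)
    {
        long x = atomic_load(&global_count);

        for (int cpu = 0; cpu < NR_CPUS; cpu++)
            x += cpu_diff[cpu];          /* cf. zone_numa_state_snapshot() */
        return x < 0 ? 0 : x;            /* never report a negative count */
    }

    int main(void)
    {
        atomic_store(&global_count, 100);
        cpu_diff[0] = 3;
        cpu_diff[2] = -1;
        printf("snapshot=%ld\n", snapshot()); /* prints 102 */
        return 0;
    }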
memory_hotplug.h
11 struct zone;
48 /* Types for controlling the zone type of onlined and offlined memory */
57 * Zone resizing functions
59 * Note: any attempt to resize a zone should have pgdat_resize_lock() and
60 * zone_span_writelock() both held. This ensures the size of a zone
63 static inline unsigned zone_span_seqbegin(struct zone *zone) in zone_span_seqbegin() argument
65 return read_seqbegin(&zone->span_seqlock); in zone_span_seqbegin()
67 static inline int zone_span_seqretry(struct zone *zone, unsigned iv) in zone_span_seqretry() argument
69 return read_seqretry(&zone->span_seqlock, iv); in zone_span_seqretry()
71 static inline void zone_span_writelock(struct zone *zone) in zone_span_writelock() argument
[all …]
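zone_span_seqbegin()/zone_span_seqretry() wrap the classic seqlock read side: sample the sequence count, read the spanned range, and retry if the count moved (or was odd, meaning a writer was mid-update). A single-threaded userspace analogue of that retry loop, using C11 atomics:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_uint span_seq;   /* even: stable, odd: writer active */
    static unsigned long start_pfn = 1000, spanned = 5000;

    int main(void)
    {
        unsigned seq;
        unsigned long s, n;

        do {
            while ((seq = atomic_load(&span_seq)) & 1)
                ;                                 /* writer mid-update, wait */
            s = start_pfn;
            n = spanned;
        } while (atomic_load(&span_seq) != seq);  /* count moved: retry */
        printf("zone spans pfns [%lu, %lu)\n", s, s + n);
        return 0;
    }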
/kernel/linux/linux-5.10/drivers/md/
dm-zoned-metadata.c
33 * blocks indicating zone block validity.
39 * the first conventional zone found on disk.
87 * and give the zone ID (dzone_id) mapping the chunk on disk.
88 * This zone may be sequential or random. If it is a sequential
89 * zone, a second zone (bzone_id) used as a write buffer may
90 * also be specified. This second zone will always be a randomly
91 * writeable zone.
137 struct dm_zone *zone; member
174 /* Zone information array */
192 /* Zone allocation management */
[all …]
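The metadata comment above describes dm-zoned's chunk mapping: every chunk points at a data zone (dzone_id), and a sequential data zone may also carry a randomly writable buffer zone (bzone_id). A toy sketch of that table, with a hypothetical UNMAPPED marker and made-up zone numbers:

    #include <stdint.h>
    #include <stdio.h>

    #define UNMAPPED UINT32_MAX  /* analogue of dm-zoned's unmapped marker */

    struct chunk_map {
        uint32_t dzone_id;       /* data zone holding the chunk */
        uint32_t bzone_id;       /* optional random-write buffer zone */
    };

    int main(void)
    {
        struct chunk_map map[4] = {
            { .dzone_id = 7,  .bzone_id = UNMAPPED }, /* random data zone */
            { .dzone_id = 12, .bzone_id = 3 },        /* seq zone + buffer */
            { .dzone_id = UNMAPPED, .bzone_id = UNMAPPED }, /* unmapped */
            { .dzone_id = 20, .bzone_id = UNMAPPED },
        };

        for (int chunk = 0; chunk < 4; chunk++) {
            if (map[chunk].dzone_id == UNMAPPED) {
                printf("chunk %d: unmapped\n", chunk);
                continue;
            }
            printf("chunk %d -> dzone %u%s\n", chunk, map[chunk].dzone_id,
                   map[chunk].bzone_id != UNMAPPED ? " (buffered)" : "");
        }
        return 0;
    }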
dm-zoned.h
94 * Zone descriptor.
97 /* For listing the zone depending on its state */
100 /* Device containing this zone */
103 /* Zone type and state */
106 /* Zone activation reference count */
109 /* Zone id */
112 /* Zone write pointer block (relative to the zone start block) */
115 /* Zone weight (number of valid blocks in the zone) */
118 /* The chunk that the zone maps */
122 * For a sequential data zone, pointer to the random zone
[all …]
/kernel/linux/linux-4.19/drivers/md/
dm-zoned-metadata.c
31 * blocks indicating zone block validity.
37 * the first conventional zone found on disk.
76 * and give the zone ID (dzone_id) mapping the chunk on disk.
77 * This zone may be sequential or random. If it is a sequential
78 * zone, a second zone (bzone_id) used as a write buffer may
79 * also be specified. This second zone will always be a randomly
80 * writeable zone.
148 /* Zone information array */
166 /* Zone allocation management */
188 unsigned int dmz_id(struct dmz_metadata *zmd, struct dm_zone *zone) in dmz_id() argument
[all …]
dm-zoned.h
77 * Zone descriptor.
80 /* For listing the zone depending on its state */
83 /* Zone type and state */
86 /* Zone activation reference count */
89 /* Zone write pointer block (relative to the zone start block) */
92 /* Zone weight (number of valid blocks in the zone) */
95 /* The chunk that the zone maps */
99 * For a sequential data zone, pointer to the random zone
101 * For a buffer zone, this points back to the data zone.
107 * Zone flags.
[all …]
dm-zoned-target.c
16 * Zone BIO context.
20 struct dm_zone *zone; member
86 struct dm_zone *zone = bioctx->zone; in dmz_bio_endio() local
88 if (zone) { in dmz_bio_endio()
91 dmz_is_seq(zone)) in dmz_bio_endio()
92 set_bit(DMZ_SEQ_WRITE_ERR, &zone->flags); in dmz_bio_endio()
93 dmz_deactivate_zone(zone); in dmz_bio_endio()
116 static int dmz_submit_bio(struct dmz_target *dmz, struct dm_zone *zone, in dmz_submit_bio() argument
129 dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block); in dmz_submit_bio()
139 if (bio_op(bio) == REQ_OP_WRITE && dmz_is_seq(zone)) in dmz_submit_bio()
[all …]
/kernel/linux/linux-5.10/include/linux/
mmzone.h
125 * zone->lock and the zone lru_lock are two of the hottest locks in the kernel.
127 * cachelines. There are very few zone structures in the machine, so space
144 NUMA_INTERLEAVE_HIT, /* interleaver preferred this zone */
437 struct zone { struct
440 /* zone watermarks, access with *_wmark_pages(zone) macros */
449 * wasting several GB of ram we must reserve some of the lower zone argument
475 * spanned_pages is the total pages spanned by the zone, including argument
479 * present_pages is physical pages existing within the zone, which
497 * It is a seqlock because it has to be read outside of zone->lock,
501 * The span_seq lock is declared along with zone->lock because it is
[all …]
memory_hotplug.h
11 struct zone;
48 /* Types for controlling the zone type of onlined and offlined memory */
52 /* Online the memory. Zone depends, see default_zone_for_pfn(). */
85 * Zone resizing functions
87 * Note: any attempt to resize a zone should have pgdat_resize_lock() and
88 * zone_span_writelock() both held. This ensures the size of a zone
91 static inline unsigned zone_span_seqbegin(struct zone *zone) in zone_span_seqbegin() argument
93 return read_seqbegin(&zone->span_seqlock); in zone_span_seqbegin()
95 static inline int zone_span_seqretry(struct zone *zone, unsigned iv) in zone_span_seqretry() argument
97 return read_seqretry(&zone->span_seqlock, iv); in zone_span_seqretry()
[all …]
vmstat.h
138 * Zone and node-based page accounting with per cpu differentials.
145 static inline void zone_numa_state_add(long x, struct zone *zone, in zone_numa_state_add() argument
148 atomic_long_add(x, &zone->vm_numa_stat[item]); in zone_numa_state_add()
159 static inline unsigned long zone_numa_state_snapshot(struct zone *zone, in zone_numa_state_snapshot() argument
162 long x = atomic_long_read(&zone->vm_numa_stat[item]); in zone_numa_state_snapshot()
166 x += per_cpu_ptr(zone->pageset, cpu)->vm_numa_stat_diff[item]; in zone_numa_state_snapshot()
172 static inline void zone_page_state_add(long x, struct zone *zone, in zone_page_state_add() argument
175 atomic_long_add(x, &zone->vm_stat[item]); in zone_page_state_add()
214 static inline unsigned long zone_page_state(struct zone *zone, in zone_page_state() argument
217 long x = atomic_long_read(&zone->vm_stat[item]); in zone_page_state()
[all …]
/kernel/linux/linux-4.19/Documentation/thermal/
sysfs-api.txt
13 The generic thermal sysfs provides a set of interfaces for thermal zone
17 This how-to focuses on enabling new thermal zone and cooling devices to
19 This solution is platform independent and any type of thermal zone devices
22 The main task of the thermal sysfs driver is to expose thermal zone attributes
25 inputs from thermal zone attributes (the current temperature and trip point
33 1.1 thermal zone device interface
40 This interface function adds a new thermal zone device (sensor) to
44 type: the thermal zone type.
45 trips: the total number of trip points this thermal zone supports.
48 ops: thermal zone device call-backs.
[all …]
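The how-to above is about thermal_zone_device_register(). A kernel-side sketch (it builds only in a 4.19-era kernel tree, not as a standalone program) registering one zone with a single get_temp callback; the "demo" type, the fixed 42000 m°C reading, and the 1-second polling delay are made up:

    #include <linux/err.h>
    #include <linux/module.h>
    #include <linux/thermal.h>

    static int demo_get_temp(struct thermal_zone_device *tz, int *temp)
    {
        *temp = 42000;             /* report a fixed 42.000 °C, made up */
        return 0;
    }

    static struct thermal_zone_device_ops demo_ops = {
        .get_temp = demo_get_temp,
    };

    static struct thermal_zone_device *demo_tz;

    static int __init demo_init(void)
    {
        /* type "demo", no trip points, no writable-trip mask, poll at 1 s */
        demo_tz = thermal_zone_device_register("demo", 0, 0, NULL,
                                               &demo_ops, NULL, 0, 1000);
        return PTR_ERR_OR_ZERO(demo_tz);
    }

    static void __exit demo_exit(void)
    {
        thermal_zone_device_unregister(demo_tz);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");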
/kernel/linux/linux-5.10/mm/
page_alloc.c
13 * Zone balancing, Kanoj Sarcar, SGI, Jan 2000
101 * shuffle the whole zone).
110 /* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
134 struct zone *zone; member
382 /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
443 * prev_end_pfn static that contains the end of previous zone in defer_init()
589 static int page_outside_zone_boundaries(struct zone *zone, struct page *page) in page_outside_zone_boundaries() argument
597 seq = zone_span_seqbegin(zone); in page_outside_zone_boundaries()
598 start_pfn = zone->zone_start_pfn; in page_outside_zone_boundaries()
599 sp = zone->spanned_pages; in page_outside_zone_boundaries()
[all …]
memory_hotplug.c
307 * call this function after deciding the zone to which to
355 static unsigned long find_smallest_section_pfn(int nid, struct zone *zone, in find_smallest_section_pfn() argument
366 if (zone != page_zone(pfn_to_page(start_pfn))) in find_smallest_section_pfn()
376 static unsigned long find_biggest_section_pfn(int nid, struct zone *zone, in find_biggest_section_pfn() argument
391 if (zone != page_zone(pfn_to_page(pfn))) in find_biggest_section_pfn()
400 static void shrink_zone_span(struct zone *zone, unsigned long start_pfn, in shrink_zone_span() argument
404 int nid = zone_to_nid(zone); in shrink_zone_span()
406 zone_span_writelock(zone); in shrink_zone_span()
407 if (zone->zone_start_pfn == start_pfn) { in shrink_zone_span()
409 * If the section is the smallest section in the zone, it need in shrink_zone_span()
[all …]
vmstat.c
39 /* zero numa counters within a zone */
40 static void zero_zone_numa_counters(struct zone *zone) in zero_zone_numa_counters() argument
45 atomic_long_set(&zone->vm_numa_stat[item], 0); in zero_zone_numa_counters()
47 per_cpu_ptr(zone->pageset, cpu)->vm_numa_stat_diff[item] in zero_zone_numa_counters()
55 struct zone *zone; in zero_zones_numa_counters() local
57 for_each_populated_zone(zone) in zero_zones_numa_counters()
58 zero_zone_numa_counters(zone); in zero_zones_numa_counters()
159 * Manage combined zone based / global counters
172 int calculate_pressure_threshold(struct zone *zone) in calculate_pressure_threshold() argument
185 watermark_distance = low_wmark_pages(zone) - min_wmark_pages(zone); in calculate_pressure_threshold()
[all …]
/kernel/linux/linux-4.19/mm/
compaction.c
141 void defer_compaction(struct zone *zone, int order) in defer_compaction() argument
143 zone->compact_considered = 0; in defer_compaction()
144 zone->compact_defer_shift++; in defer_compaction()
146 if (order < zone->compact_order_failed) in defer_compaction()
147 zone->compact_order_failed = order; in defer_compaction()
149 if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT) in defer_compaction()
150 zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT; in defer_compaction()
152 trace_mm_compaction_defer_compaction(zone, order); in defer_compaction()
156 bool compaction_deferred(struct zone *zone, int order) in compaction_deferred() argument
158 unsigned long defer_limit = 1UL << zone->compact_defer_shift; in compaction_deferred()
[all …]
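defer_compaction() implements exponential backoff: each failure doubles (up to a cap) the number of attempts that compaction_deferred() will skip. A runnable userspace analogue of that pair, with hypothetical names:

    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_DEFER_SHIFT 6

    static unsigned int considered, defer_shift;

    static void defer_failure(void)      /* cf. defer_compaction() */
    {
        considered = 0;
        if (++defer_shift > MAX_DEFER_SHIFT)
            defer_shift = MAX_DEFER_SHIFT;
    }

    static bool deferred(void)           /* cf. compaction_deferred() */
    {
        unsigned int limit = 1u << defer_shift;

        if (++considered >= limit) {
            considered = limit;
            return false;                /* allow another attempt */
        }
        return true;                     /* still backing off */
    }

    int main(void)
    {
        defer_failure();
        defer_failure();                 /* two failures: skip 3 attempts */
        for (int i = 0; i < 6; i++)
            printf("attempt %d: %s\n", i, deferred() ? "deferred" : "run");
        return 0;
    }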
memory_hotplug.c
265 * call this function after deciding the zone to which to
313 static unsigned long find_smallest_section_pfn(int nid, struct zone *zone, in find_smallest_section_pfn() argument
324 if (zone && zone != page_zone(pfn_to_page(start_pfn))) in find_smallest_section_pfn()
334 static unsigned long find_biggest_section_pfn(int nid, struct zone *zone, in find_biggest_section_pfn() argument
349 if (zone && zone != page_zone(pfn_to_page(pfn))) in find_biggest_section_pfn()
358 static void shrink_zone_span(struct zone *zone, unsigned long start_pfn, in shrink_zone_span() argument
361 unsigned long zone_start_pfn = zone->zone_start_pfn; in shrink_zone_span()
362 unsigned long z = zone_end_pfn(zone); /* zone_end_pfn namespace clash */ in shrink_zone_span()
365 int nid = zone_to_nid(zone); in shrink_zone_span()
367 zone_span_writelock(zone); in shrink_zone_span()
[all …]
/kernel/linux/linux-4.19/fs/btrfs/
reada.c
228 struct reada_zone *zone; in reada_find_zone() local
234 zone = NULL; in reada_find_zone()
236 ret = radix_tree_gang_lookup(&dev->reada_zones, (void **)&zone, in reada_find_zone()
238 if (ret == 1 && logical >= zone->start && logical <= zone->end) { in reada_find_zone()
239 kref_get(&zone->refcnt); in reada_find_zone()
241 return zone; in reada_find_zone()
254 zone = kzalloc(sizeof(*zone), GFP_KERNEL); in reada_find_zone()
255 if (!zone) in reada_find_zone()
260 kfree(zone); in reada_find_zone()
264 zone->start = start; in reada_find_zone()
[all …]
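reada_find_zone() is a lookup-or-create pattern: find the zone covering a logical address and take a reference, or allocate and insert a fresh one on a miss. A simplified userspace analogue, with a fixed-size table standing in for the radix tree and all names hypothetical:

    #include <stdio.h>
    #include <stdlib.h>

    struct zone_entry {
        unsigned long start, end;
        unsigned refcnt;
    };

    #define NR_SLOTS 16
    static struct zone_entry *slots[NR_SLOTS];

    static struct zone_entry *find_zone(unsigned long logical)
    {
        for (int i = 0; i < NR_SLOTS; i++) {
            struct zone_entry *z = slots[i];

            if (z && logical >= z->start && logical <= z->end) {
                z->refcnt++;                 /* like kref_get() */
                return z;
            }
        }
        for (int i = 0; i < NR_SLOTS; i++) {
            if (!slots[i]) {                 /* miss: allocate and insert */
                struct zone_entry *z = calloc(1, sizeof(*z));

                if (!z)
                    return NULL;
                z->start = logical & ~0xfffffUL; /* 1 MiB-aligned zone */
                z->end = z->start + 0xfffff;
                z->refcnt = 1;
                slots[i] = z;
                return z;
            }
        }
        return NULL;
    }

    int main(void)
    {
        struct zone_entry *a = find_zone(0x123456);
        struct zone_entry *b = find_zone(0x123999);

        printf("same zone: %s, refcnt=%u\n", a == b ? "yes" : "no", a->refcnt);
        return 0;
    }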
