/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_BACKING_DEV_DEFS_H
#define __LINUX_BACKING_DEV_DEFS_H

#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/percpu_counter.h>
#include <linux/percpu-refcount.h>
#include <linux/flex_proportions.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kref.h>
#include <linux/refcount.h>
#include <linux/android_kabi.h>

struct page;
struct device;
struct dentry;

/*
 * Bits in bdi_writeback.state
 */
enum wb_state {
	WB_registered,		/* bdi_register() was done */
	WB_writeback_running,	/* Writeback is in progress */
	WB_has_dirty_io,	/* Dirty inodes on ->b_{dirty|io|more_io} */
	WB_start_all,		/* nr_pages == 0 (all) work pending */
};

enum wb_congested_state {
	WB_async_congested,	/* The async (write) queue is getting full */
	WB_sync_congested,	/* The sync queue is getting full */
};

enum wb_stat_item {
	WB_RECLAIMABLE,
	WB_WRITEBACK,
	WB_DIRTIED,
	WB_WRITTEN,
	NR_WB_STAT_ITEMS
};

#define WB_STAT_BATCH (8*(1+ilog2(nr_cpu_ids)))

/*
 * why some writeback work was initiated
 */
enum wb_reason {
	WB_REASON_BACKGROUND,
	WB_REASON_VMSCAN,
	WB_REASON_SYNC,
	WB_REASON_PERIODIC,
	WB_REASON_LAPTOP_TIMER,
	WB_REASON_FS_FREE_SPACE,
	/*
	 * There is no bdi forker thread any more; this work is now done
	 * by an emergency worker.  However, the reason is visible to
	 * userland through tracepoints and we keep exposing exactly the
	 * same information, so the name is kept despite the mismatch.
	 */
	WB_REASON_FORKER_THREAD,
	WB_REASON_FOREIGN_FLUSH,

	WB_REASON_MAX,
};

struct wb_completion {
	atomic_t		cnt;
	wait_queue_head_t	*waitq;
};

#define __WB_COMPLETION_INIT(_waitq)	\
	(struct wb_completion){ .cnt = ATOMIC_INIT(1), .waitq = (_waitq) }

/*
 * If one wants to wait for one or more wb_writeback_works, each work's
 * ->done should be set to a wb_completion defined using the following
 * macro.  Once all work items are issued with wb_queue_work(), the caller
 * can wait for the completion of all using wb_wait_for_completion().  Work
 * items which are waited upon aren't freed automatically on completion.
 */
#define WB_COMPLETION_INIT(bdi)		__WB_COMPLETION_INIT(&(bdi)->wb_waitq)

#define DEFINE_WB_COMPLETION(cmpl, bdi)	\
	struct wb_completion cmpl = WB_COMPLETION_INIT(bdi)
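
/*
 * Example (illustrative sketch, not compiled): the waiting pattern the
 * comment above describes, as used inside fs/fs-writeback.c where
 * wb_queue_work(), wb_wait_for_completion() and struct wb_writeback_work
 * are defined.  The completion starts at 1, each queued work increments
 * it, and the waiter drops the initial count and sleeps on bdi->wb_waitq
 * until it reaches zero.
 *
 *	DEFINE_WB_COMPLETION(done, bdi);
 *
 *	work->done = &done;		// all queued works share one completion
 *	wb_queue_work(wb, work);	// bumps done.cnt for this work
 *	...
 *	wb_wait_for_completion(&done);	// sleeps until every work completes
 */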

/*
 * Each wb (bdi_writeback) can perform writeback operations, and is
 * measured and throttled, independently.  Without cgroup writeback, each
 * bdi (backing_dev_info) is served by its embedded bdi->wb.
 *
 * On the default hierarchy, blkcg implicitly enables memcg.  This allows
 * using memcg's page ownership for attributing writeback IOs, and every
 * memcg - blkcg combination can be served by its own wb by assigning a
 * dedicated wb to each memcg, which enables isolation across different
 * cgroups and propagation of IO back pressure from the IO layer up to the
 * tasks which are generating the dirty pages to be written back.
 *
 * A cgroup wb is indexed on its bdi by the ID of the associated memcg,
 * refcounted with the number of inodes attached to it, and pins the memcg
 * and the corresponding blkcg.  Because the blkcg associated with a memcg
 * may change when blkcg is disabled and re-enabled higher up in the
 * hierarchy, a wb's blkcg is checked after lookup and the wb is removed
 * from the index on mismatch so that a new wb for the combination can be
 * created.
 */
struct bdi_writeback {
	struct backing_dev_info *bdi;	/* our parent bdi */

	unsigned long state;		/* Always use atomic bitops on this */
	unsigned long last_old_flush;	/* last old data flush */

	struct list_head b_dirty;	/* dirty inodes */
	struct list_head b_io;		/* parked for writeback */
	struct list_head b_more_io;	/* parked for more writeback */
	struct list_head b_dirty_time;	/* time stamps are dirty */
	spinlock_t list_lock;		/* protects the b_* lists */

	struct percpu_counter stat[NR_WB_STAT_ITEMS];

	unsigned long congested;	/* WB_[a]sync_congested flags */

	unsigned long bw_time_stamp;	/* last time write bw was updated */
	unsigned long dirtied_stamp;
	unsigned long written_stamp;	/* pages written at bw_time_stamp */
	unsigned long write_bandwidth;	/* the estimated write bandwidth */
	unsigned long avg_write_bandwidth; /* further smoothed write bw, > 0 */

	/*
	 * The base dirty throttle rate, recalculated every 200ms.
	 * All the bdi tasks' dirty rate will be curbed under it.
	 * @dirty_ratelimit tracks the estimated @balanced_dirty_ratelimit
	 * in small steps and is much smoother and more stable than the
	 * latter.
	 */
	unsigned long dirty_ratelimit;
	unsigned long balanced_dirty_ratelimit;

	struct fprop_local_percpu completions;
	int dirty_exceeded;
	enum wb_reason start_all_reason;

	spinlock_t work_lock;		/* protects work_list & dwork scheduling */
	struct list_head work_list;
	struct delayed_work dwork;	/* work item used for writeback */

	unsigned long dirty_sleep;	/* last wait */

	struct list_head bdi_node;	/* anchored at bdi->wb_list */

#ifdef CONFIG_CGROUP_WRITEBACK
	struct percpu_ref refcnt;	/* used only for !root wb's */
	struct fprop_local_percpu memcg_completions;
	struct cgroup_subsys_state *memcg_css;	/* the associated memcg */
	struct cgroup_subsys_state *blkcg_css;	/* and blkcg */
	struct list_head memcg_node;	/* anchored at memcg->cgwb_list */
	struct list_head blkcg_node;	/* anchored at blkcg->cgwb_list */

	union {
		struct work_struct release_work;
		struct rcu_head rcu;
	};
#endif

	ANDROID_KABI_RESERVE(1);
	ANDROID_KABI_RESERVE(2);
};
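
/*
 * Example (illustrative, not compiled): the stat[] percpu counters above
 * are updated through small helpers in <linux/backing-dev.h>; the batch
 * size WB_STAT_BATCH bounds how far the per-CPU deltas may drift before
 * being folded into the global count, trading accuracy for scalability:
 *
 *	static inline void __add_wb_stat(struct bdi_writeback *wb,
 *					 enum wb_stat_item item, s64 amount)
 *	{
 *		percpu_counter_add_batch(&wb->stat[item], amount, WB_STAT_BATCH);
 *	}
 *
 *	__add_wb_stat(wb, WB_WRITEBACK, 1);	// a page entered writeback
 *	__add_wb_stat(wb, WB_WRITEBACK, -1);	// ...and completed
 */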

struct backing_dev_info {
	u64 id;
	struct rb_node rb_node;		/* keyed by ->id */
	struct list_head bdi_list;
	unsigned long ra_pages;		/* max readahead in PAGE_SIZE units */
	unsigned long io_pages;		/* max allowed IO size */

	struct kref refcnt;		/* Reference counter for the structure */
	unsigned int capabilities;	/* Device capabilities */
	unsigned int min_ratio;
	unsigned int max_ratio, max_prop_frac;

	/*
	 * Sum of avg_write_bw of wbs with dirty inodes.  > 0 if there are
	 * any dirty wbs, which is depended upon by bdi_has_dirty().
	 */
	atomic_long_t tot_write_bandwidth;

	struct bdi_writeback wb;	/* the root writeback info for this bdi */
	struct list_head wb_list;	/* list of all wbs */
#ifdef CONFIG_CGROUP_WRITEBACK
	struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */
	struct mutex cgwb_release_mutex;  /* protect shutdown of wb structs */
	struct rw_semaphore wb_switch_rwsem; /* no cgwb switch while syncing */
#endif
	wait_queue_head_t wb_waitq;

	struct device *dev;
	char dev_name[64];
	struct device *owner;

	struct timer_list laptop_mode_wb_timer;

#ifdef CONFIG_DEBUG_FS
	struct dentry *debug_dir;
#endif

	ANDROID_KABI_RESERVE(1);
	ANDROID_KABI_RESERVE(2);
};

enum {
	BLK_RW_ASYNC	= 0,
	BLK_RW_SYNC	= 1,
};

void clear_bdi_congested(struct backing_dev_info *bdi, int sync);
void set_bdi_congested(struct backing_dev_info *bdi, int sync);

struct wb_lock_cookie {
	bool locked;
	unsigned long flags;
};

#ifdef CONFIG_CGROUP_WRITEBACK

/**
 * wb_tryget - try to increment a wb's refcount
 * @wb: bdi_writeback to get
 */
static inline bool wb_tryget(struct bdi_writeback *wb)
{
	if (wb != &wb->bdi->wb)
		return percpu_ref_tryget(&wb->refcnt);
	return true;
}

/**
 * wb_get - increment a wb's refcount
 * @wb: bdi_writeback to get
 */
static inline void wb_get(struct bdi_writeback *wb)
{
	if (wb != &wb->bdi->wb)
		percpu_ref_get(&wb->refcnt);
}

/**
 * wb_put - decrement a wb's refcount
 * @wb: bdi_writeback to put
 */
static inline void wb_put(struct bdi_writeback *wb)
{
	if (WARN_ON_ONCE(!wb->bdi)) {
		/*
		 * A driver bug might cause a file to be removed before bdi
		 * was initialized.
		 */
		return;
	}

	if (wb != &wb->bdi->wb)
		percpu_ref_put(&wb->refcnt);
}

/**
 * wb_dying - is a wb dying?
 * @wb: bdi_writeback of interest
 *
 * Returns whether @wb is unlinked and being drained.
 */
static inline bool wb_dying(struct bdi_writeback *wb)
{
	return percpu_ref_is_dying(&wb->refcnt);
}

#else	/* CONFIG_CGROUP_WRITEBACK */

static inline bool wb_tryget(struct bdi_writeback *wb)
{
	return true;
}

static inline void wb_get(struct bdi_writeback *wb)
{
}

static inline void wb_put(struct bdi_writeback *wb)
{
}

static inline bool wb_dying(struct bdi_writeback *wb)
{
	return false;
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

#endif	/* __LINUX_BACKING_DEV_DEFS_H */
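
/*
 * Example (illustrative sketch): the wb_tryget()/wb_put() pair above is
 * what makes RCU-protected cgwb lookup safe.  A simplified version of the
 * pattern used by wb_get_lookup() in mm/backing-dev.c: walk the radix
 * tree under rcu_read_lock() and hand the wb out only if a reference can
 * still be acquired.
 *
 *	struct bdi_writeback *wb;
 *
 *	rcu_read_lock();
 *	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
 *	if (wb && !wb_tryget(wb))	// wb is dying, don't use it
 *		wb = NULL;
 *	rcu_read_unlock();
 *
 *	if (wb) {
 *		// ... use wb ...
 *		wb_put(wb);		// drop the acquired reference
 *	}
 */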