// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * pblk-rl.c - pblk's rate limiter for user I/O
 *
 */

#include "pblk.h"

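/* Re-arm the user I/O activity timer: user writes count as active for
 * another 5 seconds from now.
 */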
static void pblk_rl_kick_u_timer(struct pblk_rl *rl)
{
	mod_timer(&rl->u_timer, jiffies + msecs_to_jiffies(5000));
}

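/* Return true when the explicitly requested buffer space (rb_space) has been
 * fully consumed; rb_space is -1 when no such limit is set.
 */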
int pblk_rl_is_limit(struct pblk_rl *rl)
{
	int rb_space;

	rb_space = atomic_read(&rl->rb_space);

	return (rb_space == 0);
}

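/* Decide whether nr_entries of user I/O fit in the write buffer:
 * - NVM_IO_ERR if an explicit space limit (rb_space >= 0) would be exceeded,
 * - NVM_IO_REQUEUE if the current user budget (rb_user_max) is exhausted,
 * - NVM_IO_OK otherwise.
 */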
int pblk_rl_user_may_insert(struct pblk_rl *rl, int nr_entries)
{
	int rb_user_cnt = atomic_read(&rl->rb_user_cnt);
	int rb_space = atomic_read(&rl->rb_space);

	if (unlikely(rb_space >= 0) && (rb_space - nr_entries < 0))
		return NVM_IO_ERR;

	if (rb_user_cnt >= rl->rb_user_max)
		return NVM_IO_REQUEUE;

	return NVM_IO_OK;
}

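/* Charge nr_entries against the explicit buffer space limit, if one is set */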
void pblk_rl_inserted(struct pblk_rl *rl, int nr_entries)
{
	int rb_space = atomic_read(&rl->rb_space);

	if (unlikely(rb_space >= 0))
		atomic_sub(nr_entries, &rl->rb_space);
}

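/* GC may insert unless its budget (rb_gc_max) is exhausted while user I/O
 * is active; when user I/O is idle, GC is not limited here.
 */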
int pblk_rl_gc_may_insert(struct pblk_rl *rl, int nr_entries)
{
	int rb_gc_cnt = atomic_read(&rl->rb_gc_cnt);
	int rb_user_active;

	/* If there is no user I/O let GC take over space on the write buffer */
	rb_user_active = READ_ONCE(rl->rb_user_active);
	return (!(rb_gc_cnt >= rl->rb_gc_max && rb_user_active));
}

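/* Account nr_entries of user writes in the write buffer and mark user I/O
 * as active.
 */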
void pblk_rl_user_in(struct pblk_rl *rl, int nr_entries)
{
	atomic_add(nr_entries, &rl->rb_user_cnt);

	/* Release user I/O state. Protect from GC */
	smp_store_release(&rl->rb_user_active, 1);
	pblk_rl_kick_u_timer(rl);
}

void pblk_rl_werr_line_in(struct pblk_rl *rl)
{
	atomic_inc(&rl->werr_lines);
}

void pblk_rl_werr_line_out(struct pblk_rl *rl)
{
	atomic_dec(&rl->werr_lines);
}

void pblk_rl_gc_in(struct pblk_rl *rl, int nr_entries)
{
	atomic_add(nr_entries, &rl->rb_gc_cnt);
}

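/* Release user and GC accounting for entries that have left the
 * write buffer.
 */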
void pblk_rl_out(struct pblk_rl *rl, int nr_user, int nr_gc)
{
	atomic_sub(nr_user, &rl->rb_user_cnt);
	atomic_sub(nr_gc, &rl->rb_gc_cnt);
}

unsigned long pblk_rl_nr_free_blks(struct pblk_rl *rl)
{
	return atomic_read(&rl->free_blocks);
}

unsigned long pblk_rl_nr_user_free_blks(struct pblk_rl *rl)
{
	return atomic_read(&rl->free_user_blocks);
}

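/* Recompute the split of the write buffer budget between user I/O and GC
 * based on the number of free user blocks:
 * - at or above the high watermark the whole budget goes to user I/O,
 *   except for a small GC budget when lines with write errors need recovery;
 * - below it, the user share shrinks with the free block count in units of
 *   rate-limiter windows; e.g. with high_pw = 10 and rb_windows_pw = 5,
 *   free_blocks = 512 maps to 512 >> 5 = 16 user windows;
 * - at or below the reserved block count, user I/O is stopped and GC gets
 *   the whole budget.
 */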
static void __pblk_rl_update_rates(struct pblk_rl *rl,
				   unsigned long free_blocks)
{
	struct pblk *pblk = container_of(rl, struct pblk, rl);
	int max = rl->rb_budget;
	int werr_gc_needed = atomic_read(&rl->werr_lines);

	if (free_blocks >= rl->high) {
		if (werr_gc_needed) {
			/* Allocate a small budget for recovering
			 * lines with write errors
			 */
			rl->rb_gc_max = 1 << rl->rb_windows_pw;
			rl->rb_user_max = max - rl->rb_gc_max;
			rl->rb_state = PBLK_RL_WERR;
		} else {
			rl->rb_user_max = max;
			rl->rb_gc_max = 0;
			rl->rb_state = PBLK_RL_OFF;
		}
	} else if (free_blocks < rl->high) {
		int shift = rl->high_pw - rl->rb_windows_pw;
		int user_windows = free_blocks >> shift;
		int user_max = user_windows << ilog2(NVM_MAX_VLBA);

		rl->rb_user_max = user_max;
		rl->rb_gc_max = max - user_max;

		if (free_blocks <= rl->rsv_blocks) {
			rl->rb_user_max = 0;
			rl->rb_gc_max = max;
		}

		/* In the worst case, we will need to GC lines in the low list
		 * (high valid sector count). If there are lines to GC on high
		 * or mid lists, these will be prioritized
		 */
		rl->rb_state = PBLK_RL_LOW;
	}

	if (rl->rb_state != PBLK_RL_OFF)
		pblk_gc_should_start(pblk);
	else
		pblk_gc_should_stop(pblk);
}

void pblk_rl_update_rates(struct pblk_rl *rl)
{
	__pblk_rl_update_rates(rl, pblk_rl_nr_user_free_blks(rl));
}

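/* Credit a line's blocks to the free block counters and recompute the
 * user/GC rates.
 */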
void pblk_rl_free_lines_inc(struct pblk_rl *rl, struct pblk_line *line)
{
	int blk_in_line = atomic_read(&line->blk_in_line);
	int free_blocks;

	atomic_add(blk_in_line, &rl->free_blocks);
	free_blocks = atomic_add_return(blk_in_line, &rl->free_user_blocks);

	__pblk_rl_update_rates(rl, free_blocks);
}

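/* Debit a line's blocks from the free block counters (free_user_blocks only
 * when @used is true) and recompute the user/GC rates.
 */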
void pblk_rl_free_lines_dec(struct pblk_rl *rl, struct pblk_line *line,
			    bool used)
{
	int blk_in_line = atomic_read(&line->blk_in_line);
	int free_blocks;

	atomic_sub(blk_in_line, &rl->free_blocks);

	if (used)
		free_blocks = atomic_sub_return(blk_in_line,
						&rl->free_user_blocks);
	else
		free_blocks = atomic_read(&rl->free_user_blocks);

	__pblk_rl_update_rates(rl, free_blocks);
}

int pblk_rl_high_thrs(struct pblk_rl *rl)
{
	return rl->high;
}

int pblk_rl_max_io(struct pblk_rl *rl)
{
	return rl->rb_max_io;
}

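/* Timer callback: no user write has kicked the timer for 5 seconds, so mark
 * user I/O as inactive.
 */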
static void pblk_rl_u_timer(struct timer_list *t)
{
	struct pblk_rl *rl = from_timer(rl, t, u_timer);

	/* Release user I/O state. Protect from GC */
	smp_store_release(&rl->rb_user_active, 0);
}

void pblk_rl_free(struct pblk_rl *rl)
{
	del_timer(&rl->u_timer);
}

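/* Set up the rate limiter: derive the high watermark from pblk->op_blks
 * minus metadata and one line's worth of blocks, size the rate-limiter
 * windows from the write buffer budget, and start with the whole budget
 * assigned to user I/O.
 */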
void pblk_rl_init(struct pblk_rl *rl, int budget, int threshold)
{
	struct pblk *pblk = container_of(rl, struct pblk, rl);
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	int sec_meta, blk_meta;
	unsigned int rb_windows;

	/* Consider sectors used for metadata */
	sec_meta = (lm->smeta_sec + lm->emeta_sec[0]) * l_mg->nr_free_lines;
	blk_meta = DIV_ROUND_UP(sec_meta, geo->clba);

	rl->high = pblk->op_blks - blk_meta - lm->blk_per_line;
	rl->high_pw = get_count_order(rl->high);

	rl->rsv_blocks = pblk_get_min_chks(pblk);

	/* This will always be a power-of-2 */
	rb_windows = budget / NVM_MAX_VLBA;
	rl->rb_windows_pw = get_count_order(rb_windows);

	/* To start with, the whole buffer is available to user I/O writers */
	rl->rb_budget = budget;
	rl->rb_user_max = budget;
	rl->rb_gc_max = 0;
	rl->rb_state = PBLK_RL_HIGH;

	/* Maximize I/O size and ensure that the back threshold is respected */
	if (threshold)
		rl->rb_max_io = budget - pblk->min_write_pgs_data - threshold;
	else
		rl->rb_max_io = budget - pblk->min_write_pgs_data - 1;

	atomic_set(&rl->rb_user_cnt, 0);
	atomic_set(&rl->rb_gc_cnt, 0);
	atomic_set(&rl->rb_space, -1);
	atomic_set(&rl->werr_lines, 0);

	timer_setup(&rl->u_timer, pblk_rl_u_timer, 0);

	rl->rb_user_active = 0;
	rl->rb_gc_active = 0;
}