/*
 * Copyright (C) 2004-2005 IBM Corp. All Rights Reserved.
 * Copyright (C) 2006-2009 NEC Corporation.
 *
 * dm-queue-length.c
 *
 * Module Author: Stefan Bader, IBM
 * Modified by: Kiyoshi Ueda, NEC
 *
 * This file is released under the GPL.
 *
 * queue-length path selector - choose a path with the least number of
 * in-flight I/Os.
 */
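/*
 * Illustrative usage (not part of the original file): a dm-multipath
 * table line that picks this selector might look roughly like
 *
 *   0 2097152 multipath 0 0 1 1 queue-length 0 2 1 8:16 1 8:32 1
 *
 * i.e. no feature or hardware-handler arguments, one priority group
 * using "queue-length" with no selector arguments, and two paths with
 * one per-path argument each (the repeat count parsed by ql_add_path()
 * below). Device numbers, sizes and counts here are placeholders.
 */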

#include "dm.h"
#include "dm-path-selector.h"

#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/atomic.h>

#define DM_MSG_PREFIX	"multipath queue-length"
#define QL_MIN_IO	1
#define QL_VERSION	"0.2.0"

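/*
 * Per-selector state: the valid and failed path lists, both protected
 * by ->lock. Per-path state lives in struct path_info, reachable via
 * dm_path->pscontext.
 */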
struct selector {
	struct list_head	valid_paths;
	struct list_head	failed_paths;
	spinlock_t		lock;
};

struct path_info {
	struct list_head	list;
	struct dm_path		*path;
	unsigned int		repeat_count;
	atomic_t		qlen;	/* the number of in-flight I/Os */
};

static struct selector *alloc_selector(void)
{
	struct selector *s = kmalloc(sizeof(*s), GFP_KERNEL);

	if (s) {
		INIT_LIST_HEAD(&s->valid_paths);
		INIT_LIST_HEAD(&s->failed_paths);
		spin_lock_init(&s->lock);
	}

	return s;
}

static int ql_create(struct path_selector *ps, unsigned int argc, char **argv)
{
	struct selector *s = alloc_selector();

	if (!s)
		return -ENOMEM;

	ps->context = s;
	return 0;
}

static void ql_free_paths(struct list_head *paths)
{
	struct path_info *pi, *next;

	list_for_each_entry_safe(pi, next, paths, list) {
		list_del(&pi->list);
		kfree(pi);
	}
}

static void ql_destroy(struct path_selector *ps)
{
	struct selector *s = ps->context;

	ql_free_paths(&s->valid_paths);
	ql_free_paths(&s->failed_paths);
	kfree(s);
	ps->context = NULL;
}

static int ql_status(struct path_selector *ps, struct dm_path *path,
		     status_type_t type, char *result, unsigned int maxlen)
{
	unsigned int sz = 0;
	struct path_info *pi;

	/* When called with NULL path, return selector status/args. */
	if (!path)
		DMEMIT("0 ");
	else {
		pi = path->pscontext;

		switch (type) {
		case STATUSTYPE_INFO:
			DMEMIT("%d ", atomic_read(&pi->qlen));
			break;
		case STATUSTYPE_TABLE:
			DMEMIT("%u ", pi->repeat_count);
			break;
		case STATUSTYPE_IMA:
			*result = '\0';
			break;
		}
	}

	return sz;
}

static int ql_add_path(struct path_selector *ps, struct dm_path *path,
		       int argc, char **argv, char **error)
{
	struct selector *s = ps->context;
	struct path_info *pi;
	unsigned int repeat_count = QL_MIN_IO;
	char dummy;
	unsigned long flags;

	/*
	 * Arguments: [<repeat_count>]
	 *	<repeat_count>: The number of I/Os before switching path.
	 *			If not given, default (QL_MIN_IO) is used.
	 */
	if (argc > 1) {
		*error = "queue-length ps: incorrect number of arguments";
		return -EINVAL;
	}

	if ((argc == 1) && (sscanf(argv[0], "%u%c", &repeat_count, &dummy) != 1)) {
		*error = "queue-length ps: invalid repeat count";
		return -EINVAL;
	}

	if (repeat_count > 1) {
		DMWARN_LIMIT("repeat_count > 1 is deprecated, using 1 instead");
		repeat_count = 1;
	}

	/* Allocate the path information structure */
	pi = kmalloc(sizeof(*pi), GFP_KERNEL);
	if (!pi) {
		*error = "queue-length ps: Error allocating path information";
		return -ENOMEM;
	}

	pi->path = path;
	pi->repeat_count = repeat_count;
	atomic_set(&pi->qlen, 0);

	path->pscontext = pi;

	spin_lock_irqsave(&s->lock, flags);
	list_add_tail(&pi->list, &s->valid_paths);
	spin_unlock_irqrestore(&s->lock, flags);

	return 0;
}

static void ql_fail_path(struct path_selector *ps, struct dm_path *path)
{
	struct selector *s = ps->context;
	struct path_info *pi = path->pscontext;
	unsigned long flags;

	spin_lock_irqsave(&s->lock, flags);
	list_move(&pi->list, &s->failed_paths);
	spin_unlock_irqrestore(&s->lock, flags);
}

static int ql_reinstate_path(struct path_selector *ps, struct dm_path *path)
{
	struct selector *s = ps->context;
	struct path_info *pi = path->pscontext;
	unsigned long flags;

	spin_lock_irqsave(&s->lock, flags);
	list_move_tail(&pi->list, &s->valid_paths);
	spin_unlock_irqrestore(&s->lock, flags);

	return 0;
}

/*
 * Select a path having the minimum number of in-flight I/Os
 */
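/*
 * Implementation note: this is a linear scan of ->valid_paths that stops
 * early if a completely idle path (qlen == 0) is found. The chosen path
 * is moved to the tail of the list, so paths with equal queue lengths
 * end up being used in round-robin order.
 */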
static struct dm_path *ql_select_path(struct path_selector *ps, size_t nr_bytes)
{
	struct selector *s = ps->context;
	struct path_info *pi = NULL, *best = NULL;
	struct dm_path *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&s->lock, flags);
	if (list_empty(&s->valid_paths))
		goto out;

	list_for_each_entry(pi, &s->valid_paths, list) {
		if (!best ||
		    (atomic_read(&pi->qlen) < atomic_read(&best->qlen)))
			best = pi;

		if (!atomic_read(&best->qlen))
			break;
	}

	if (!best)
		goto out;

	/* Move most recently used to least preferred to evenly balance. */
	list_move_tail(&best->list, &s->valid_paths);

	ret = best->path;
out:
	spin_unlock_irqrestore(&s->lock, flags);
	return ret;
}

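/* Called when an I/O is dispatched to @path: count it as in-flight. */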
static int ql_start_io(struct path_selector *ps, struct dm_path *path,
		       size_t nr_bytes)
{
	struct path_info *pi = path->pscontext;

	atomic_inc(&pi->qlen);

	return 0;
}

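/* Called when an I/O issued to @path completes: drop the in-flight count. */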
static int ql_end_io(struct path_selector *ps, struct dm_path *path,
		     size_t nr_bytes, u64 start_time)
{
	struct path_info *pi = path->pscontext;

	atomic_dec(&pi->qlen);

	return 0;
}

static struct path_selector_type ql_ps = {
	.name		= "queue-length",
	.module		= THIS_MODULE,
	.table_args	= 1,
	.info_args	= 1,
	.create		= ql_create,
	.destroy	= ql_destroy,
	.status		= ql_status,
	.add_path	= ql_add_path,
	.fail_path	= ql_fail_path,
	.reinstate_path	= ql_reinstate_path,
	.select_path	= ql_select_path,
	.start_io	= ql_start_io,
	.end_io		= ql_end_io,
};

static int __init dm_ql_init(void)
{
	int r = dm_register_path_selector(&ql_ps);

	if (r < 0)
		DMERR("register failed %d", r);

	DMINFO("version " QL_VERSION " loaded");

	return r;
}

static void __exit dm_ql_exit(void)
{
	int r = dm_unregister_path_selector(&ql_ps);

	if (r < 0)
		DMERR("unregister failed %d", r);
}

module_init(dm_ql_init);
module_exit(dm_ql_exit);

MODULE_AUTHOR("Stefan Bader <Stefan.Bader at de.ibm.com>");
MODULE_DESCRIPTION(
	"(C) Copyright IBM Corp. 2004,2005 All Rights Reserved.\n"
	DM_NAME " path selector to balance the number of in-flight I/Os"
);
MODULE_LICENSE("GPL");