/*
 * zfcp device driver
 *
 * Header file for zfcp qdio interface
 *
 * Copyright IBM Corporation 2010
 */

#ifndef ZFCP_QDIO_H
#define ZFCP_QDIO_H

#include <asm/qdio.h>

#define ZFCP_QDIO_SBALE_LEN	PAGE_SIZE

/* Max SBALS for chaining */
#define ZFCP_QDIO_MAX_SBALS_PER_REQ	36

/**
 * struct zfcp_qdio - basic qdio data structure
 * @res_q: response queue
 * @req_q: request queue
 * @req_q_idx: index of next free buffer
 * @req_q_free: number of free buffers in queue
 * @stat_lock: lock to protect req_q_util and req_q_time
 * @req_q_lock: lock to serialize access to request queue
 * @req_q_time: time of last fill level change
 * @req_q_util: used for accounting
 * @req_q_full: queue full incidents
 * @req_q_wq: used to wait for SBAL availability
 * @adapter: adapter used in conjunction with this qdio structure
 * @max_sbale_per_sbal: maximum number of SBALEs per SBAL
 * @max_sbale_per_req: maximum number of SBALEs per request
 */
struct zfcp_qdio {
	struct qdio_buffer	*res_q[QDIO_MAX_BUFFERS_PER_Q];
	struct qdio_buffer	*req_q[QDIO_MAX_BUFFERS_PER_Q];
	u8			req_q_idx;
	atomic_t		req_q_free;
	spinlock_t		stat_lock;
	spinlock_t		req_q_lock;
	unsigned long long	req_q_time;
	u64			req_q_util;
	atomic_t		req_q_full;
	wait_queue_head_t	req_q_wq;
	struct zfcp_adapter	*adapter;
	u16			max_sbale_per_sbal;
	u16			max_sbale_per_req;
};

/**
 * struct zfcp_qdio_req - qdio queue related values for a request
 * @sbtype: sbal type flags for sbale 0
 * @sbal_number: number of sbals used for this request
 * @sbal_first: first sbal for this request
 * @sbal_last: last sbal for this request
 * @sbal_limit: last possible sbal for this request
 * @sbale_curr: current sbale at creation of this request
 * @sbal_response: sbal used in interrupt
 * @qdio_outb_usage: usage of outbound queue
 */
struct zfcp_qdio_req {
	u8	sbtype;
	u8	sbal_number;
	u8	sbal_first;
	u8	sbal_last;
	u8	sbal_limit;
	u8	sbale_curr;
	u8	sbal_response;
	u16	qdio_outb_usage;
};

/**
 * zfcp_qdio_sbale_req - return pointer to sbale on req_q for a request
 * @qdio: pointer to struct zfcp_qdio
 * @q_req: pointer to struct zfcp_qdio_req
 * Returns: pointer to qdio_buffer_element (sbale) structure
 */
static inline struct qdio_buffer_element *
zfcp_qdio_sbale_req(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
	return &qdio->req_q[q_req->sbal_last]->element[0];
}

/**
 * zfcp_qdio_sbale_curr - return current sbale on req_q for a request
 * @qdio: pointer to struct zfcp_qdio
 * @q_req: pointer to struct zfcp_qdio_req
 * Returns: pointer to qdio_buffer_element (sbale) structure
 */
static inline struct qdio_buffer_element *
zfcp_qdio_sbale_curr(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
	return &qdio->req_q[q_req->sbal_last]->element[q_req->sbale_curr];
}

/**
 * zfcp_qdio_req_init - initialize qdio request
 * @qdio: request queue where to start putting the request
 * @q_req: the qdio request to start
 * @req_id: The request id
 * @sbtype: type flags to set for all sbals
 * @data: First data block
 * @len: Length of first data block
 *
 * This is the start of putting the request into the queue; the last
 * step is passing the request to zfcp_qdio_send. The request queue
 * lock must be held during the whole process from init to send.
 */
static inline
void zfcp_qdio_req_init(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
			unsigned long req_id, u8 sbtype, void *data, u32 len)
{
	struct qdio_buffer_element *sbale;
	int count = min(atomic_read(&qdio->req_q_free),
			ZFCP_QDIO_MAX_SBALS_PER_REQ);

	q_req->sbal_first = q_req->sbal_last = qdio->req_q_idx;
	q_req->sbal_number = 1;
	q_req->sbtype = sbtype;
	q_req->sbale_curr = 1;
	q_req->sbal_limit = (q_req->sbal_first + count - 1)
					% QDIO_MAX_BUFFERS_PER_Q;

	sbale = zfcp_qdio_sbale_req(qdio, q_req);
	sbale->addr = (void *) req_id;
	sbale->eflags = 0;
	sbale->sflags = SBAL_SFLAGS0_COMMAND | sbtype;

	if (unlikely(!data))
		return;
	sbale++;
	sbale->addr = data;
	sbale->length = len;
}

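/*
 * Illustrative sketch, not part of the driver: a caller would typically
 * build and submit a request along these lines, holding req_q_lock from
 * init to send as required above. zfcp_qdio_send() is implemented in
 * zfcp_qdio.c; req_id, payload and payload_len are made-up example values.
 *
 *	spin_lock_irqsave(&qdio->req_q_lock, flags);
 *	zfcp_qdio_req_init(qdio, &q_req, req_id, SBAL_SFLAGS0_TYPE_WRITE,
 *			   payload, payload_len);
 *	zfcp_qdio_set_sbale_last(qdio, &q_req);
 *	retval = zfcp_qdio_send(qdio, &q_req);
 *	spin_unlock_irqrestore(&qdio->req_q_lock, flags);
 */
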
/**
 * zfcp_qdio_fill_next - Fill next sbale, only for single sbal requests
 * @qdio: pointer to struct zfcp_qdio
 * @q_req: pointer to struct zfcp_qdio_req
 * @data: pointer to data block
 * @len: length of data block
 *
 * This is only required for single sbal requests; calling it when
 * wrapping around to the next sbal is a bug.
 */
static inline
void zfcp_qdio_fill_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
			 void *data, u32 len)
{
	struct qdio_buffer_element *sbale;

	BUG_ON(q_req->sbale_curr == qdio->max_sbale_per_sbal - 1);
	q_req->sbale_curr++;
	sbale = zfcp_qdio_sbale_curr(qdio, q_req);
	sbale->addr = data;
	sbale->length = len;
}

/**
 * zfcp_qdio_set_sbale_last - set last entry flag in current sbale
 * @qdio: pointer to struct zfcp_qdio
 * @q_req: pointer to struct zfcp_qdio_req
 */
static inline
void zfcp_qdio_set_sbale_last(struct zfcp_qdio *qdio,
			      struct zfcp_qdio_req *q_req)
{
	struct qdio_buffer_element *sbale;

	sbale = zfcp_qdio_sbale_curr(qdio, q_req);
	sbale->eflags |= SBAL_EFLAGS_LAST_ENTRY;
}

/**
 * zfcp_qdio_sg_one_sbale - check if one sbale is enough for sg data
 * @sg: The scatterlist where to check the data size
 *
 * Returns: 1 when one sbale is enough for the data in the scatterlist,
 *	    0 if not.
 */
static inline
int zfcp_qdio_sg_one_sbale(struct scatterlist *sg)
{
	return sg_is_last(sg) && sg->length <= ZFCP_QDIO_SBALE_LEN;
}

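/*
 * Illustrative sketch, not part of the driver: a caller might use this
 * check to pick between the cheap single-SBALE path and mapping the whole
 * scatterlist; zfcp_qdio_sbals_from_sg() is the mapping helper implemented
 * in zfcp_qdio.c, and the error handling here is only an example.
 *
 *	if (zfcp_qdio_sg_one_sbale(sg))
 *		zfcp_qdio_fill_next(qdio, &q_req, sg_virt(sg), sg->length);
 *	else if (zfcp_qdio_sbals_from_sg(qdio, &q_req, sg))
 *		return -EIO;
 */
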
/**
 * zfcp_qdio_skip_to_last_sbale - skip to last sbale in sbal
 * @qdio: pointer to struct zfcp_qdio
 * @q_req: The current zfcp_qdio_req
 */
static inline
void zfcp_qdio_skip_to_last_sbale(struct zfcp_qdio *qdio,
				  struct zfcp_qdio_req *q_req)
{
	q_req->sbale_curr = qdio->max_sbale_per_sbal - 1;
}

/**
 * zfcp_qdio_sbal_limit - set the sbal limit for a request in q_req
 * @qdio: pointer to struct zfcp_qdio
 * @q_req: The current zfcp_qdio_req
 * @max_sbals: maximum number of SBALs allowed
 */
static inline
void zfcp_qdio_sbal_limit(struct zfcp_qdio *qdio,
			  struct zfcp_qdio_req *q_req, int max_sbals)
{
	int count = min(atomic_read(&qdio->req_q_free), max_sbals);

	q_req->sbal_limit = (q_req->sbal_first + count - 1) %
				QDIO_MAX_BUFFERS_PER_Q;
}

/**
 * zfcp_qdio_set_data_div - set data division count
 * @qdio: pointer to struct zfcp_qdio
 * @q_req: The current zfcp_qdio_req
 * @count: The data division count
 */
static inline
void zfcp_qdio_set_data_div(struct zfcp_qdio *qdio,
			    struct zfcp_qdio_req *q_req, u32 count)
{
	struct qdio_buffer_element *sbale;

	sbale = qdio->req_q[q_req->sbal_first]->element;
	sbale->length = count;
}

/**
 * zfcp_qdio_sbale_count - count sbales used
 * @sg: pointer to struct scatterlist
 */
static inline
unsigned int zfcp_qdio_sbale_count(struct scatterlist *sg)
{
	unsigned int count = 0;

	for (; sg; sg = sg_next(sg))
		count++;

	return count;
}

/**
 * zfcp_qdio_real_bytes - count bytes used
 * @sg: pointer to struct scatterlist
 */
static inline
unsigned int zfcp_qdio_real_bytes(struct scatterlist *sg)
{
	unsigned int real_bytes = 0;

	for (; sg; sg = sg_next(sg))
		real_bytes += sg->length;

	return real_bytes;
}

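/*
 * Illustrative sketch, not part of the driver: a caller might combine the
 * two helpers above to reject a scatterlist that cannot be mapped into one
 * request and to account for the payload size; the error value is only an
 * example.
 *
 *	if (zfcp_qdio_sbale_count(sg) > qdio->max_sbale_per_req)
 *		return -EMSGSIZE;
 *	bytes = zfcp_qdio_real_bytes(sg);
 */
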
/**
 * zfcp_qdio_set_scount - set SBAL count value
 * @qdio: pointer to struct zfcp_qdio
 * @q_req: The current zfcp_qdio_req
 */
static inline
void zfcp_qdio_set_scount(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
	struct qdio_buffer_element *sbale;

	sbale = qdio->req_q[q_req->sbal_first]->element;
	sbale->scount = q_req->sbal_number - 1;
}

#endif /* ZFCP_QDIO_H */