1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Universal Flash Storage Host Performance Booster
4  *
5  * Copyright (C) 2017-2021 Samsung Electronics Co., Ltd.
6  *
7  * Authors:
8  *	Yongmyung Lee <ymhungry.lee@samsung.com>
9  *	Jinyoung Choi <j-young.choi@samsung.com>
10  */
11 
12 #include <asm/unaligned.h>
13 #include <linux/async.h>
14 
15 #include "ufshcd.h"
16 #include "ufshcd-add-info.h"
17 #include "ufshpb.h"
18 #include "../sd.h"
19 
20 #define ACTIVATION_THRESHOLD 8 /* 8 IOs */
21 #define READ_TO_MS 1000
22 #define READ_TO_EXPIRIES 100
23 #define POLLING_INTERVAL_MS 200
24 #define THROTTLE_MAP_REQ_DEFAULT 1
25 
26 /* memory management */
27 static struct kmem_cache *ufshpb_mctx_cache;
28 static mempool_t *ufshpb_mctx_pool;
29 static mempool_t *ufshpb_page_pool;
30 /* A cache size of 2MB can cache ppn in the 1GB range. */
31 static unsigned int ufshpb_host_map_kbytes = 2048;
32 static int tot_active_srgn_pages;
33 
34 static struct workqueue_struct *ufshpb_wq;
35 
36 static void ufshpb_update_active_info(struct ufshpb_lu *hpb, int rgn_idx,
37 				      int srgn_idx);
38 
39 static inline struct ufshpb_dev_info *ufs_hba_to_hpb(struct ufs_hba *hba)
40 {
41 	return &ufs_hba_add_info(hba)->hpb_dev;
42 }
43 
44 bool ufshpb_is_allowed(struct ufs_hba *hba)
45 {
46 	return !(ufs_hba_to_hpb(hba)->hpb_disabled);
47 }
48 
49 /* HPB version 1.0 is referred to as the legacy version. */
50 bool ufshpb_is_legacy(struct ufs_hba *hba)
51 {
52 	return ufs_hba_to_hpb(hba)->is_legacy;
53 }
54 
55 static struct ufshpb_lu *ufshpb_get_hpb_data(struct scsi_device *sdev)
56 {
57 	return sdev->hostdata;
58 }
59 
60 static int ufshpb_get_state(struct ufshpb_lu *hpb)
61 {
62 	return atomic_read(&hpb->hpb_state);
63 }
64 
65 static void ufshpb_set_state(struct ufshpb_lu *hpb, int state)
66 {
67 	atomic_set(&hpb->hpb_state, state);
68 }
69 
70 static int ufshpb_is_valid_srgn(struct ufshpb_region *rgn,
71 				struct ufshpb_subregion *srgn)
72 {
73 	return rgn->rgn_state != HPB_RGN_INACTIVE &&
74 		srgn->srgn_state == HPB_SRGN_VALID;
75 }
76 
77 static bool ufshpb_is_read_cmd(struct scsi_cmnd *cmd)
78 {
79 	return req_op(cmd->request) == REQ_OP_READ;
80 }
81 
82 static bool ufshpb_is_write_or_discard(struct scsi_cmnd *cmd)
83 {
84 	return op_is_write(req_op(cmd->request)) ||
85 	       op_is_discard(req_op(cmd->request));
86 }
87 
88 static bool ufshpb_is_supported_chunk(struct ufshpb_lu *hpb, int transfer_len)
89 {
90 	return transfer_len <= hpb->pre_req_max_tr_len;
91 }
92 
93 /*
94  * In this driver, the WRITE_BUFFER command supports 36KB (len=9) to 1MB
95  * (len=256) by default. The transfer_len range can be changed through sysfs.
96  */
97 static inline bool ufshpb_is_required_wb(struct ufshpb_lu *hpb, int len)
98 {
99 	return len > hpb->pre_req_min_tr_len &&
100 	       len <= hpb->pre_req_max_tr_len;
101 }
102 
103 static bool ufshpb_is_general_lun(int lun)
104 {
105 	return lun < UFS_UPIU_MAX_UNIT_NUM_ID;
106 }
107 
108 static bool ufshpb_is_pinned_region(struct ufshpb_lu *hpb, int rgn_idx)
109 {
110 	if (hpb->lu_pinned_end != PINNED_NOT_SET &&
111 	    rgn_idx >= hpb->lu_pinned_start &&
112 	    rgn_idx <= hpb->lu_pinned_end)
113 		return true;
114 
115 	return false;
116 }
117 
118 static void ufshpb_kick_map_work(struct ufshpb_lu *hpb)
119 {
120 	bool ret = false;
121 	unsigned long flags;
122 
123 	if (ufshpb_get_state(hpb) != HPB_PRESENT)
124 		return;
125 
126 	spin_lock_irqsave(&hpb->rsp_list_lock, flags);
127 	if (!list_empty(&hpb->lh_inact_rgn) || !list_empty(&hpb->lh_act_srgn))
128 		ret = true;
129 	spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
130 
131 	if (ret)
132 		queue_work(ufshpb_wq, &hpb->map_work);
133 }
134 
135 static bool ufshpb_is_hpb_rsp_valid(struct ufs_hba *hba,
136 				    struct ufshcd_lrb *lrbp,
137 				    struct utp_hpb_rsp *rsp_field)
138 {
139 	/* Check HPB_UPDATE_ALERT */
140 	if (!(lrbp->ucd_rsp_ptr->header.dword_2 &
141 	      UPIU_HEADER_DWORD(0, 2, 0, 0)))
142 		return false;
143 
144 	if (be16_to_cpu(rsp_field->sense_data_len) != DEV_SENSE_SEG_LEN ||
145 	    rsp_field->desc_type != DEV_DES_TYPE ||
146 	    rsp_field->additional_len != DEV_ADDITIONAL_LEN ||
147 	    rsp_field->active_rgn_cnt > MAX_ACTIVE_NUM ||
148 	    rsp_field->inactive_rgn_cnt > MAX_INACTIVE_NUM ||
149 	    rsp_field->hpb_op == HPB_RSP_NONE ||
150 	    (rsp_field->hpb_op == HPB_RSP_REQ_REGION_UPDATE &&
151 	     !rsp_field->active_rgn_cnt && !rsp_field->inactive_rgn_cnt))
152 		return false;
153 
154 	if (!ufshpb_is_general_lun(rsp_field->lun)) {
155 		dev_warn(hba->dev, "ufshpb: lun(%d) not supported\n",
156 			 lrbp->lun);
157 		return false;
158 	}
159 
160 	return true;
161 }
162 
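/*
 * Walk every subregion that the range [srgn_offset, srgn_offset + cnt)
 * touches, starting at rgn_idx/srgn_idx. For WRITE/DISCARD (set_dirty) the
 * covered ppn_dirty bits are set; otherwise, in host control mode, the
 * per-(sub)region read counters are bumped and the subregion is queued for
 * activation once it reaches the activation threshold.
 */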
163 static void ufshpb_iterate_rgn(struct ufshpb_lu *hpb, int rgn_idx, int srgn_idx,
164 			       int srgn_offset, int cnt, bool set_dirty)
165 {
166 	struct ufshpb_region *rgn;
167 	struct ufshpb_subregion *srgn, *prev_srgn = NULL;
168 	int set_bit_len;
169 	int bitmap_len;
170 	unsigned long flags;
171 
172 next_srgn:
173 	rgn = hpb->rgn_tbl + rgn_idx;
174 	srgn = rgn->srgn_tbl + srgn_idx;
175 
176 	if (likely(!srgn->is_last))
177 		bitmap_len = hpb->entries_per_srgn;
178 	else
179 		bitmap_len = hpb->last_srgn_entries;
180 
181 	if ((srgn_offset + cnt) > bitmap_len)
182 		set_bit_len = bitmap_len - srgn_offset;
183 	else
184 		set_bit_len = cnt;
185 
186 	spin_lock_irqsave(&hpb->rgn_state_lock, flags);
187 	if (rgn->rgn_state != HPB_RGN_INACTIVE) {
188 		if (set_dirty) {
189 		    if (srgn->srgn_state == HPB_SRGN_VALID)
190 			    bitmap_set(srgn->mctx->ppn_dirty, srgn_offset,
191 				       set_bit_len);
192 		} else if (hpb->is_hcm) {
193 			/* rewind the read timer for lru regions */
194 			rgn->read_timeout = ktime_add_ms(ktime_get(),
195 					rgn->hpb->params.read_timeout_ms);
196 			rgn->read_timeout_expiries =
197 				rgn->hpb->params.read_timeout_expiries;
198 		}
199 	}
200 	spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
201 
202 	if (hpb->is_hcm && prev_srgn != srgn) {
203 		bool activate = false;
204 
205 		spin_lock(&rgn->rgn_lock);
206 		if (set_dirty) {
207 			rgn->reads -= srgn->reads;
208 			srgn->reads = 0;
209 			set_bit(RGN_FLAG_DIRTY, &rgn->rgn_flags);
210 		} else {
211 			srgn->reads++;
212 			rgn->reads++;
213 			if (srgn->reads == hpb->params.activation_thld)
214 				activate = true;
215 		}
216 		spin_unlock(&rgn->rgn_lock);
217 
218 		if (activate ||
219 		    test_and_clear_bit(RGN_FLAG_UPDATE, &rgn->rgn_flags)) {
220 			spin_lock_irqsave(&hpb->rsp_list_lock, flags);
221 			ufshpb_update_active_info(hpb, rgn_idx, srgn_idx);
222 			spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
223 			dev_dbg(&hpb->sdev_ufs_lu->sdev_dev,
224 				"activate region %d-%d\n", rgn_idx, srgn_idx);
225 		}
226 
227 		prev_srgn = srgn;
228 	}
229 
230 	srgn_offset = 0;
231 	if (++srgn_idx == hpb->srgns_per_rgn) {
232 		srgn_idx = 0;
233 		rgn_idx++;
234 	}
235 
236 	cnt -= set_bit_len;
237 	if (cnt > 0)
238 		goto next_srgn;
239 }
240 
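/*
 * Return true if any L2P entry covered by the request range is dirty or
 * otherwise unusable (inactive region, invalid subregion, missing mctx),
 * in which case the caller falls back to a normal READ.
 */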
241 static bool ufshpb_test_ppn_dirty(struct ufshpb_lu *hpb, int rgn_idx,
242 				  int srgn_idx, int srgn_offset, int cnt)
243 {
244 	struct ufshpb_region *rgn;
245 	struct ufshpb_subregion *srgn;
246 	int bitmap_len;
247 	int bit_len;
248 
249 next_srgn:
250 	rgn = hpb->rgn_tbl + rgn_idx;
251 	srgn = rgn->srgn_tbl + srgn_idx;
252 
253 	if (likely(!srgn->is_last))
254 		bitmap_len = hpb->entries_per_srgn;
255 	else
256 		bitmap_len = hpb->last_srgn_entries;
257 
258 	if (!ufshpb_is_valid_srgn(rgn, srgn))
259 		return true;
260 
261 	/*
262 	 * If the region state is active, mctx must be allocated.
263 	 * In this case, check whether the region has been evicted or
264 	 * the mctx allocation failed.
265 	 */
266 	if (unlikely(!srgn->mctx)) {
267 		dev_err(&hpb->sdev_ufs_lu->sdev_dev,
268 			"no mctx in region %d subregion %d.\n",
269 			srgn->rgn_idx, srgn->srgn_idx);
270 		return true;
271 	}
272 
273 	if ((srgn_offset + cnt) > bitmap_len)
274 		bit_len = bitmap_len - srgn_offset;
275 	else
276 		bit_len = cnt;
277 
278 	if (find_next_bit(srgn->mctx->ppn_dirty, bit_len + srgn_offset,
279 			  srgn_offset) < bit_len + srgn_offset)
280 		return true;
281 
282 	srgn_offset = 0;
283 	if (++srgn_idx == hpb->srgns_per_rgn) {
284 		srgn_idx = 0;
285 		rgn_idx++;
286 	}
287 
288 	cnt -= bit_len;
289 	if (cnt > 0)
290 		goto next_srgn;
291 
292 	return false;
293 }
294 
295 static inline bool is_rgn_dirty(struct ufshpb_region *rgn)
296 {
297 	return test_bit(RGN_FLAG_DIRTY, &rgn->rgn_flags);
298 }
299 
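/*
 * Copy up to @len PPN entries, starting at entry @pos, from the map context
 * pages into @ppn_buf. Returns the number of entries copied, which may be
 * less than @len when the requested range crosses a page boundary.
 */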
300 static int ufshpb_fill_ppn_from_page(struct ufshpb_lu *hpb,
301 				     struct ufshpb_map_ctx *mctx, int pos,
302 				     int len, __be64 *ppn_buf)
303 {
304 	struct page *page;
305 	int index, offset;
306 	int copied;
307 
308 	index = pos / (PAGE_SIZE / HPB_ENTRY_SIZE);
309 	offset = pos % (PAGE_SIZE / HPB_ENTRY_SIZE);
310 
311 	if ((offset + len) <= (PAGE_SIZE / HPB_ENTRY_SIZE))
312 		copied = len;
313 	else
314 		copied = (PAGE_SIZE / HPB_ENTRY_SIZE) - offset;
315 
316 	page = mctx->m_page[index];
317 	if (unlikely(!page)) {
318 		dev_err(&hpb->sdev_ufs_lu->sdev_dev,
319 			"error. cannot find page in mctx\n");
320 		return -ENOMEM;
321 	}
322 
323 	memcpy(ppn_buf, page_address(page) + (offset * HPB_ENTRY_SIZE),
324 	       copied * HPB_ENTRY_SIZE);
325 
326 	return copied;
327 }
328 
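/*
 * Translate an LPN into its region index, subregion index and offset within
 * the subregion, using the precomputed shift/mask values.
 */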
329 static void
330 ufshpb_get_pos_from_lpn(struct ufshpb_lu *hpb, unsigned long lpn, int *rgn_idx,
331 			int *srgn_idx, int *offset)
332 {
333 	int rgn_offset;
334 
335 	*rgn_idx = lpn >> hpb->entries_per_rgn_shift;
336 	rgn_offset = lpn & hpb->entries_per_rgn_mask;
337 	*srgn_idx = rgn_offset >> hpb->entries_per_srgn_shift;
338 	*offset = rgn_offset & hpb->entries_per_srgn_mask;
339 }
340 
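/*
 * Build the HPB READ CDB in place of the original READ command: byte 0 holds
 * the UFSHPB_READ opcode, bytes 6..13 the PPN (big-endian in host memory),
 * byte 14 the transfer length and byte 15 the read id.
 */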
341 static void
342 ufshpb_set_hpb_read_to_upiu(struct ufs_hba *hba, struct ufshpb_lu *hpb,
343 			    struct ufshcd_lrb *lrbp, u32 lpn, __be64 ppn,
344 			    u8 transfer_len, int read_id)
345 {
346 	unsigned char *cdb = lrbp->cmd->cmnd;
347 	__be64 ppn_tmp = ppn;
348 	cdb[0] = UFSHPB_READ;
349 
350 	if (hba->dev_quirks & UFS_DEVICE_QUIRK_SWAP_L2P_ENTRY_FOR_HPB_READ)
351 		ppn_tmp = swab64(ppn);
352 
353 	/* ppn value is stored as big-endian in the host memory */
354 	memcpy(&cdb[6], &ppn_tmp, sizeof(__be64));
355 	cdb[14] = transfer_len;
356 	cdb[15] = read_id;
357 
358 	lrbp->cmd->cmd_len = UFS_CDB_SIZE;
359 }
360 
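/*
 * Build the HPB WRITE BUFFER (prefetch) CDB used by the pre_req path:
 * bytes 2..5 carry the start LPN, byte 6 the read id and bytes 7..8 the
 * buffer length in bytes (entry count * HPB_ENTRY_SIZE).
 */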
361 static inline void ufshpb_set_write_buf_cmd(unsigned char *cdb,
362 					    unsigned long lpn, unsigned int len,
363 					    int read_id)
364 {
365 	cdb[0] = UFSHPB_WRITE_BUFFER;
366 	cdb[1] = UFSHPB_WRITE_BUFFER_PREFETCH_ID;
367 
368 	put_unaligned_be32(lpn, &cdb[2]);
369 	cdb[6] = read_id;
370 	put_unaligned_be16(len * HPB_ENTRY_SIZE, &cdb[7]);
371 
372 	cdb[9] = 0x00;	/* Control = 0x00 */
373 }
374 
375 static struct ufshpb_req *ufshpb_get_pre_req(struct ufshpb_lu *hpb)
376 {
377 	struct ufshpb_req *pre_req;
378 
379 	if (hpb->num_inflight_pre_req >= hpb->throttle_pre_req) {
380 		dev_info(&hpb->sdev_ufs_lu->sdev_dev,
381 			 "pre_req throttle. inflight %d throttle %d",
382 			 hpb->num_inflight_pre_req, hpb->throttle_pre_req);
383 		return NULL;
384 	}
385 
386 	pre_req = list_first_entry_or_null(&hpb->lh_pre_req_free,
387 					   struct ufshpb_req, list_req);
388 	if (!pre_req) {
389 		dev_info(&hpb->sdev_ufs_lu->sdev_dev, "There is no pre_req");
390 		return NULL;
391 	}
392 
393 	list_del_init(&pre_req->list_req);
394 	hpb->num_inflight_pre_req++;
395 
396 	return pre_req;
397 }
398 
399 static inline void ufshpb_put_pre_req(struct ufshpb_lu *hpb,
400 				      struct ufshpb_req *pre_req)
401 {
402 	pre_req->req = NULL;
403 	bio_reset(pre_req->bio);
404 	list_add_tail(&pre_req->list_req, &hpb->lh_pre_req_free);
405 	hpb->num_inflight_pre_req--;
406 }
407 
408 static void ufshpb_pre_req_compl_fn(struct request *req, blk_status_t error)
409 {
410 	struct ufshpb_req *pre_req = (struct ufshpb_req *)req->end_io_data;
411 	struct ufshpb_lu *hpb = pre_req->hpb;
412 	unsigned long flags;
413 
414 	if (error) {
415 		struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
416 		struct scsi_sense_hdr sshdr;
417 
418 		dev_err(&hpb->sdev_ufs_lu->sdev_dev, "block status %d", error);
419 		scsi_command_normalize_sense(cmd, &sshdr);
420 		dev_err(&hpb->sdev_ufs_lu->sdev_dev,
421 			"code %x sense_key %x asc %x ascq %x",
422 			sshdr.response_code,
423 			sshdr.sense_key, sshdr.asc, sshdr.ascq);
424 		dev_err(&hpb->sdev_ufs_lu->sdev_dev,
425 			"byte4 %x byte5 %x byte6 %x additional_len %x",
426 			sshdr.byte4, sshdr.byte5,
427 			sshdr.byte6, sshdr.additional_length);
428 	}
429 
430 	blk_mq_free_request(req);
431 	spin_lock_irqsave(&hpb->rgn_state_lock, flags);
432 	ufshpb_put_pre_req(pre_req->hpb, pre_req);
433 	spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
434 }
435 
436 static int ufshpb_prep_entry(struct ufshpb_req *pre_req, struct page *page)
437 {
438 	struct ufshpb_lu *hpb = pre_req->hpb;
439 	struct ufshpb_region *rgn;
440 	struct ufshpb_subregion *srgn;
441 	__be64 *addr;
442 	int offset = 0;
443 	int copied;
444 	unsigned long lpn = pre_req->wb.lpn;
445 	int rgn_idx, srgn_idx, srgn_offset;
446 	unsigned long flags;
447 
448 	addr = page_address(page);
449 	ufshpb_get_pos_from_lpn(hpb, lpn, &rgn_idx, &srgn_idx, &srgn_offset);
450 
451 	spin_lock_irqsave(&hpb->rgn_state_lock, flags);
452 
453 next_offset:
454 	rgn = hpb->rgn_tbl + rgn_idx;
455 	srgn = rgn->srgn_tbl + srgn_idx;
456 
457 	if (!ufshpb_is_valid_srgn(rgn, srgn))
458 		goto mctx_error;
459 
460 	if (!srgn->mctx)
461 		goto mctx_error;
462 
463 	copied = ufshpb_fill_ppn_from_page(hpb, srgn->mctx, srgn_offset,
464 					   pre_req->wb.len - offset,
465 					   &addr[offset]);
466 
467 	if (copied < 0)
468 		goto mctx_error;
469 
470 	offset += copied;
471 	srgn_offset += copied;
472 
473 	if (srgn_offset == hpb->entries_per_srgn) {
474 		srgn_offset = 0;
475 
476 		if (++srgn_idx == hpb->srgns_per_rgn) {
477 			srgn_idx = 0;
478 			rgn_idx++;
479 		}
480 	}
481 
482 	if (offset < pre_req->wb.len)
483 		goto next_offset;
484 
485 	spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
486 	return 0;
487 mctx_error:
488 	spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
489 	return -ENOMEM;
490 }
491 
492 static int ufshpb_pre_req_add_bio_page(struct ufshpb_lu *hpb,
493 				       struct request_queue *q,
494 				       struct ufshpb_req *pre_req)
495 {
496 	struct page *page = pre_req->wb.m_page;
497 	struct bio *bio = pre_req->bio;
498 	int entries_bytes, ret;
499 
500 	if (!page)
501 		return -ENOMEM;
502 
503 	if (ufshpb_prep_entry(pre_req, page))
504 		return -ENOMEM;
505 
506 	entries_bytes = pre_req->wb.len * sizeof(__be64);
507 
508 	ret = bio_add_pc_page(q, bio, page, entries_bytes, 0);
509 	if (ret != entries_bytes) {
510 		dev_err(&hpb->sdev_ufs_lu->sdev_dev,
511 			"bio_add_pc_page fail: %d", ret);
512 		return -ENOMEM;
513 	}
514 	return 0;
515 }
516 
517 static inline int ufshpb_get_read_id(struct ufshpb_lu *hpb)
518 {
519 	if (++hpb->cur_read_id >= MAX_HPB_READ_ID)
520 		hpb->cur_read_id = 1;
521 	return hpb->cur_read_id;
522 }
523 
524 static int ufshpb_execute_pre_req(struct ufshpb_lu *hpb, struct scsi_cmnd *cmd,
525 				  struct ufshpb_req *pre_req, int read_id)
526 {
527 	struct scsi_device *sdev = cmd->device;
528 	struct request_queue *q = sdev->request_queue;
529 	struct request *req;
530 	struct scsi_request *rq;
531 	struct bio *bio = pre_req->bio;
532 
533 	pre_req->hpb = hpb;
534 	pre_req->wb.lpn = sectors_to_logical(cmd->device,
535 					     blk_rq_pos(cmd->request));
536 	pre_req->wb.len = sectors_to_logical(cmd->device,
537 					     blk_rq_sectors(cmd->request));
538 	if (ufshpb_pre_req_add_bio_page(hpb, q, pre_req))
539 		return -ENOMEM;
540 
541 	req = pre_req->req;
542 
543 	/* 1. request setup */
544 	blk_rq_append_bio(req, &bio);
545 	req->rq_disk = NULL;
546 	req->end_io_data = (void *)pre_req;
547 	req->end_io = ufshpb_pre_req_compl_fn;
548 
549 	/* 2. scsi_request setup */
550 	rq = scsi_req(req);
551 	rq->retries = 1;
552 
553 	ufshpb_set_write_buf_cmd(rq->cmd, pre_req->wb.lpn, pre_req->wb.len,
554 				 read_id);
555 	rq->cmd_len = scsi_command_size(rq->cmd);
556 
557 	if (blk_insert_cloned_request(q, req) != BLK_STS_OK)
558 		return -EAGAIN;
559 
560 	hpb->stats.pre_req_cnt++;
561 
562 	return 0;
563 }
564 
565 static int ufshpb_issue_pre_req(struct ufshpb_lu *hpb, struct scsi_cmnd *cmd,
566 				int *read_id)
567 {
568 	struct ufshpb_req *pre_req;
569 	struct request *req = NULL;
570 	unsigned long flags;
571 	int _read_id;
572 	int ret = 0;
573 
574 	req = blk_get_request(cmd->device->request_queue,
575 			      REQ_OP_SCSI_OUT | REQ_SYNC, BLK_MQ_REQ_NOWAIT);
576 	if (IS_ERR(req))
577 		return -EAGAIN;
578 
579 	spin_lock_irqsave(&hpb->rgn_state_lock, flags);
580 	pre_req = ufshpb_get_pre_req(hpb);
581 	if (!pre_req) {
582 		ret = -EAGAIN;
583 		goto unlock_out;
584 	}
585 	_read_id = ufshpb_get_read_id(hpb);
586 	spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
587 
588 	pre_req->req = req;
589 
590 	ret = ufshpb_execute_pre_req(hpb, cmd, pre_req, _read_id);
591 	if (ret)
592 		goto free_pre_req;
593 
594 	*read_id = _read_id;
595 
596 	return ret;
597 free_pre_req:
598 	spin_lock_irqsave(&hpb->rgn_state_lock, flags);
599 	ufshpb_put_pre_req(hpb, pre_req);
600 unlock_out:
601 	spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
602 	blk_put_request(req);
603 	return ret;
604 }
605 
606 /*
607  * This function sets up an HPB READ command using host-side L2P map data.
608  */
609 int ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
610 {
611 	struct ufshpb_lu *hpb;
612 	struct ufshpb_region *rgn;
613 	struct ufshpb_subregion *srgn;
614 	struct scsi_cmnd *cmd = lrbp->cmd;
615 	u32 lpn;
616 	__be64 ppn;
617 	unsigned long flags;
618 	int transfer_len, rgn_idx, srgn_idx, srgn_offset;
619 	int read_id = 0;
620 	int err = 0;
621 
622 	hpb = ufshpb_get_hpb_data(cmd->device);
623 	if (!hpb)
624 		return -ENODEV;
625 
626 	if (ufshpb_get_state(hpb) == HPB_INIT)
627 		return -ENODEV;
628 
629 	if (ufshpb_get_state(hpb) != HPB_PRESENT) {
630 		dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
631 			   "%s: ufshpb state is not PRESENT", __func__);
632 		return -ENODEV;
633 	}
634 
635 	if (blk_rq_is_scsi(cmd->request) ||
636 	    (!ufshpb_is_write_or_discard(cmd) &&
637 	     !ufshpb_is_read_cmd(cmd)))
638 		return 0;
639 
640 	transfer_len = sectors_to_logical(cmd->device,
641 					  blk_rq_sectors(cmd->request));
642 	if (unlikely(!transfer_len))
643 		return 0;
644 
645 	lpn = sectors_to_logical(cmd->device, blk_rq_pos(cmd->request));
646 	ufshpb_get_pos_from_lpn(hpb, lpn, &rgn_idx, &srgn_idx, &srgn_offset);
647 	rgn = hpb->rgn_tbl + rgn_idx;
648 	srgn = rgn->srgn_tbl + srgn_idx;
649 
650 	/* If the command type is WRITE or DISCARD, mark the bitmap as dirty */
651 	if (ufshpb_is_write_or_discard(cmd)) {
652 		ufshpb_iterate_rgn(hpb, rgn_idx, srgn_idx, srgn_offset,
653 				   transfer_len, true);
654 		return 0;
655 	}
656 
657 	if (!ufshpb_is_supported_chunk(hpb, transfer_len))
658 		return 0;
659 
660 	WARN_ON_ONCE(transfer_len > HPB_MULTI_CHUNK_HIGH);
661 
662 	if (hpb->is_hcm) {
663 		/*
664 		 * in host control mode, reads are the main source for
665 		 * activation trials.
666 		 */
667 		ufshpb_iterate_rgn(hpb, rgn_idx, srgn_idx, srgn_offset,
668 				   transfer_len, false);
669 
670 		/* keep those counters normalized */
671 		if (rgn->reads > hpb->entries_per_srgn)
672 			schedule_work(&hpb->ufshpb_normalization_work);
673 	}
674 
675 	spin_lock_irqsave(&hpb->rgn_state_lock, flags);
676 	if (ufshpb_test_ppn_dirty(hpb, rgn_idx, srgn_idx, srgn_offset,
677 				   transfer_len)) {
678 		hpb->stats.miss_cnt++;
679 		spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
680 		return 0;
681 	}
682 
683 	err = ufshpb_fill_ppn_from_page(hpb, srgn->mctx, srgn_offset, 1, &ppn);
684 	spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
685 	if (unlikely(err < 0)) {
686 		/*
687 		 * In this case, the region state is active,
688 		 * but the ppn table is not allocated.
689 		 * The ppn table must always be allocated while the region
690 		 * is in the active state.
691 		 */
692 		dev_err(hba->dev, "get ppn failed. err %d\n", err);
693 		return err;
694 	}
695 
696 	if (!ufshpb_is_legacy(hba) &&
697 	    ufshpb_is_required_wb(hpb, transfer_len)) {
698 		err = ufshpb_issue_pre_req(hpb, cmd, &read_id);
699 		if (err) {
700 			unsigned long timeout;
701 
702 			timeout = cmd->jiffies_at_alloc + msecs_to_jiffies(
703 				  hpb->params.requeue_timeout_ms);
704 
705 			if (time_before(jiffies, timeout))
706 				return -EAGAIN;
707 
708 			hpb->stats.miss_cnt++;
709 			return 0;
710 		}
711 	}
712 
713 	ufshpb_set_hpb_read_to_upiu(hba, hpb, lrbp, lpn, ppn, transfer_len,
714 				    read_id);
715 
716 	hpb->stats.hit_cnt++;
717 	return 0;
718 }
719 
720 static struct ufshpb_req *ufshpb_get_req(struct ufshpb_lu *hpb,
721 					 int rgn_idx, enum req_opf dir,
722 					 bool atomic)
723 {
724 	struct ufshpb_req *rq;
725 	struct request *req;
726 	int retries = HPB_MAP_REQ_RETRIES;
727 
728 	rq = kmem_cache_alloc(hpb->map_req_cache, GFP_KERNEL);
729 	if (!rq)
730 		return NULL;
731 
732 retry:
733 	req = blk_get_request(hpb->sdev_ufs_lu->request_queue, dir,
734 			      BLK_MQ_REQ_NOWAIT);
735 
736 	if (!atomic && (PTR_ERR(req) == -EWOULDBLOCK) && (--retries > 0)) {
737 		usleep_range(3000, 3100);
738 		goto retry;
739 	}
740 
741 	if (IS_ERR(req))
742 		goto free_rq;
743 
744 	rq->hpb = hpb;
745 	rq->req = req;
746 	rq->rb.rgn_idx = rgn_idx;
747 
748 	return rq;
749 
750 free_rq:
751 	kmem_cache_free(hpb->map_req_cache, rq);
752 	return NULL;
753 }
754 
755 static void ufshpb_put_req(struct ufshpb_lu *hpb, struct ufshpb_req *rq)
756 {
757 	blk_put_request(rq->req);
758 	kmem_cache_free(hpb->map_req_cache, rq);
759 }
760 
761 static struct ufshpb_req *ufshpb_get_map_req(struct ufshpb_lu *hpb,
762 					     struct ufshpb_subregion *srgn)
763 {
764 	struct ufshpb_req *map_req;
765 	struct bio *bio;
766 	unsigned long flags;
767 
768 	if (hpb->is_hcm &&
769 	    hpb->num_inflight_map_req >= hpb->params.inflight_map_req) {
770 		dev_info(&hpb->sdev_ufs_lu->sdev_dev,
771 			 "map_req throttle. inflight %d throttle %d",
772 			 hpb->num_inflight_map_req,
773 			 hpb->params.inflight_map_req);
774 		return NULL;
775 	}
776 
777 	map_req = ufshpb_get_req(hpb, srgn->rgn_idx, REQ_OP_SCSI_IN, false);
778 	if (!map_req)
779 		return NULL;
780 
781 	bio = bio_alloc(GFP_KERNEL, hpb->pages_per_srgn);
782 	if (!bio) {
783 		ufshpb_put_req(hpb, map_req);
784 		return NULL;
785 	}
786 
787 	map_req->bio = bio;
788 
789 	map_req->rb.srgn_idx = srgn->srgn_idx;
790 	map_req->rb.mctx = srgn->mctx;
791 
792 	spin_lock_irqsave(&hpb->param_lock, flags);
793 	hpb->num_inflight_map_req++;
794 	spin_unlock_irqrestore(&hpb->param_lock, flags);
795 
796 	return map_req;
797 }
798 
799 static void ufshpb_put_map_req(struct ufshpb_lu *hpb,
800 			       struct ufshpb_req *map_req)
801 {
802 	unsigned long flags;
803 
804 	bio_put(map_req->bio);
805 	ufshpb_put_req(hpb, map_req);
806 
807 	spin_lock_irqsave(&hpb->param_lock, flags);
808 	hpb->num_inflight_map_req--;
809 	spin_unlock_irqrestore(&hpb->param_lock, flags);
810 }
811 
812 static int ufshpb_clear_dirty_bitmap(struct ufshpb_lu *hpb,
813 				     struct ufshpb_subregion *srgn)
814 {
815 	struct ufshpb_region *rgn;
816 	u32 num_entries = hpb->entries_per_srgn;
817 
818 	if (!srgn->mctx) {
819 		dev_err(&hpb->sdev_ufs_lu->sdev_dev,
820 			"no mctx in region %d subregion %d.\n",
821 			srgn->rgn_idx, srgn->srgn_idx);
822 		return -1;
823 	}
824 
825 	if (unlikely(srgn->is_last))
826 		num_entries = hpb->last_srgn_entries;
827 
828 	bitmap_zero(srgn->mctx->ppn_dirty, num_entries);
829 
830 	rgn = hpb->rgn_tbl + srgn->rgn_idx;
831 	clear_bit(RGN_FLAG_DIRTY, &rgn->rgn_flags);
832 
833 	return 0;
834 }
835 
836 static void ufshpb_update_active_info(struct ufshpb_lu *hpb, int rgn_idx,
837 				      int srgn_idx)
838 {
839 	struct ufshpb_region *rgn;
840 	struct ufshpb_subregion *srgn;
841 
842 	rgn = hpb->rgn_tbl + rgn_idx;
843 	srgn = rgn->srgn_tbl + srgn_idx;
844 
845 	list_del_init(&rgn->list_inact_rgn);
846 
847 	if (list_empty(&srgn->list_act_srgn))
848 		list_add_tail(&srgn->list_act_srgn, &hpb->lh_act_srgn);
849 
850 	hpb->stats.rb_active_cnt++;
851 }
852 
853 static void ufshpb_update_inactive_info(struct ufshpb_lu *hpb, int rgn_idx)
854 {
855 	struct ufshpb_region *rgn;
856 	struct ufshpb_subregion *srgn;
857 	int srgn_idx;
858 
859 	rgn = hpb->rgn_tbl + rgn_idx;
860 
861 	for_each_sub_region(rgn, srgn_idx, srgn)
862 		list_del_init(&srgn->list_act_srgn);
863 
864 	if (list_empty(&rgn->list_inact_rgn))
865 		list_add_tail(&rgn->list_inact_rgn, &hpb->lh_inact_rgn);
866 
867 	hpb->stats.rb_inactive_cnt++;
868 }
869 
870 static void ufshpb_activate_subregion(struct ufshpb_lu *hpb,
871 				      struct ufshpb_subregion *srgn)
872 {
873 	struct ufshpb_region *rgn;
874 
875 	/*
876 	 * If there is no mctx in the subregion after the I/O for
877 	 * HPB_READ_BUFFER has completed, the region to which the
878 	 * subregion belongs was evicted.
879 	 * The region must not be evicted while its I/O is in progress.
880 	 */
881 	if (!srgn->mctx) {
882 		dev_err(&hpb->sdev_ufs_lu->sdev_dev,
883 			"no mctx in region %d subregion %d.\n",
884 			srgn->rgn_idx, srgn->srgn_idx);
885 		srgn->srgn_state = HPB_SRGN_INVALID;
886 		return;
887 	}
888 
889 	rgn = hpb->rgn_tbl + srgn->rgn_idx;
890 
891 	if (unlikely(rgn->rgn_state == HPB_RGN_INACTIVE)) {
892 		dev_err(&hpb->sdev_ufs_lu->sdev_dev,
893 			"region %d subregion %d evicted\n",
894 			srgn->rgn_idx, srgn->srgn_idx);
895 		srgn->srgn_state = HPB_SRGN_INVALID;
896 		return;
897 	}
898 	srgn->srgn_state = HPB_SRGN_VALID;
899 }
900 
901 static void ufshpb_umap_req_compl_fn(struct request *req, blk_status_t error)
902 {
903 	struct ufshpb_req *umap_req = (struct ufshpb_req *)req->end_io_data;
904 
905 	ufshpb_put_req(umap_req->hpb, umap_req);
906 }
907 
908 static void ufshpb_map_req_compl_fn(struct request *req, blk_status_t error)
909 {
910 	struct ufshpb_req *map_req = (struct ufshpb_req *) req->end_io_data;
911 	struct ufshpb_lu *hpb = map_req->hpb;
912 	struct ufshpb_subregion *srgn;
913 	unsigned long flags;
914 
915 	srgn = hpb->rgn_tbl[map_req->rb.rgn_idx].srgn_tbl +
916 		map_req->rb.srgn_idx;
917 
918 	ufshpb_clear_dirty_bitmap(hpb, srgn);
919 	spin_lock_irqsave(&hpb->rgn_state_lock, flags);
920 	ufshpb_activate_subregion(hpb, srgn);
921 	spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
922 
923 	ufshpb_put_map_req(map_req->hpb, map_req);
924 }
925 
926 static void ufshpb_set_unmap_cmd(unsigned char *cdb, struct ufshpb_region *rgn)
927 {
928 	cdb[0] = UFSHPB_WRITE_BUFFER;
929 	cdb[1] = rgn ? UFSHPB_WRITE_BUFFER_INACT_SINGLE_ID :
930 			  UFSHPB_WRITE_BUFFER_INACT_ALL_ID;
931 	if (rgn)
932 		put_unaligned_be16(rgn->rgn_idx, &cdb[2]);
933 	cdb[9] = 0x00;
934 }
935 
936 static void ufshpb_set_read_buf_cmd(unsigned char *cdb, int rgn_idx,
937 				    int srgn_idx, int srgn_mem_size)
938 {
939 	cdb[0] = UFSHPB_READ_BUFFER;
940 	cdb[1] = UFSHPB_READ_BUFFER_ID;
941 
942 	put_unaligned_be16(rgn_idx, &cdb[2]);
943 	put_unaligned_be16(srgn_idx, &cdb[4]);
944 	put_unaligned_be24(srgn_mem_size, &cdb[6]);
945 
946 	cdb[9] = 0x00;
947 }
948 
949 static void ufshpb_execute_umap_req(struct ufshpb_lu *hpb,
950 				   struct ufshpb_req *umap_req,
951 				   struct ufshpb_region *rgn)
952 {
953 	struct request *req;
954 	struct scsi_request *rq;
955 
956 	req = umap_req->req;
957 	req->timeout = 0;
958 	req->end_io_data = (void *)umap_req;
959 	rq = scsi_req(req);
960 	ufshpb_set_unmap_cmd(rq->cmd, rgn);
961 	rq->cmd_len = HPB_WRITE_BUFFER_CMD_LENGTH;
962 
963 	blk_execute_rq_nowait(req->q, NULL, req, 1, ufshpb_umap_req_compl_fn);
964 
965 	hpb->stats.umap_req_cnt++;
966 }
967 
968 static int ufshpb_execute_map_req(struct ufshpb_lu *hpb,
969 				  struct ufshpb_req *map_req, bool last)
970 {
971 	struct request_queue *q;
972 	struct request *req;
973 	struct scsi_request *rq;
974 	int mem_size = hpb->srgn_mem_size;
975 	int ret = 0;
976 	int i;
977 
978 	q = hpb->sdev_ufs_lu->request_queue;
979 	for (i = 0; i < hpb->pages_per_srgn; i++) {
980 		ret = bio_add_pc_page(q, map_req->bio, map_req->rb.mctx->m_page[i],
981 				      PAGE_SIZE, 0);
982 		if (ret != PAGE_SIZE) {
983 			dev_err(&hpb->sdev_ufs_lu->sdev_dev,
984 				   "bio_add_pc_page fail %d - %d\n",
985 				   map_req->rb.rgn_idx, map_req->rb.srgn_idx);
986 			return ret;
987 		}
988 	}
989 
990 	req = map_req->req;
991 
992 	blk_rq_append_bio(req, &map_req->bio);
993 
994 	req->end_io_data = map_req;
995 
996 	rq = scsi_req(req);
997 
998 	if (unlikely(last))
999 		mem_size = hpb->last_srgn_entries * HPB_ENTRY_SIZE;
1000 
1001 	ufshpb_set_read_buf_cmd(rq->cmd, map_req->rb.rgn_idx,
1002 				map_req->rb.srgn_idx, mem_size);
1003 	rq->cmd_len = HPB_READ_BUFFER_CMD_LENGTH;
1004 
1005 	blk_execute_rq_nowait(q, NULL, req, 1, ufshpb_map_req_compl_fn);
1006 
1007 	hpb->stats.map_req_cnt++;
1008 	return 0;
1009 }
1010 
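/*
 * Allocate a map context for one subregion: the page array that backs the
 * L2P entries plus the ppn_dirty bitmap. All pages come from the shared
 * ufshpb_page_pool and are zeroed before use.
 */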
1011 static struct ufshpb_map_ctx *ufshpb_get_map_ctx(struct ufshpb_lu *hpb,
1012 						 bool last)
1013 {
1014 	struct ufshpb_map_ctx *mctx;
1015 	u32 num_entries = hpb->entries_per_srgn;
1016 	int i, j;
1017 
1018 	mctx = mempool_alloc(ufshpb_mctx_pool, GFP_KERNEL);
1019 	if (!mctx)
1020 		return NULL;
1021 
1022 	mctx->m_page = kmem_cache_alloc(hpb->m_page_cache, GFP_KERNEL);
1023 	if (!mctx->m_page)
1024 		goto release_mctx;
1025 
1026 	if (unlikely(last))
1027 		num_entries = hpb->last_srgn_entries;
1028 
1029 	mctx->ppn_dirty = bitmap_zalloc(num_entries, GFP_KERNEL);
1030 	if (!mctx->ppn_dirty)
1031 		goto release_m_page;
1032 
1033 	for (i = 0; i < hpb->pages_per_srgn; i++) {
1034 		mctx->m_page[i] = mempool_alloc(ufshpb_page_pool, GFP_KERNEL);
1035 		if (!mctx->m_page[i]) {
1036 			for (j = 0; j < i; j++)
1037 				mempool_free(mctx->m_page[j], ufshpb_page_pool);
1038 			goto release_ppn_dirty;
1039 		}
1040 		clear_page(page_address(mctx->m_page[i]));
1041 	}
1042 
1043 	return mctx;
1044 
1045 release_ppn_dirty:
1046 	bitmap_free(mctx->ppn_dirty);
1047 release_m_page:
1048 	kmem_cache_free(hpb->m_page_cache, mctx->m_page);
1049 release_mctx:
1050 	mempool_free(mctx, ufshpb_mctx_pool);
1051 	return NULL;
1052 }
1053 
1054 static void ufshpb_put_map_ctx(struct ufshpb_lu *hpb,
1055 			       struct ufshpb_map_ctx *mctx)
1056 {
1057 	int i;
1058 
1059 	for (i = 0; i < hpb->pages_per_srgn; i++)
1060 		mempool_free(mctx->m_page[i], ufshpb_page_pool);
1061 
1062 	bitmap_free(mctx->ppn_dirty);
1063 	kmem_cache_free(hpb->m_page_cache, mctx->m_page);
1064 	mempool_free(mctx, ufshpb_mctx_pool);
1065 }
1066 
1067 static int ufshpb_check_srgns_issue_state(struct ufshpb_lu *hpb,
1068 					  struct ufshpb_region *rgn)
1069 {
1070 	struct ufshpb_subregion *srgn;
1071 	int srgn_idx;
1072 
1073 	for_each_sub_region(rgn, srgn_idx, srgn)
1074 		if (srgn->srgn_state == HPB_SRGN_ISSUED)
1075 			return -EPERM;
1076 
1077 	return 0;
1078 }
1079 
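/*
 * Host control mode read-timeout worker: scan the LRU list and, for every
 * region whose read timeout has expired, either rewind the timer or, if the
 * region is dirty or has run out of expiries, queue it for inactivation.
 */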
1080 static void ufshpb_read_to_handler(struct work_struct *work)
1081 {
1082 	struct ufshpb_lu *hpb = container_of(work, struct ufshpb_lu,
1083 					     ufshpb_read_to_work.work);
1084 	struct victim_select_info *lru_info = &hpb->lru_info;
1085 	struct ufshpb_region *rgn, *next_rgn;
1086 	unsigned long flags;
1087 	unsigned int poll;
1088 	LIST_HEAD(expired_list);
1089 
1090 	if (test_and_set_bit(TIMEOUT_WORK_RUNNING, &hpb->work_data_bits))
1091 		return;
1092 
1093 	spin_lock_irqsave(&hpb->rgn_state_lock, flags);
1094 
1095 	list_for_each_entry_safe(rgn, next_rgn, &lru_info->lh_lru_rgn,
1096 				 list_lru_rgn) {
1097 		bool timedout = ktime_after(ktime_get(), rgn->read_timeout);
1098 
1099 		if (timedout) {
1100 			rgn->read_timeout_expiries--;
1101 			if (is_rgn_dirty(rgn) ||
1102 			    rgn->read_timeout_expiries == 0)
1103 				list_add(&rgn->list_expired_rgn, &expired_list);
1104 			else
1105 				rgn->read_timeout = ktime_add_ms(ktime_get(),
1106 						hpb->params.read_timeout_ms);
1107 		}
1108 	}
1109 
1110 	spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
1111 
1112 	list_for_each_entry_safe(rgn, next_rgn, &expired_list,
1113 				 list_expired_rgn) {
1114 		list_del_init(&rgn->list_expired_rgn);
1115 		spin_lock_irqsave(&hpb->rsp_list_lock, flags);
1116 		ufshpb_update_inactive_info(hpb, rgn->rgn_idx);
1117 		spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
1118 	}
1119 
1120 	ufshpb_kick_map_work(hpb);
1121 
1122 	clear_bit(TIMEOUT_WORK_RUNNING, &hpb->work_data_bits);
1123 
1124 	poll = hpb->params.timeout_polling_interval_ms;
1125 	schedule_delayed_work(&hpb->ufshpb_read_to_work,
1126 			      msecs_to_jiffies(poll));
1127 }
1128 
1129 static void ufshpb_add_lru_info(struct victim_select_info *lru_info,
1130 				struct ufshpb_region *rgn)
1131 {
1132 	rgn->rgn_state = HPB_RGN_ACTIVE;
1133 	list_add_tail(&rgn->list_lru_rgn, &lru_info->lh_lru_rgn);
1134 	atomic_inc(&lru_info->active_cnt);
1135 	if (rgn->hpb->is_hcm) {
1136 		rgn->read_timeout =
1137 			ktime_add_ms(ktime_get(),
1138 				     rgn->hpb->params.read_timeout_ms);
1139 		rgn->read_timeout_expiries =
1140 			rgn->hpb->params.read_timeout_expiries;
1141 	}
1142 }
1143 
1144 static void ufshpb_hit_lru_info(struct victim_select_info *lru_info,
1145 				struct ufshpb_region *rgn)
1146 {
1147 	list_move_tail(&rgn->list_lru_rgn, &lru_info->lh_lru_rgn);
1148 }
1149 
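/*
 * Pick an eviction victim from the LRU list, skipping regions that still
 * have a map request in flight and, in host control mode, regions whose
 * read count is above the eviction exit threshold.
 */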
1150 static struct ufshpb_region *ufshpb_victim_lru_info(struct ufshpb_lu *hpb)
1151 {
1152 	struct victim_select_info *lru_info = &hpb->lru_info;
1153 	struct ufshpb_region *rgn, *victim_rgn = NULL;
1154 
1155 	list_for_each_entry(rgn, &lru_info->lh_lru_rgn, list_lru_rgn) {
1156 		if (!rgn) {
1157 			dev_err(&hpb->sdev_ufs_lu->sdev_dev,
1158 				"%s: no region allocated\n",
1159 				__func__);
1160 			return NULL;
1161 		}
1162 		if (ufshpb_check_srgns_issue_state(hpb, rgn))
1163 			continue;
1164 
1165 		/*
1166 		 * in host control mode, verify that the exiting region
1167 		 * has fewer reads
1168 		 */
1169 		if (hpb->is_hcm &&
1170 		    rgn->reads > hpb->params.eviction_thld_exit)
1171 			continue;
1172 
1173 		victim_rgn = rgn;
1174 		break;
1175 	}
1176 
1177 	return victim_rgn;
1178 }
1179 
1180 static void ufshpb_cleanup_lru_info(struct victim_select_info *lru_info,
1181 				    struct ufshpb_region *rgn)
1182 {
1183 	list_del_init(&rgn->list_lru_rgn);
1184 	rgn->rgn_state = HPB_RGN_INACTIVE;
1185 	atomic_dec(&lru_info->active_cnt);
1186 }
1187 
1188 static void ufshpb_purge_active_subregion(struct ufshpb_lu *hpb,
1189 					  struct ufshpb_subregion *srgn)
1190 {
1191 	if (srgn->srgn_state != HPB_SRGN_UNUSED) {
1192 		ufshpb_put_map_ctx(hpb, srgn->mctx);
1193 		srgn->srgn_state = HPB_SRGN_UNUSED;
1194 		srgn->mctx = NULL;
1195 	}
1196 }
1197 
1198 static int ufshpb_issue_umap_req(struct ufshpb_lu *hpb,
1199 				 struct ufshpb_region *rgn,
1200 				 bool atomic)
1201 {
1202 	struct ufshpb_req *umap_req;
1203 	int rgn_idx = rgn ? rgn->rgn_idx : 0;
1204 
1205 	umap_req = ufshpb_get_req(hpb, rgn_idx, REQ_OP_SCSI_OUT, atomic);
1206 	if (!umap_req)
1207 		return -ENOMEM;
1208 
1209 	ufshpb_execute_umap_req(hpb, umap_req, rgn);
1210 
1211 	return 0;
1212 }
1213 
1214 static int ufshpb_issue_umap_single_req(struct ufshpb_lu *hpb,
1215 					struct ufshpb_region *rgn)
1216 {
1217 	return ufshpb_issue_umap_req(hpb, rgn, true);
1218 }
1219 
1220 static int ufshpb_issue_umap_all_req(struct ufshpb_lu *hpb)
1221 {
1222 	return ufshpb_issue_umap_req(hpb, NULL, false);
1223 }
1224 
1225 static void __ufshpb_evict_region(struct ufshpb_lu *hpb,
1226 				 struct ufshpb_region *rgn)
1227 {
1228 	struct victim_select_info *lru_info;
1229 	struct ufshpb_subregion *srgn;
1230 	int srgn_idx;
1231 
1232 	lru_info = &hpb->lru_info;
1233 
1234 	dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, "evict region %d\n", rgn->rgn_idx);
1235 
1236 	ufshpb_cleanup_lru_info(lru_info, rgn);
1237 
1238 	for_each_sub_region(rgn, srgn_idx, srgn)
1239 		ufshpb_purge_active_subregion(hpb, srgn);
1240 }
1241 
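/*
 * Evict a single region. Pinned regions are never evicted, and regions with
 * issued subregions are skipped with -EBUSY. In host control mode an
 * inactivation (WRITE BUFFER) request is sent to the device first.
 */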
1242 static int ufshpb_evict_region(struct ufshpb_lu *hpb, struct ufshpb_region *rgn)
1243 {
1244 	unsigned long flags;
1245 	int ret = 0;
1246 
1247 	spin_lock_irqsave(&hpb->rgn_state_lock, flags);
1248 	if (rgn->rgn_state == HPB_RGN_PINNED) {
1249 		dev_warn(&hpb->sdev_ufs_lu->sdev_dev,
1250 			 "pinned region cannot drop-out. region %d\n",
1251 			 rgn->rgn_idx);
1252 		goto out;
1253 	}
1254 	if (!list_empty(&rgn->list_lru_rgn)) {
1255 		if (ufshpb_check_srgns_issue_state(hpb, rgn)) {
1256 			ret = -EBUSY;
1257 			goto out;
1258 		}
1259 
1260 		if (hpb->is_hcm) {
1261 			spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
1262 			ret = ufshpb_issue_umap_single_req(hpb, rgn);
1263 			spin_lock_irqsave(&hpb->rgn_state_lock, flags);
1264 			if (ret)
1265 				goto out;
1266 		}
1267 
1268 		__ufshpb_evict_region(hpb, rgn);
1269 	}
1270 out:
1271 	spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
1272 	return ret;
1273 }
1274 
1275 static int ufshpb_issue_map_req(struct ufshpb_lu *hpb,
1276 				struct ufshpb_region *rgn,
1277 				struct ufshpb_subregion *srgn)
1278 {
1279 	struct ufshpb_req *map_req;
1280 	unsigned long flags;
1281 	int ret;
1282 	int err = -EAGAIN;
1283 	bool alloc_required = false;
1284 	enum HPB_SRGN_STATE state = HPB_SRGN_INVALID;
1285 
1286 	spin_lock_irqsave(&hpb->rgn_state_lock, flags);
1287 
1288 	if (ufshpb_get_state(hpb) != HPB_PRESENT) {
1289 		dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
1290 			   "%s: ufshpb state is not PRESENT\n", __func__);
1291 		goto unlock_out;
1292 	}
1293 
1294 	if ((rgn->rgn_state == HPB_RGN_INACTIVE) &&
1295 	    (srgn->srgn_state == HPB_SRGN_INVALID)) {
1296 		err = 0;
1297 		goto unlock_out;
1298 	}
1299 
1300 	if (srgn->srgn_state == HPB_SRGN_UNUSED)
1301 		alloc_required = true;
1302 
1303 	/*
1304 	 * If the subregion is already in the ISSUED state, a specific
1305 	 * event (e.g. GC or wear-leveling) occurred in the device and an
1306 	 * HPB response requesting a map load was received.
1307 	 * In this case, after the outstanding HPB_READ_BUFFER finishes,
1308 	 * another HPB_READ_BUFFER is performed to obtain the latest
1309 	 * map data.
1310 	 */
1311 	if (srgn->srgn_state == HPB_SRGN_ISSUED)
1312 		goto unlock_out;
1313 
1314 	srgn->srgn_state = HPB_SRGN_ISSUED;
1315 	spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
1316 
1317 	if (alloc_required) {
1318 		srgn->mctx = ufshpb_get_map_ctx(hpb, srgn->is_last);
1319 		if (!srgn->mctx) {
1320 			dev_err(&hpb->sdev_ufs_lu->sdev_dev,
1321 			    "get map_ctx failed. region %d - %d\n",
1322 			    rgn->rgn_idx, srgn->srgn_idx);
1323 			state = HPB_SRGN_UNUSED;
1324 			goto change_srgn_state;
1325 		}
1326 	}
1327 
1328 	map_req = ufshpb_get_map_req(hpb, srgn);
1329 	if (!map_req)
1330 		goto change_srgn_state;
1331 
1332 
1333 	ret = ufshpb_execute_map_req(hpb, map_req, srgn->is_last);
1334 	if (ret) {
1335 		dev_err(&hpb->sdev_ufs_lu->sdev_dev,
1336 			   "%s: issue map_req failed: %d, region %d - %d\n",
1337 			   __func__, ret, srgn->rgn_idx, srgn->srgn_idx);
1338 		goto free_map_req;
1339 	}
1340 	return 0;
1341 
1342 free_map_req:
1343 	ufshpb_put_map_req(hpb, map_req);
1344 change_srgn_state:
1345 	spin_lock_irqsave(&hpb->rgn_state_lock, flags);
1346 	srgn->srgn_state = state;
1347 unlock_out:
1348 	spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
1349 	return err;
1350 }
1351 
1352 static int ufshpb_add_region(struct ufshpb_lu *hpb, struct ufshpb_region *rgn)
1353 {
1354 	struct ufshpb_region *victim_rgn = NULL;
1355 	struct victim_select_info *lru_info = &hpb->lru_info;
1356 	unsigned long flags;
1357 	int ret = 0;
1358 
1359 	spin_lock_irqsave(&hpb->rgn_state_lock, flags);
1360 	/*
1361 	 * If the region already belongs to the LRU list, just move it to
1362 	 * the most-recently-used position, because the region is already
1363 	 * in the active state.
1364 	 */
1365 	if (!list_empty(&rgn->list_lru_rgn)) {
1366 		ufshpb_hit_lru_info(lru_info, rgn);
1367 		goto out;
1368 	}
1369 
1370 	if (rgn->rgn_state == HPB_RGN_INACTIVE) {
1371 		if (atomic_read(&lru_info->active_cnt) ==
1372 		    lru_info->max_lru_active_cnt) {
1373 			/*
1374 			 * If the maximum number of active regions
1375 			 * is exceeded, evict the least recently used region.
1376 			 * This case may occur when the device responds
1377 			 * to the eviction information late.
1378 			 * It is okay to evict the least recently used region,
1379 			 * because the device can detect that this region is no
1380 			 * longer used once the host stops issuing HPB_READ for it
1381 			 *
1382 			 * in host control mode, verify that the entering
1383 			 * region has enough reads
1384 			 */
1385 			if (hpb->is_hcm &&
1386 			    rgn->reads < hpb->params.eviction_thld_enter) {
1387 				ret = -EACCES;
1388 				goto out;
1389 			}
1390 
1391 			victim_rgn = ufshpb_victim_lru_info(hpb);
1392 			if (!victim_rgn) {
1393 				dev_warn(&hpb->sdev_ufs_lu->sdev_dev,
1394 				    "cannot get victim region %s\n",
1395 				    hpb->is_hcm ? "" : "error");
1396 				ret = -ENOMEM;
1397 				goto out;
1398 			}
1399 
1400 			dev_dbg(&hpb->sdev_ufs_lu->sdev_dev,
1401 				"LRU full (%d), choose victim %d\n",
1402 				atomic_read(&lru_info->active_cnt),
1403 				victim_rgn->rgn_idx);
1404 
1405 			if (hpb->is_hcm) {
1406 				spin_unlock_irqrestore(&hpb->rgn_state_lock,
1407 						       flags);
1408 				ret = ufshpb_issue_umap_single_req(hpb,
1409 								victim_rgn);
1410 				spin_lock_irqsave(&hpb->rgn_state_lock,
1411 						  flags);
1412 				if (ret)
1413 					goto out;
1414 			}
1415 
1416 			__ufshpb_evict_region(hpb, victim_rgn);
1417 		}
1418 
1419 		/*
1420 		 * When a region is added to the lru_info list_head, it is
1421 		 * guaranteed that all of its subregions have been assigned
1422 		 * an mctx. If that failed, try to obtain the mctx again
1423 		 * without adding the region to the lru_info list_head.
1424 		 */
1425 		ufshpb_add_lru_info(lru_info, rgn);
1426 	}
1427 out:
1428 	spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
1429 	return ret;
1430 }
1431 
1432 static void ufshpb_rsp_req_region_update(struct ufshpb_lu *hpb,
1433 					 struct utp_hpb_rsp *rsp_field)
1434 {
1435 	struct ufshpb_region *rgn;
1436 	struct ufshpb_subregion *srgn;
1437 	int i, rgn_i, srgn_i;
1438 
1439 	BUILD_BUG_ON(sizeof(struct ufshpb_active_field) != HPB_ACT_FIELD_SIZE);
1440 	/*
1441 	 * If the active region and the inactive region are the same,
1442 	 * we will inactivate this region.
1443 	 * The device can detect this (the region was inactivated) and
1444 	 * will respond with the proper active region information.
1445 	 */
1446 	for (i = 0; i < rsp_field->active_rgn_cnt; i++) {
1447 		rgn_i =
1448 			be16_to_cpu(rsp_field->hpb_active_field[i].active_rgn);
1449 		srgn_i =
1450 			be16_to_cpu(rsp_field->hpb_active_field[i].active_srgn);
1451 
1452 		rgn = hpb->rgn_tbl + rgn_i;
1453 		if (hpb->is_hcm &&
1454 		    (rgn->rgn_state != HPB_RGN_ACTIVE || is_rgn_dirty(rgn))) {
1455 			/*
1456 			 * in host control mode, subregion activation
1457 			 * recommendations are only allowed for active regions.
1458 			 * Also, ignore recommendations for dirty regions - the
1459 			 * host will make decisions concerning those by itself.
1460 			 */
1461 			continue;
1462 		}
1463 
1464 		dev_dbg(&hpb->sdev_ufs_lu->sdev_dev,
1465 			"activate(%d) region %d - %d\n", i, rgn_i, srgn_i);
1466 
1467 		spin_lock(&hpb->rsp_list_lock);
1468 		ufshpb_update_active_info(hpb, rgn_i, srgn_i);
1469 		spin_unlock(&hpb->rsp_list_lock);
1470 
1471 		srgn = rgn->srgn_tbl + srgn_i;
1472 
1473 		/* blocking HPB_READ */
1474 		spin_lock(&hpb->rgn_state_lock);
1475 		if (srgn->srgn_state == HPB_SRGN_VALID)
1476 			srgn->srgn_state = HPB_SRGN_INVALID;
1477 		spin_unlock(&hpb->rgn_state_lock);
1478 	}
1479 
1480 	if (hpb->is_hcm) {
1481 		/*
1482 		 * in host control mode the device is not allowed to inactivate
1483 		 * regions
1484 		 */
1485 		goto out;
1486 	}
1487 
1488 	for (i = 0; i < rsp_field->inactive_rgn_cnt; i++) {
1489 		rgn_i = be16_to_cpu(rsp_field->hpb_inactive_field[i]);
1490 		dev_dbg(&hpb->sdev_ufs_lu->sdev_dev,
1491 			"inactivate(%d) region %d\n", i, rgn_i);
1492 
1493 		spin_lock(&hpb->rsp_list_lock);
1494 		ufshpb_update_inactive_info(hpb, rgn_i);
1495 		spin_unlock(&hpb->rsp_list_lock);
1496 
1497 		rgn = hpb->rgn_tbl + rgn_i;
1498 
1499 		spin_lock(&hpb->rgn_state_lock);
1500 		if (rgn->rgn_state != HPB_RGN_INACTIVE) {
1501 			for (srgn_i = 0; srgn_i < rgn->srgn_cnt; srgn_i++) {
1502 				srgn = rgn->srgn_tbl + srgn_i;
1503 				if (srgn->srgn_state == HPB_SRGN_VALID)
1504 					srgn->srgn_state = HPB_SRGN_INVALID;
1505 			}
1506 		}
1507 		spin_unlock(&hpb->rgn_state_lock);
1508 	}
1509 
1510 out:
1511 	dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, "Noti: #ACT %u #INACT %u\n",
1512 		rsp_field->active_rgn_cnt, rsp_field->inactive_rgn_cnt);
1513 
1514 	if (ufshpb_get_state(hpb) == HPB_PRESENT)
1515 		queue_work(ufshpb_wq, &hpb->map_work);
1516 }
1517 
1518 static void ufshpb_dev_reset_handler(struct ufshpb_lu *hpb)
1519 {
1520 	struct victim_select_info *lru_info = &hpb->lru_info;
1521 	struct ufshpb_region *rgn;
1522 	unsigned long flags;
1523 
1524 	spin_lock_irqsave(&hpb->rgn_state_lock, flags);
1525 
1526 	list_for_each_entry(rgn, &lru_info->lh_lru_rgn, list_lru_rgn)
1527 		set_bit(RGN_FLAG_UPDATE, &rgn->rgn_flags);
1528 
1529 	spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
1530 }
1531 
1532 
1533 /*
1534  * This function parses the recommended active subregion information in the
1535  * sense data field of a response UPIU with SAM_STAT_GOOD status.
1536  */
1537 void ufshpb_rsp_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
1538 {
1539 	struct ufshpb_lu *hpb = ufshpb_get_hpb_data(lrbp->cmd->device);
1540 	struct utp_hpb_rsp *rsp_field = &lrbp->ucd_rsp_ptr->hr;
1541 	int data_seg_len;
1542 
1543 	if (unlikely(lrbp->lun != rsp_field->lun)) {
1544 		struct scsi_device *sdev;
1545 		bool found = false;
1546 
1547 		__shost_for_each_device(sdev, hba->host) {
1548 			hpb = ufshpb_get_hpb_data(sdev);
1549 
1550 			if (!hpb)
1551 				continue;
1552 
1553 			if (rsp_field->lun == hpb->lun) {
1554 				found = true;
1555 				break;
1556 			}
1557 		}
1558 
1559 		if (!found)
1560 			return;
1561 	}
1562 
1563 	if (!hpb)
1564 		return;
1565 
1566 	if (ufshpb_get_state(hpb) == HPB_INIT)
1567 		return;
1568 
1569 	if ((ufshpb_get_state(hpb) != HPB_PRESENT) &&
1570 	    (ufshpb_get_state(hpb) != HPB_SUSPEND)) {
1571 		dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
1572 			   "%s: ufshpb state is not PRESENT/SUSPEND\n",
1573 			   __func__);
1574 		return;
1575 	}
1576 
1577 	data_seg_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2)
1578 		& MASK_RSP_UPIU_DATA_SEG_LEN;
1579 
1580 	/* To flush the remaining rsp_list, we queue the map_work task */
1581 	if (!data_seg_len) {
1582 		if (!ufshpb_is_general_lun(hpb->lun))
1583 			return;
1584 
1585 		ufshpb_kick_map_work(hpb);
1586 		return;
1587 	}
1588 
1589 	BUILD_BUG_ON(sizeof(struct utp_hpb_rsp) != UTP_HPB_RSP_SIZE);
1590 
1591 	if (!ufshpb_is_hpb_rsp_valid(hba, lrbp, rsp_field))
1592 		return;
1593 
1594 	hpb->stats.rb_noti_cnt++;
1595 
1596 	switch (rsp_field->hpb_op) {
1597 	case HPB_RSP_REQ_REGION_UPDATE:
1598 		if (data_seg_len != DEV_DATA_SEG_LEN)
1599 			dev_warn(&hpb->sdev_ufs_lu->sdev_dev,
1600 				 "%s: data seg length is not same.\n",
1601 				 __func__);
1602 		ufshpb_rsp_req_region_update(hpb, rsp_field);
1603 		break;
1604 	case HPB_RSP_DEV_RESET:
1605 		dev_warn(&hpb->sdev_ufs_lu->sdev_dev,
1606 			 "UFS device lost HPB information during PM.\n");
1607 
1608 		if (hpb->is_hcm) {
1609 			struct scsi_device *sdev;
1610 
1611 			__shost_for_each_device(sdev, hba->host) {
1612 				struct ufshpb_lu *h = sdev->hostdata;
1613 
1614 				if (h)
1615 					ufshpb_dev_reset_handler(h);
1616 			}
1617 		}
1618 
1619 		break;
1620 	default:
1621 		dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
1622 			   "hpb_op is not available: %d\n",
1623 			   rsp_field->hpb_op);
1624 		break;
1625 	}
1626 }
1627 
1628 static void ufshpb_add_active_list(struct ufshpb_lu *hpb,
1629 				   struct ufshpb_region *rgn,
1630 				   struct ufshpb_subregion *srgn)
1631 {
1632 	if (!list_empty(&rgn->list_inact_rgn))
1633 		return;
1634 
1635 	if (!list_empty(&srgn->list_act_srgn)) {
1636 		list_move(&srgn->list_act_srgn, &hpb->lh_act_srgn);
1637 		return;
1638 	}
1639 
1640 	list_add(&srgn->list_act_srgn, &hpb->lh_act_srgn);
1641 }
1642 
1643 static void ufshpb_add_pending_evict_list(struct ufshpb_lu *hpb,
1644 					  struct ufshpb_region *rgn,
1645 					  struct list_head *pending_list)
1646 {
1647 	struct ufshpb_subregion *srgn;
1648 	int srgn_idx;
1649 
1650 	if (!list_empty(&rgn->list_inact_rgn))
1651 		return;
1652 
1653 	for_each_sub_region(rgn, srgn_idx, srgn)
1654 		if (!list_empty(&srgn->list_act_srgn))
1655 			return;
1656 
1657 	list_add_tail(&rgn->list_inact_rgn, pending_list);
1658 }
1659 
1660 static void ufshpb_run_active_subregion_list(struct ufshpb_lu *hpb)
1661 {
1662 	struct ufshpb_region *rgn;
1663 	struct ufshpb_subregion *srgn;
1664 	unsigned long flags;
1665 	int ret = 0;
1666 
1667 	spin_lock_irqsave(&hpb->rsp_list_lock, flags);
1668 	while ((srgn = list_first_entry_or_null(&hpb->lh_act_srgn,
1669 						struct ufshpb_subregion,
1670 						list_act_srgn))) {
1671 		if (ufshpb_get_state(hpb) == HPB_SUSPEND)
1672 			break;
1673 
1674 		list_del_init(&srgn->list_act_srgn);
1675 		spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
1676 
1677 		rgn = hpb->rgn_tbl + srgn->rgn_idx;
1678 		ret = ufshpb_add_region(hpb, rgn);
1679 		if (ret)
1680 			goto active_failed;
1681 
1682 		ret = ufshpb_issue_map_req(hpb, rgn, srgn);
1683 		if (ret) {
1684 			dev_err(&hpb->sdev_ufs_lu->sdev_dev,
1685 			    "issue map_req failed. ret %d, region %d - %d\n",
1686 			    ret, rgn->rgn_idx, srgn->srgn_idx);
1687 			goto active_failed;
1688 		}
1689 		spin_lock_irqsave(&hpb->rsp_list_lock, flags);
1690 	}
1691 	spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
1692 	return;
1693 
1694 active_failed:
1695 	dev_err(&hpb->sdev_ufs_lu->sdev_dev, "failed to activate region %d - %d, will retry\n",
1696 		   rgn->rgn_idx, srgn->srgn_idx);
1697 	spin_lock_irqsave(&hpb->rsp_list_lock, flags);
1698 	ufshpb_add_active_list(hpb, rgn, srgn);
1699 	spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
1700 }
1701 
1702 static void ufshpb_run_inactive_region_list(struct ufshpb_lu *hpb)
1703 {
1704 	struct ufshpb_region *rgn;
1705 	unsigned long flags;
1706 	int ret;
1707 	LIST_HEAD(pending_list);
1708 
1709 	spin_lock_irqsave(&hpb->rsp_list_lock, flags);
1710 	while ((rgn = list_first_entry_or_null(&hpb->lh_inact_rgn,
1711 					       struct ufshpb_region,
1712 					       list_inact_rgn))) {
1713 		if (ufshpb_get_state(hpb) == HPB_SUSPEND)
1714 			break;
1715 
1716 		list_del_init(&rgn->list_inact_rgn);
1717 		spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
1718 
1719 		ret = ufshpb_evict_region(hpb, rgn);
1720 		if (ret) {
1721 			spin_lock_irqsave(&hpb->rsp_list_lock, flags);
1722 			ufshpb_add_pending_evict_list(hpb, rgn, &pending_list);
1723 			spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
1724 		}
1725 
1726 		spin_lock_irqsave(&hpb->rsp_list_lock, flags);
1727 	}
1728 
1729 	list_splice(&pending_list, &hpb->lh_inact_rgn);
1730 	spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
1731 }
1732 
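/*
 * Normalization worker for host control mode: right-shift the per-subregion
 * read counters by the normalization factor and recompute the region totals;
 * active regions whose counters drop to zero are queued for inactivation.
 */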
1733 static void ufshpb_normalization_work_handler(struct work_struct *work)
1734 {
1735 	struct ufshpb_lu *hpb = container_of(work, struct ufshpb_lu,
1736 					     ufshpb_normalization_work);
1737 	int rgn_idx;
1738 	u8 factor = hpb->params.normalization_factor;
1739 
1740 	for (rgn_idx = 0; rgn_idx < hpb->rgns_per_lu; rgn_idx++) {
1741 		struct ufshpb_region *rgn = hpb->rgn_tbl + rgn_idx;
1742 		int srgn_idx;
1743 
1744 		spin_lock(&rgn->rgn_lock);
1745 		rgn->reads = 0;
1746 		for (srgn_idx = 0; srgn_idx < hpb->srgns_per_rgn; srgn_idx++) {
1747 			struct ufshpb_subregion *srgn = rgn->srgn_tbl + srgn_idx;
1748 
1749 			srgn->reads >>= factor;
1750 			rgn->reads += srgn->reads;
1751 		}
1752 		spin_unlock(&rgn->rgn_lock);
1753 
1754 		if (rgn->rgn_state != HPB_RGN_ACTIVE || rgn->reads)
1755 			continue;
1756 
1757 		/* if region is active but has no reads - inactivate it */
1758 		spin_lock(&hpb->rsp_list_lock);
1759 		ufshpb_update_inactive_info(hpb, rgn->rgn_idx);
1760 		spin_unlock(&hpb->rsp_list_lock);
1761 	}
1762 }
1763 
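/* Worker that processes the inactivation list, then the activation list. */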
1764 static void ufshpb_map_work_handler(struct work_struct *work)
1765 {
1766 	struct ufshpb_lu *hpb = container_of(work, struct ufshpb_lu, map_work);
1767 
1768 	if (ufshpb_get_state(hpb) != HPB_PRESENT) {
1769 		dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
1770 			   "%s: ufshpb state is not PRESENT\n", __func__);
1771 		return;
1772 	}
1773 
1774 	ufshpb_run_inactive_region_list(hpb);
1775 	ufshpb_run_active_subregion_list(hpb);
1776 }
1777 
1778 /*
1779  * This function does not need to hold any locks (rgn_state_lock,
1780  * rsp_list_lock, etc.) because it is only called during initialization.
1781  */
1782 static int ufshpb_init_pinned_active_region(struct ufs_hba *hba,
1783 					    struct ufshpb_lu *hpb,
1784 					    struct ufshpb_region *rgn)
1785 {
1786 	struct ufshpb_subregion *srgn;
1787 	int srgn_idx, i;
1788 	int err = 0;
1789 
1790 	for_each_sub_region(rgn, srgn_idx, srgn) {
1791 		srgn->mctx = ufshpb_get_map_ctx(hpb, srgn->is_last);
1792 		srgn->srgn_state = HPB_SRGN_INVALID;
1793 		if (!srgn->mctx) {
1794 			err = -ENOMEM;
1795 			dev_err(hba->dev,
1796 				"alloc mctx for pinned region failed\n");
1797 			goto release;
1798 		}
1799 
1800 		list_add_tail(&srgn->list_act_srgn, &hpb->lh_act_srgn);
1801 	}
1802 
1803 	rgn->rgn_state = HPB_RGN_PINNED;
1804 	return 0;
1805 
1806 release:
1807 	for (i = 0; i < srgn_idx; i++) {
1808 		srgn = rgn->srgn_tbl + i;
1809 		ufshpb_put_map_ctx(hpb, srgn->mctx);
1810 	}
1811 	return err;
1812 }
1813 
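/*
 * Initialize the sub-region table of @rgn. If this is the last region of
 * the LU and it ends with a partially filled sub-region, mark that
 * sub-region as the (shorter) last one.
 */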
1814 static void ufshpb_init_subregion_tbl(struct ufshpb_lu *hpb,
1815 				      struct ufshpb_region *rgn, bool last)
1816 {
1817 	int srgn_idx;
1818 	struct ufshpb_subregion *srgn;
1819 
1820 	for_each_sub_region(rgn, srgn_idx, srgn) {
1821 		INIT_LIST_HEAD(&srgn->list_act_srgn);
1822 
1823 		srgn->rgn_idx = rgn->rgn_idx;
1824 		srgn->srgn_idx = srgn_idx;
1825 		srgn->srgn_state = HPB_SRGN_UNUSED;
1826 	}
1827 
1828 	if (unlikely(last && hpb->last_srgn_entries))
1829 		srgn->is_last = true;
1830 }
1831 
1832 static int ufshpb_alloc_subregion_tbl(struct ufshpb_lu *hpb,
1833 				      struct ufshpb_region *rgn, int srgn_cnt)
1834 {
1835 	rgn->srgn_tbl = kvcalloc(srgn_cnt, sizeof(struct ufshpb_subregion),
1836 				 GFP_KERNEL);
1837 	if (!rgn->srgn_tbl)
1838 		return -ENOMEM;
1839 
1840 	rgn->srgn_cnt = srgn_cnt;
1841 	return 0;
1842 }
1843 
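/*
 * Derive the per-LU HPB geometry (entries per region and sub-region,
 * number of regions and sub-regions, pages per sub-region, pinned range,
 * LRU capacity) from the device and unit descriptor values.
 */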
1844 static void ufshpb_lu_parameter_init(struct ufs_hba *hba,
1845 				     struct ufshpb_lu *hpb,
1846 				     struct ufshpb_dev_info *hpb_dev_info,
1847 				     struct ufshpb_lu_info *hpb_lu_info)
1848 {
1849 	u32 entries_per_rgn;
1850 	u64 rgn_mem_size, tmp;
1851 
1852 	/* for pre_req */
1853 	hpb->pre_req_min_tr_len = hpb_dev_info->max_hpb_single_cmd + 1;
1854 
1855 	if (ufshpb_is_legacy(hba))
1856 		hpb->pre_req_max_tr_len = HPB_LEGACY_CHUNK_HIGH;
1857 	else
1858 		hpb->pre_req_max_tr_len = HPB_MULTI_CHUNK_HIGH;
1859 
1860 
1861 	hpb->cur_read_id = 0;
1862 
1863 	hpb->lu_pinned_start = hpb_lu_info->pinned_start;
1864 	hpb->lu_pinned_end = hpb_lu_info->num_pinned ?
1865 		(hpb_lu_info->pinned_start + hpb_lu_info->num_pinned - 1)
1866 		: PINNED_NOT_SET;
1867 	hpb->lru_info.max_lru_active_cnt =
1868 		hpb_lu_info->max_active_rgns - hpb_lu_info->num_pinned;
1869 
1870 	rgn_mem_size = (1ULL << hpb_dev_info->rgn_size) * HPB_RGN_SIZE_UNIT
1871 			* HPB_ENTRY_SIZE;
1872 	do_div(rgn_mem_size, HPB_ENTRY_BLOCK_SIZE);
1873 	hpb->srgn_mem_size = (1ULL << hpb_dev_info->srgn_size)
1874 		* HPB_RGN_SIZE_UNIT / HPB_ENTRY_BLOCK_SIZE * HPB_ENTRY_SIZE;
1875 
1876 	tmp = rgn_mem_size;
1877 	do_div(tmp, HPB_ENTRY_SIZE);
1878 	entries_per_rgn = (u32)tmp;
1879 	hpb->entries_per_rgn_shift = ilog2(entries_per_rgn);
1880 	hpb->entries_per_rgn_mask = entries_per_rgn - 1;
1881 
1882 	hpb->entries_per_srgn = hpb->srgn_mem_size / HPB_ENTRY_SIZE;
1883 	hpb->entries_per_srgn_shift = ilog2(hpb->entries_per_srgn);
1884 	hpb->entries_per_srgn_mask = hpb->entries_per_srgn - 1;
1885 
1886 	tmp = rgn_mem_size;
1887 	do_div(tmp, hpb->srgn_mem_size);
1888 	hpb->srgns_per_rgn = (int)tmp;
1889 
1890 	hpb->rgns_per_lu = DIV_ROUND_UP(hpb_lu_info->num_blocks,
1891 				entries_per_rgn);
1892 	hpb->srgns_per_lu = DIV_ROUND_UP(hpb_lu_info->num_blocks,
1893 				(hpb->srgn_mem_size / HPB_ENTRY_SIZE));
1894 	hpb->last_srgn_entries = hpb_lu_info->num_blocks
1895 				 % (hpb->srgn_mem_size / HPB_ENTRY_SIZE);
1896 
1897 	hpb->pages_per_srgn = DIV_ROUND_UP(hpb->srgn_mem_size, PAGE_SIZE);
1898 
1899 	if (hpb_dev_info->control_mode == HPB_HOST_CONTROL)
1900 		hpb->is_hcm = true;
1901 }
1902 
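/*
 * Allocate and initialize the region table of the LU. Pinned regions get
 * their map contexts allocated up front and are queued for activation;
 * all other regions start out inactive.
 */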
1903 static int ufshpb_alloc_region_tbl(struct ufs_hba *hba, struct ufshpb_lu *hpb)
1904 {
1905 	struct ufshpb_region *rgn_table, *rgn;
1906 	int rgn_idx, i;
1907 	int ret = 0;
1908 
1909 	rgn_table = kvcalloc(hpb->rgns_per_lu, sizeof(struct ufshpb_region),
1910 			    GFP_KERNEL);
1911 	if (!rgn_table)
1912 		return -ENOMEM;
1913 
1914 	hpb->rgn_tbl = rgn_table;
1915 
1916 	for (rgn_idx = 0; rgn_idx < hpb->rgns_per_lu; rgn_idx++) {
1917 		int srgn_cnt = hpb->srgns_per_rgn;
1918 		bool last_srgn = false;
1919 
1920 		rgn = rgn_table + rgn_idx;
1921 		rgn->rgn_idx = rgn_idx;
1922 
1923 		spin_lock_init(&rgn->rgn_lock);
1924 
1925 		INIT_LIST_HEAD(&rgn->list_inact_rgn);
1926 		INIT_LIST_HEAD(&rgn->list_lru_rgn);
1927 		INIT_LIST_HEAD(&rgn->list_expired_rgn);
1928 
1929 		if (rgn_idx == hpb->rgns_per_lu - 1) {
1930 			srgn_cnt = ((hpb->srgns_per_lu - 1) %
1931 				    hpb->srgns_per_rgn) + 1;
1932 			last_srgn = true;
1933 		}
1934 
1935 		ret = ufshpb_alloc_subregion_tbl(hpb, rgn, srgn_cnt);
1936 		if (ret)
1937 			goto release_srgn_table;
1938 		ufshpb_init_subregion_tbl(hpb, rgn, last_srgn);
1939 
1940 		if (ufshpb_is_pinned_region(hpb, rgn_idx)) {
1941 			ret = ufshpb_init_pinned_active_region(hba, hpb, rgn);
1942 			if (ret)
1943 				goto release_srgn_table;
1944 		} else {
1945 			rgn->rgn_state = HPB_RGN_INACTIVE;
1946 		}
1947 
1948 		rgn->rgn_flags = 0;
1949 		rgn->hpb = hpb;
1950 	}
1951 
1952 	return 0;
1953 
1954 release_srgn_table:
1955 	for (i = 0; i < rgn_idx; i++) {
1956 		rgn = rgn_table + i;
1957 		kvfree(rgn->srgn_tbl);
1958 	}
1959 	kvfree(rgn_table);
1960 	return ret;
1961 }
1962 
1963 static void ufshpb_destroy_subregion_tbl(struct ufshpb_lu *hpb,
1964 					 struct ufshpb_region *rgn)
1965 {
1966 	int srgn_idx;
1967 	struct ufshpb_subregion *srgn;
1968 
1969 	for_each_sub_region(rgn, srgn_idx, srgn)
1970 		if (srgn->srgn_state != HPB_SRGN_UNUSED) {
1971 			srgn->srgn_state = HPB_SRGN_UNUSED;
1972 			ufshpb_put_map_ctx(hpb, srgn->mctx);
1973 		}
1974 }
1975 
1976 static void ufshpb_destroy_region_tbl(struct ufshpb_lu *hpb)
1977 {
1978 	int rgn_idx;
1979 
1980 	for (rgn_idx = 0; rgn_idx < hpb->rgns_per_lu; rgn_idx++) {
1981 		struct ufshpb_region *rgn;
1982 
1983 		rgn = hpb->rgn_tbl + rgn_idx;
1984 		if (rgn->rgn_state != HPB_RGN_INACTIVE) {
1985 			rgn->rgn_state = HPB_RGN_INACTIVE;
1986 
1987 			ufshpb_destroy_subregion_tbl(hpb, rgn);
1988 		}
1989 
1990 		kvfree(rgn->srgn_tbl);
1991 	}
1992 
1993 	kvfree(hpb->rgn_tbl);
1994 }
1995 
1996 /* SYSFS functions */
1997 #define ufshpb_sysfs_attr_show_func(__name)				\
1998 static ssize_t __name##_show(struct device *dev,			\
1999 	struct device_attribute *attr, char *buf)			\
2000 {									\
2001 	struct scsi_device *sdev = to_scsi_device(dev);			\
2002 	struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);		\
2003 									\
2004 	if (!hpb)							\
2005 		return -ENODEV;						\
2006 									\
2007 	return sysfs_emit(buf, "%llu\n", hpb->stats.__name);		\
2008 }									\
2009 \
2010 static DEVICE_ATTR_RO(__name)
2011 
2012 ufshpb_sysfs_attr_show_func(hit_cnt);
2013 ufshpb_sysfs_attr_show_func(miss_cnt);
2014 ufshpb_sysfs_attr_show_func(rb_noti_cnt);
2015 ufshpb_sysfs_attr_show_func(rb_active_cnt);
2016 ufshpb_sysfs_attr_show_func(rb_inactive_cnt);
2017 ufshpb_sysfs_attr_show_func(map_req_cnt);
2018 ufshpb_sysfs_attr_show_func(umap_req_cnt);
2019 
2020 static struct attribute *hpb_dev_stat_attrs[] = {
2021 	&dev_attr_hit_cnt.attr,
2022 	&dev_attr_miss_cnt.attr,
2023 	&dev_attr_rb_noti_cnt.attr,
2024 	&dev_attr_rb_active_cnt.attr,
2025 	&dev_attr_rb_inactive_cnt.attr,
2026 	&dev_attr_map_req_cnt.attr,
2027 	&dev_attr_umap_req_cnt.attr,
2028 	NULL,
2029 };
2030 
2031 struct attribute_group ufs_sysfs_hpb_stat_group = {
2032 	.name = "hpb_stats",
2033 	.attrs = hpb_dev_stat_attrs,
2034 };
2035 
2036 /* SYSFS functions */
2037 #define ufshpb_sysfs_param_show_func(__name)				\
2038 static ssize_t __name##_show(struct device *dev,			\
2039 	struct device_attribute *attr, char *buf)			\
2040 {									\
2041 	struct scsi_device *sdev = to_scsi_device(dev);			\
2042 	struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);		\
2043 									\
2044 	if (!hpb)							\
2045 		return -ENODEV;						\
2046 									\
2047 	return sysfs_emit(buf, "%d\n", hpb->params.__name);		\
2048 }
2049 
2050 ufshpb_sysfs_param_show_func(requeue_timeout_ms);
2051 static ssize_t
2052 requeue_timeout_ms_store(struct device *dev, struct device_attribute *attr,
2053 			 const char *buf, size_t count)
2054 {
2055 	struct scsi_device *sdev = to_scsi_device(dev);
2056 	struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
2057 	int val;
2058 
2059 	if (!hpb)
2060 		return -ENODEV;
2061 
2062 	if (kstrtouint(buf, 0, &val))
2063 		return -EINVAL;
2064 
2065 	if (val < 0)
2066 		return -EINVAL;
2067 
2068 	hpb->params.requeue_timeout_ms = val;
2069 
2070 	return count;
2071 }
2072 static DEVICE_ATTR_RW(requeue_timeout_ms);
2073 
2074 ufshpb_sysfs_param_show_func(activation_thld);
2075 static ssize_t
2076 activation_thld_store(struct device *dev, struct device_attribute *attr,
2077 		      const char *buf, size_t count)
2078 {
2079 	struct scsi_device *sdev = to_scsi_device(dev);
2080 	struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
2081 	int val;
2082 
2083 	if (!hpb)
2084 		return -ENODEV;
2085 
2086 	if (!hpb->is_hcm)
2087 		return -EOPNOTSUPP;
2088 
2089 	if (kstrtouint(buf, 0, &val))
2090 		return -EINVAL;
2091 
2092 	if (val <= 0)
2093 		return -EINVAL;
2094 
2095 	hpb->params.activation_thld = val;
2096 
2097 	return count;
2098 }
2099 static DEVICE_ATTR_RW(activation_thld);
2100 
2101 ufshpb_sysfs_param_show_func(normalization_factor);
2102 static ssize_t
2103 normalization_factor_store(struct device *dev, struct device_attribute *attr,
2104 			   const char *buf, size_t count)
2105 {
2106 	struct scsi_device *sdev = to_scsi_device(dev);
2107 	struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
2108 	int val;
2109 
2110 	if (!hpb)
2111 		return -ENODEV;
2112 
2113 	if (!hpb->is_hcm)
2114 		return -EOPNOTSUPP;
2115 
2116 	if (kstrtouint(buf, 0, &val))
2117 		return -EINVAL;
2118 
2119 	if (val <= 0 || val > ilog2(hpb->entries_per_srgn))
2120 		return -EINVAL;
2121 
2122 	hpb->params.normalization_factor = val;
2123 
2124 	return count;
2125 }
2126 static DEVICE_ATTR_RW(normalization_factor);
2127 
2128 ufshpb_sysfs_param_show_func(eviction_thld_enter);
2129 static ssize_t
2130 eviction_thld_enter_store(struct device *dev, struct device_attribute *attr,
2131 			  const char *buf, size_t count)
2132 {
2133 	struct scsi_device *sdev = to_scsi_device(dev);
2134 	struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
2135 	int val;
2136 
2137 	if (!hpb)
2138 		return -ENODEV;
2139 
2140 	if (!hpb->is_hcm)
2141 		return -EOPNOTSUPP;
2142 
2143 	if (kstrtouint(buf, 0, &val))
2144 		return -EINVAL;
2145 
2146 	if (val <= hpb->params.eviction_thld_exit)
2147 		return -EINVAL;
2148 
2149 	hpb->params.eviction_thld_enter = val;
2150 
2151 	return count;
2152 }
2153 static DEVICE_ATTR_RW(eviction_thld_enter);
2154 
2155 ufshpb_sysfs_param_show_func(eviction_thld_exit);
2156 static ssize_t
2157 eviction_thld_exit_store(struct device *dev, struct device_attribute *attr,
2158 			 const char *buf, size_t count)
2159 {
2160 	struct scsi_device *sdev = to_scsi_device(dev);
2161 	struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
2162 	int val;
2163 
2164 	if (!hpb)
2165 		return -ENODEV;
2166 
2167 	if (!hpb->is_hcm)
2168 		return -EOPNOTSUPP;
2169 
2170 	if (kstrtouint(buf, 0, &val))
2171 		return -EINVAL;
2172 
2173 	if (val <= hpb->params.activation_thld)
2174 		return -EINVAL;
2175 
2176 	hpb->params.eviction_thld_exit = val;
2177 
2178 	return count;
2179 }
2180 static DEVICE_ATTR_RW(eviction_thld_exit);
2181 
2182 ufshpb_sysfs_param_show_func(read_timeout_ms);
2183 static ssize_t
2184 read_timeout_ms_store(struct device *dev, struct device_attribute *attr,
2185 		      const char *buf, size_t count)
2186 {
2187 	struct scsi_device *sdev = to_scsi_device(dev);
2188 	struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
2189 	int val;
2190 
2191 	if (!hpb)
2192 		return -ENODEV;
2193 
2194 	if (!hpb->is_hcm)
2195 		return -EOPNOTSUPP;
2196 
2197 	if (kstrtouint(buf, 0, &val))
2198 		return -EINVAL;
2199 
2200 	/* read_timeout >> timeout_polling_interval */
2201 	if (val < hpb->params.timeout_polling_interval_ms * 2)
2202 		return -EINVAL;
2203 
2204 	hpb->params.read_timeout_ms = val;
2205 
2206 	return count;
2207 }
2208 static DEVICE_ATTR_RW(read_timeout_ms);
2209 
2210 ufshpb_sysfs_param_show_func(read_timeout_expiries);
2211 static ssize_t
2212 read_timeout_expiries_store(struct device *dev, struct device_attribute *attr,
2213 			    const char *buf, size_t count)
2214 {
2215 	struct scsi_device *sdev = to_scsi_device(dev);
2216 	struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
2217 	int val;
2218 
2219 	if (!hpb)
2220 		return -ENODEV;
2221 
2222 	if (!hpb->is_hcm)
2223 		return -EOPNOTSUPP;
2224 
2225 	if (kstrtouint(buf, 0, &val))
2226 		return -EINVAL;
2227 
2228 	if (val <= 0)
2229 		return -EINVAL;
2230 
2231 	hpb->params.read_timeout_expiries = val;
2232 
2233 	return count;
2234 }
2235 static DEVICE_ATTR_RW(read_timeout_expiries);
2236 
2237 ufshpb_sysfs_param_show_func(timeout_polling_interval_ms);
2238 static ssize_t
2239 timeout_polling_interval_ms_store(struct device *dev,
2240 				  struct device_attribute *attr,
2241 				  const char *buf, size_t count)
2242 {
2243 	struct scsi_device *sdev = to_scsi_device(dev);
2244 	struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
2245 	int val;
2246 
2247 	if (!hpb)
2248 		return -ENODEV;
2249 
2250 	if (!hpb->is_hcm)
2251 		return -EOPNOTSUPP;
2252 
2253 	if (kstrtouint(buf, 0, &val))
2254 		return -EINVAL;
2255 
2256 	/* timeout_polling_interval << read_timeout */
2257 	if (val <= 0 || val > hpb->params.read_timeout_ms / 2)
2258 		return -EINVAL;
2259 
2260 	hpb->params.timeout_polling_interval_ms = val;
2261 
2262 	return count;
2263 }
2264 static DEVICE_ATTR_RW(timeout_polling_interval_ms);
2265 
2266 ufshpb_sysfs_param_show_func(inflight_map_req);
2267 static ssize_t inflight_map_req_store(struct device *dev,
2268 				      struct device_attribute *attr,
2269 				      const char *buf, size_t count)
2270 {
2271 	struct scsi_device *sdev = to_scsi_device(dev);
2272 	struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
2273 	int val;
2274 
2275 	if (!hpb)
2276 		return -ENODEV;
2277 
2278 	if (!hpb->is_hcm)
2279 		return -EOPNOTSUPP;
2280 
2281 	if (kstrtouint(buf, 0, &val))
2282 		return -EINVAL;
2283 
2284 	if (val <= 0 || val > hpb->sdev_ufs_lu->queue_depth - 1)
2285 		return -EINVAL;
2286 
2287 	hpb->params.inflight_map_req = val;
2288 
2289 	return count;
2290 }
2291 static DEVICE_ATTR_RW(inflight_map_req);
2292 
2293 
2294 static void ufshpb_hcm_param_init(struct ufshpb_lu *hpb)
2295 {
2296 	hpb->params.activation_thld = ACTIVATION_THRESHOLD;
2297 	hpb->params.normalization_factor = 1;
2298 	hpb->params.eviction_thld_enter = (ACTIVATION_THRESHOLD << 5);
2299 	hpb->params.eviction_thld_exit = (ACTIVATION_THRESHOLD << 4);
2300 	hpb->params.read_timeout_ms = READ_TO_MS;
2301 	hpb->params.read_timeout_expiries = READ_TO_EXPIRIES;
2302 	hpb->params.timeout_polling_interval_ms = POLLING_INTERVAL_MS;
2303 	hpb->params.inflight_map_req = THROTTLE_MAP_REQ_DEFAULT;
2304 }
2305 
2306 static struct attribute *hpb_dev_param_attrs[] = {
2307 	&dev_attr_requeue_timeout_ms.attr,
2308 	&dev_attr_activation_thld.attr,
2309 	&dev_attr_normalization_factor.attr,
2310 	&dev_attr_eviction_thld_enter.attr,
2311 	&dev_attr_eviction_thld_exit.attr,
2312 	&dev_attr_read_timeout_ms.attr,
2313 	&dev_attr_read_timeout_expiries.attr,
2314 	&dev_attr_timeout_polling_interval_ms.attr,
2315 	&dev_attr_inflight_map_req.attr,
2316 	NULL,
2317 };
2318 
2319 struct attribute_group ufs_sysfs_hpb_param_group = {
2320 	.name = "hpb_params",
2321 	.attrs = hpb_dev_param_attrs,
2322 };
2323 
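/*
 * Pre-allocate the pre_req pool used for HPB WRITE BUFFER commands: one
 * entry (a bio plus a zeroed page) for every two slots of the LU's queue
 * depth.
 */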
2324 static int ufshpb_pre_req_mempool_init(struct ufshpb_lu *hpb)
2325 {
2326 	struct ufshpb_req *pre_req = NULL, *t;
2327 	int qd = hpb->sdev_ufs_lu->queue_depth / 2;
2328 	int i;
2329 
2330 	INIT_LIST_HEAD(&hpb->lh_pre_req_free);
2331 
2332 	hpb->pre_req = kcalloc(qd, sizeof(struct ufshpb_req), GFP_KERNEL);
2333 	hpb->throttle_pre_req = qd;
2334 	hpb->num_inflight_pre_req = 0;
2335 
2336 	if (!hpb->pre_req)
2337 		goto release_mem;
2338 
2339 	for (i = 0; i < qd; i++) {
2340 		pre_req = hpb->pre_req + i;
2341 		INIT_LIST_HEAD(&pre_req->list_req);
2342 		pre_req->req = NULL;
2343 
2344 		pre_req->bio = bio_alloc(GFP_KERNEL, 1);
2345 		if (!pre_req->bio)
2346 			goto release_mem;
2347 
2348 		pre_req->wb.m_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
2349 		if (!pre_req->wb.m_page) {
2350 			bio_put(pre_req->bio);
2351 			goto release_mem;
2352 		}
2353 
2354 		list_add_tail(&pre_req->list_req, &hpb->lh_pre_req_free);
2355 	}
2356 
2357 	return 0;
2358 release_mem:
2359 	list_for_each_entry_safe(pre_req, t, &hpb->lh_pre_req_free, list_req) {
2360 		list_del_init(&pre_req->list_req);
2361 		bio_put(pre_req->bio);
2362 		__free_page(pre_req->wb.m_page);
2363 	}
2364 
2365 	kfree(hpb->pre_req);
2366 	return -ENOMEM;
2367 }
2368 
2369 static void ufshpb_pre_req_mempool_destroy(struct ufshpb_lu *hpb)
2370 {
2371 	struct ufshpb_req *pre_req = NULL;
2372 	int i;
2373 
2374 	for (i = 0; i < hpb->throttle_pre_req; i++) {
2375 		pre_req = hpb->pre_req + i;
2376 		bio_put(hpb->pre_req[i].bio);
2377 		if (pre_req->wb.m_page)
2378 			__free_page(hpb->pre_req[i].wb.m_page);
2379 		list_del_init(&pre_req->list_req);
2380 	}
2381 
2382 	kfree(hpb->pre_req);
2383 }
2384 
2385 static void ufshpb_stat_init(struct ufshpb_lu *hpb)
2386 {
2387 	hpb->stats.hit_cnt = 0;
2388 	hpb->stats.miss_cnt = 0;
2389 	hpb->stats.rb_noti_cnt = 0;
2390 	hpb->stats.rb_active_cnt = 0;
2391 	hpb->stats.rb_inactive_cnt = 0;
2392 	hpb->stats.map_req_cnt = 0;
2393 	hpb->stats.umap_req_cnt = 0;
2394 }
2395 
2396 static void ufshpb_param_init(struct ufshpb_lu *hpb)
2397 {
2398 	hpb->params.requeue_timeout_ms = HPB_REQUEUE_TIME_MS;
2399 	if (hpb->is_hcm)
2400 		ufshpb_hcm_param_init(hpb);
2401 }
2402 
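/*
 * Per-LU initialization: set up locks, lists and work items, create the
 * map-request and map-page caches, the pre_req pool and the region table,
 * then reset statistics and parameters. In host control mode the read
 * timeout poller is started as well.
 */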
2403 static int ufshpb_lu_hpb_init(struct ufs_hba *hba, struct ufshpb_lu *hpb)
2404 {
2405 	int ret;
2406 
2407 	spin_lock_init(&hpb->rgn_state_lock);
2408 	spin_lock_init(&hpb->rsp_list_lock);
2409 	spin_lock_init(&hpb->param_lock);
2410 
2411 	INIT_LIST_HEAD(&hpb->lru_info.lh_lru_rgn);
2412 	INIT_LIST_HEAD(&hpb->lh_act_srgn);
2413 	INIT_LIST_HEAD(&hpb->lh_inact_rgn);
2414 	INIT_LIST_HEAD(&hpb->list_hpb_lu);
2415 
2416 	INIT_WORK(&hpb->map_work, ufshpb_map_work_handler);
2417 	if (hpb->is_hcm) {
2418 		INIT_WORK(&hpb->ufshpb_normalization_work,
2419 			  ufshpb_normalization_work_handler);
2420 		INIT_DELAYED_WORK(&hpb->ufshpb_read_to_work,
2421 				  ufshpb_read_to_handler);
2422 	}
2423 
2424 	hpb->map_req_cache = kmem_cache_create("ufshpb_req_cache",
2425 			  sizeof(struct ufshpb_req), 0, 0, NULL);
2426 	if (!hpb->map_req_cache) {
2427 		dev_err(hba->dev, "ufshpb(%d) ufshpb_req_cache create fail",
2428 			hpb->lun);
2429 		return -ENOMEM;
2430 	}
2431 
2432 	hpb->m_page_cache = kmem_cache_create("ufshpb_m_page_cache",
2433 			  sizeof(struct page *) * hpb->pages_per_srgn,
2434 			  0, 0, NULL);
2435 	if (!hpb->m_page_cache) {
2436 		dev_err(hba->dev, "ufshpb(%d) ufshpb_m_page_cache create fail",
2437 			hpb->lun);
2438 		ret = -ENOMEM;
2439 		goto release_req_cache;
2440 	}
2441 
2442 	ret = ufshpb_pre_req_mempool_init(hpb);
2443 	if (ret) {
2444 		dev_err(hba->dev, "ufshpb(%d) pre_req_mempool init fail",
2445 			hpb->lun);
2446 		goto release_m_page_cache;
2447 	}
2448 
2449 	ret = ufshpb_alloc_region_tbl(hba, hpb);
2450 	if (ret)
2451 		goto release_pre_req_mempool;
2452 
2453 	ufshpb_stat_init(hpb);
2454 	ufshpb_param_init(hpb);
2455 
2456 	if (hpb->is_hcm) {
2457 		unsigned int poll;
2458 
2459 		poll = hpb->params.timeout_polling_interval_ms;
2460 		schedule_delayed_work(&hpb->ufshpb_read_to_work,
2461 				      msecs_to_jiffies(poll));
2462 	}
2463 
2464 	return 0;
2465 
2466 release_pre_req_mempool:
2467 	ufshpb_pre_req_mempool_destroy(hpb);
2468 release_m_page_cache:
2469 	kmem_cache_destroy(hpb->m_page_cache);
2470 release_req_cache:
2471 	kmem_cache_destroy(hpb->map_req_cache);
2472 	return ret;
2473 }
2474 
2475 static struct ufshpb_lu *
2476 ufshpb_alloc_hpb_lu(struct ufs_hba *hba, struct scsi_device *sdev,
2477 		    struct ufshpb_dev_info *hpb_dev_info,
2478 		    struct ufshpb_lu_info *hpb_lu_info)
2479 {
2480 	struct ufshpb_lu *hpb;
2481 	int ret;
2482 
2483 	hpb = kzalloc(sizeof(struct ufshpb_lu), GFP_KERNEL);
2484 	if (!hpb)
2485 		return NULL;
2486 
2487 	hpb->lun = sdev->lun;
2488 	hpb->sdev_ufs_lu = sdev;
2489 
2490 	ufshpb_lu_parameter_init(hba, hpb, hpb_dev_info, hpb_lu_info);
2491 
2492 	ret = ufshpb_lu_hpb_init(hba, hpb);
2493 	if (ret) {
2494 		dev_err(hba->dev, "hpb lu init failed. ret %d", ret);
2495 		goto release_hpb;
2496 	}
2497 
2498 	sdev->hostdata = hpb;
2499 	return hpb;
2500 
2501 release_hpb:
2502 	kfree(hpb);
2503 	return NULL;
2504 }
2505 
2506 static void ufshpb_discard_rsp_lists(struct ufshpb_lu *hpb)
2507 {
2508 	struct ufshpb_region *rgn, *next_rgn;
2509 	struct ufshpb_subregion *srgn, *next_srgn;
2510 	unsigned long flags;
2511 
2512 	/*
2513 	 * If a device reset occurred, the remaining HPB region information
2514 	 * may be stale. Discard the HPB response lists that remained after
2515 	 * the reset to prevent unnecessary work.
2516 	 */
2517 	spin_lock_irqsave(&hpb->rsp_list_lock, flags);
2518 	list_for_each_entry_safe(rgn, next_rgn, &hpb->lh_inact_rgn,
2519 				 list_inact_rgn)
2520 		list_del_init(&rgn->list_inact_rgn);
2521 
2522 	list_for_each_entry_safe(srgn, next_srgn, &hpb->lh_act_srgn,
2523 				 list_act_srgn)
2524 		list_del_init(&srgn->list_act_srgn);
2525 	spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
2526 }
2527 
2528 static void ufshpb_cancel_jobs(struct ufshpb_lu *hpb)
2529 {
2530 	if (hpb->is_hcm) {
2531 		cancel_delayed_work_sync(&hpb->ufshpb_read_to_work);
2532 		cancel_work_sync(&hpb->ufshpb_normalization_work);
2533 	}
2534 	cancel_work_sync(&hpb->map_work);
2535 }
2536 
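/*
 * Poll the fHpbReset flag until the device clears it, retrying up to
 * HPB_RESET_REQ_RETRIES times. Returns the last value read, so false
 * means the device completed the HPB reset.
 */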
2537 static bool ufshpb_check_hpb_reset_query(struct ufs_hba *hba)
2538 {
2539 	int err = 0;
2540 	bool flag_res = true;
2541 	int try;
2542 
2543 	/* wait for the device to complete HPB reset query */
2544 	for (try = 0; try < HPB_RESET_REQ_RETRIES; try++) {
2545 		dev_dbg(hba->dev,
2546 			"%s start flag reset polling %d times\n",
2547 			__func__, try);
2548 
2549 		/* Poll fHpbReset flag to be cleared */
2550 		err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
2551 				QUERY_FLAG_IDN_HPB_RESET, 0, &flag_res);
2552 
2553 		if (err) {
2554 			dev_err(hba->dev,
2555 				"%s reading fHpbReset flag failed with error %d\n",
2556 				__func__, err);
2557 			return flag_res;
2558 		}
2559 
2560 		if (!flag_res)
2561 			goto out;
2562 
2563 		usleep_range(1000, 1100);
2564 	}
2565 	if (flag_res) {
2566 		dev_err(hba->dev,
2567 			"%s fHpbReset was not cleared by the device\n",
2568 			__func__);
2569 	}
2570 out:
2571 	return flag_res;
2572 }
2573 
2574 void ufshpb_reset(struct ufs_hba *hba)
2575 {
2576 	struct ufshpb_lu *hpb;
2577 	struct scsi_device *sdev;
2578 
2579 	shost_for_each_device(sdev, hba->host) {
2580 		hpb = ufshpb_get_hpb_data(sdev);
2581 		if (!hpb)
2582 			continue;
2583 
2584 		if (ufshpb_get_state(hpb) != HPB_RESET)
2585 			continue;
2586 
2587 		ufshpb_set_state(hpb, HPB_PRESENT);
2588 	}
2589 }
2590 
2591 void ufshpb_reset_host(struct ufs_hba *hba)
2592 {
2593 	struct ufshpb_lu *hpb;
2594 	struct scsi_device *sdev;
2595 
2596 	shost_for_each_device(sdev, hba->host) {
2597 		hpb = ufshpb_get_hpb_data(sdev);
2598 		if (!hpb)
2599 			continue;
2600 
2601 		if (ufshpb_get_state(hpb) != HPB_PRESENT)
2602 			continue;
2603 		ufshpb_set_state(hpb, HPB_RESET);
2604 		ufshpb_cancel_jobs(hpb);
2605 		ufshpb_discard_rsp_lists(hpb);
2606 	}
2607 }
2608 
2609 void ufshpb_suspend(struct ufs_hba *hba)
2610 {
2611 	struct ufshpb_lu *hpb;
2612 	struct scsi_device *sdev;
2613 
2614 	shost_for_each_device(sdev, hba->host) {
2615 		hpb = ufshpb_get_hpb_data(sdev);
2616 		if (!hpb)
2617 			continue;
2618 
2619 		if (ufshpb_get_state(hpb) != HPB_PRESENT)
2620 			continue;
2621 		ufshpb_set_state(hpb, HPB_SUSPEND);
2622 		ufshpb_cancel_jobs(hpb);
2623 	}
2624 }
2625 
2626 void ufshpb_resume(struct ufs_hba *hba)
2627 {
2628 	struct ufshpb_lu *hpb;
2629 	struct scsi_device *sdev;
2630 
2631 	shost_for_each_device(sdev, hba->host) {
2632 		hpb = ufshpb_get_hpb_data(sdev);
2633 		if (!hpb)
2634 			continue;
2635 
2636 		if ((ufshpb_get_state(hpb) != HPB_PRESENT) &&
2637 		    (ufshpb_get_state(hpb) != HPB_SUSPEND))
2638 			continue;
2639 		ufshpb_set_state(hpb, HPB_PRESENT);
2640 		ufshpb_kick_map_work(hpb);
2641 		if (hpb->is_hcm) {
2642 			unsigned int poll =
2643 				hpb->params.timeout_polling_interval_ms;
2644 
2645 			schedule_delayed_work(&hpb->ufshpb_read_to_work,
2646 				msecs_to_jiffies(poll));
2647 		}
2648 	}
2649 }
2650 
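/*
 * Read the unit descriptor of @lun and extract its HPB parameters
 * (logical block count, pinned region range, max active regions).
 * Returns -ENODEV if HPB is not enabled for the LU.
 */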
2651 static int ufshpb_get_lu_info(struct ufs_hba *hba, int lun,
2652 			      struct ufshpb_lu_info *hpb_lu_info)
2653 {
2654 	u16 max_active_rgns;
2655 	u8 lu_enable;
2656 	int size;
2657 	int ret;
2658 	char desc_buf[QUERY_DESC_MAX_SIZE];
2659 
2660 	ufshcd_map_desc_id_to_length(hba, QUERY_DESC_IDN_UNIT, &size);
2661 
2662 	pm_runtime_get_sync(hba->dev);
2663 	ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
2664 					    QUERY_DESC_IDN_UNIT, lun, 0,
2665 					    desc_buf, &size);
2666 	pm_runtime_put_sync(hba->dev);
2667 
2668 	if (ret) {
2669 		dev_err(hba->dev,
2670 			"%s: idn: %d lun: %d  query request failed",
2671 			__func__, QUERY_DESC_IDN_UNIT, lun);
2672 		return ret;
2673 	}
2674 
2675 	lu_enable = desc_buf[UNIT_DESC_PARAM_LU_ENABLE];
2676 	if (lu_enable != LU_ENABLED_HPB_FUNC)
2677 		return -ENODEV;
2678 
2679 	max_active_rgns = get_unaligned_be16(
2680 			desc_buf + UNIT_DESC_PARAM_HPB_LU_MAX_ACTIVE_RGNS);
2681 	if (!max_active_rgns) {
2682 		dev_err(hba->dev,
2683 			"lun %d wrong number of max active regions\n", lun);
2684 		return -ENODEV;
2685 	}
2686 
2687 	hpb_lu_info->num_blocks = get_unaligned_be64(
2688 			desc_buf + UNIT_DESC_PARAM_LOGICAL_BLK_COUNT);
2689 	hpb_lu_info->pinned_start = get_unaligned_be16(
2690 			desc_buf + UNIT_DESC_PARAM_HPB_PIN_RGN_START_OFF);
2691 	hpb_lu_info->num_pinned = get_unaligned_be16(
2692 			desc_buf + UNIT_DESC_PARAM_HPB_NUM_PIN_RGNS);
2693 	hpb_lu_info->max_active_rgns = max_active_rgns;
2694 
2695 	return 0;
2696 }
2697 
2698 void ufshpb_destroy_lu(struct ufs_hba *hba, struct scsi_device *sdev)
2699 {
2700 	struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
2701 
2702 	if (!hpb)
2703 		return;
2704 
2705 	ufshpb_set_state(hpb, HPB_FAILED);
2706 
2707 	sdev = hpb->sdev_ufs_lu;
2708 	sdev->hostdata = NULL;
2709 
2710 	ufshpb_cancel_jobs(hpb);
2711 
2712 	ufshpb_pre_req_mempool_destroy(hpb);
2713 	ufshpb_destroy_region_tbl(hpb);
2714 
2715 	kmem_cache_destroy(hpb->map_req_cache);
2716 	kmem_cache_destroy(hpb->m_page_cache);
2717 
2718 	list_del_init(&hpb->list_hpb_lu);
2719 
2720 	kfree(hpb);
2721 }
2722 
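/*
 * Called once every LU has been initialized. Shrink the global mempools
 * to the total number of active sub-region pages, then either bring each
 * HPB LU to the PRESENT state (kicking map work if pinned regions are
 * configured) or tear everything down if the HPB reset query failed.
 */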
2723 static void ufshpb_hpb_lu_prepared(struct ufs_hba *hba)
2724 {
2725 	int pool_size;
2726 	struct ufshpb_lu *hpb;
2727 	struct scsi_device *sdev;
2728 	bool init_success;
2729 
2730 	if (tot_active_srgn_pages == 0) {
2731 		ufshpb_remove(hba);
2732 		return;
2733 	}
2734 
2735 	init_success = !ufshpb_check_hpb_reset_query(hba);
2736 
2737 	pool_size = PAGE_ALIGN(ufshpb_host_map_kbytes * 1024) / PAGE_SIZE;
2738 	if (pool_size > tot_active_srgn_pages) {
2739 		mempool_resize(ufshpb_mctx_pool, tot_active_srgn_pages);
2740 		mempool_resize(ufshpb_page_pool, tot_active_srgn_pages);
2741 	}
2742 
2743 	shost_for_each_device(sdev, hba->host) {
2744 		hpb = ufshpb_get_hpb_data(sdev);
2745 		if (!hpb)
2746 			continue;
2747 
2748 		if (init_success) {
2749 			ufshpb_set_state(hpb, HPB_PRESENT);
2750 			if ((hpb->lu_pinned_end - hpb->lu_pinned_start) > 0)
2751 				queue_work(ufshpb_wq, &hpb->map_work);
2752 			if (!hpb->is_hcm)
2753 				ufshpb_issue_umap_all_req(hpb);
2754 		} else {
2755 			dev_err(hba->dev, "destroy HPB lu %d\n", hpb->lun);
2756 			ufshpb_destroy_lu(hba, sdev);
2757 		}
2758 	}
2759 
2760 	if (!init_success)
2761 		ufshpb_remove(hba);
2762 }
2763 
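/*
 * Called for each scsi_device of the host: read the LU's HPB unit
 * descriptor, allocate its ufshpb_lu instance and account for its
 * sub-region pages. When the last LU has been handled, complete the
 * device-wide initialization.
 */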
2764 void ufshpb_init_hpb_lu(struct ufs_hba *hba, struct scsi_device *sdev)
2765 {
2766 	struct ufshpb_lu *hpb;
2767 	int ret;
2768 	struct ufshpb_lu_info hpb_lu_info = { 0 };
2769 	int lun = sdev->lun;
2770 
2771 	if (lun >= hba->dev_info.max_lu_supported)
2772 		goto out;
2773 
2774 	ret = ufshpb_get_lu_info(hba, lun, &hpb_lu_info);
2775 	if (ret)
2776 		goto out;
2777 
2778 	hpb = ufshpb_alloc_hpb_lu(hba, sdev, ufs_hba_to_hpb(hba), &hpb_lu_info);
2779 	if (!hpb)
2780 		goto out;
2781 
2782 	tot_active_srgn_pages += hpb_lu_info.max_active_rgns *
2783 			hpb->srgns_per_rgn * hpb->pages_per_srgn;
2784 
2785 out:
2786 	/* All LUs are initialized */
2787 	if (atomic_dec_and_test(&ufs_hba_to_hpb(hba)->slave_conf_cnt))
2788 		ufshpb_hpb_lu_prepared(hba);
2789 }
2790 
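/*
 * Create the global map-context cache, the mctx and page mempools sized
 * from ufshpb_host_map_kbytes, and the unbound workqueue used for map
 * work.
 */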
2791 static int ufshpb_init_mem_wq(struct ufs_hba *hba)
2792 {
2793 	int ret;
2794 	unsigned int pool_size;
2795 
2796 	ufshpb_mctx_cache = kmem_cache_create("ufshpb_mctx_cache",
2797 					sizeof(struct ufshpb_map_ctx),
2798 					0, 0, NULL);
2799 	if (!ufshpb_mctx_cache) {
2800 		dev_err(hba->dev, "ufshpb: cannot init mctx cache\n");
2801 		return -ENOMEM;
2802 	}
2803 
2804 	pool_size = PAGE_ALIGN(ufshpb_host_map_kbytes * 1024) / PAGE_SIZE;
2805 	dev_info(hba->dev, "%s:%d ufshpb_host_map_kbytes %u pool_size %u\n",
2806 	       __func__, __LINE__, ufshpb_host_map_kbytes, pool_size);
2807 
2808 	ufshpb_mctx_pool = mempool_create_slab_pool(pool_size,
2809 						    ufshpb_mctx_cache);
2810 	if (!ufshpb_mctx_pool) {
2811 		dev_err(hba->dev, "ufshpb: cannot init mctx pool\n");
2812 		ret = -ENOMEM;
2813 		goto release_mctx_cache;
2814 	}
2815 
2816 	ufshpb_page_pool = mempool_create_page_pool(pool_size, 0);
2817 	if (!ufshpb_page_pool) {
2818 		dev_err(hba->dev, "ufshpb: cannot init page pool\n");
2819 		ret = -ENOMEM;
2820 		goto release_mctx_pool;
2821 	}
2822 
2823 	ufshpb_wq = alloc_workqueue("ufshpb-wq",
2824 					WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
2825 	if (!ufshpb_wq) {
2826 		dev_err(hba->dev, "ufshpb: alloc workqueue failed\n");
2827 		ret = -ENOMEM;
2828 		goto release_page_pool;
2829 	}
2830 
2831 	return 0;
2832 
2833 release_page_pool:
2834 	mempool_destroy(ufshpb_page_pool);
2835 release_mctx_pool:
2836 	mempool_destroy(ufshpb_mctx_pool);
2837 release_mctx_cache:
2838 	kmem_cache_destroy(ufshpb_mctx_cache);
2839 	return ret;
2840 }
2841 
2842 void ufshpb_get_geo_info(struct ufs_hba *hba, u8 *geo_buf)
2843 {
2844 	struct ufshpb_dev_info *hpb_info = ufs_hba_to_hpb(hba);
2845 	int max_active_rgns = 0;
2846 	int hpb_num_lu;
2847 
2848 	hpb_num_lu = geo_buf[GEOMETRY_DESC_PARAM_HPB_NUMBER_LU];
2849 	if (hpb_num_lu == 0) {
2850 		dev_err(hba->dev, "No HPB LU supported\n");
2851 		hpb_info->hpb_disabled = true;
2852 		return;
2853 	}
2854 
2855 	hpb_info->rgn_size = geo_buf[GEOMETRY_DESC_PARAM_HPB_REGION_SIZE];
2856 	hpb_info->srgn_size = geo_buf[GEOMETRY_DESC_PARAM_HPB_SUBREGION_SIZE];
2857 	max_active_rgns = get_unaligned_be16(geo_buf +
2858 			  GEOMETRY_DESC_PARAM_HPB_MAX_ACTIVE_REGS);
2859 
2860 	if (hpb_info->rgn_size == 0 || hpb_info->srgn_size == 0 ||
2861 	    max_active_rgns == 0) {
2862 		dev_err(hba->dev, "No HPB supported device\n");
2863 		hpb_info->hpb_disabled = true;
2864 		return;
2865 	}
2866 }
2867 
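/*
 * Parse the HPB fields of the device descriptor: control mode, HPB
 * version (flagging the legacy version), the maximum single-command
 * chunk size attribute and the number of logical units.
 */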
2868 void ufshpb_get_dev_info(struct ufs_hba *hba, u8 *desc_buf)
2869 {
2870 	struct ufshpb_dev_info *hpb_dev_info = ufs_hba_to_hpb(hba);
2871 	int version, ret;
2872 	u32 max_hpb_single_cmd = HPB_MULTI_CHUNK_LOW;
2873 
2874 	hpb_dev_info->control_mode = desc_buf[DEVICE_DESC_PARAM_HPB_CONTROL];
2875 
2876 	version = get_unaligned_be16(desc_buf + DEVICE_DESC_PARAM_HPB_VER) & HPB_MAJOR_VERSION_MASK;
2877 	if ((version != HPB_SUPPORT_VERSION) &&
2878 	    (version != HPB_SUPPORT_LEGACY_VERSION)) {
2879 		dev_err(hba->dev, "%s: HPB %x version is not supported.\n",
2880 			__func__, version);
2881 		hpb_dev_info->hpb_disabled = true;
2882 		return;
2883 	}
2884 
2885 	if (version == HPB_SUPPORT_LEGACY_VERSION)
2886 		hpb_dev_info->is_legacy = true;
2887 
2888 	pm_runtime_get_sync(hba->dev);
2889 	ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
2890 		QUERY_ATTR_IDN_MAX_HPB_SINGLE_CMD, 0, 0, &max_hpb_single_cmd);
2891 	pm_runtime_put_sync(hba->dev);
2892 
2893 	if (ret)
2894 		dev_err(hba->dev, "%s: idn: read max size of single hpb cmd query request failed",
2895 			__func__);
2896 	hpb_dev_info->max_hpb_single_cmd = max_hpb_single_cmd;
2897 
2898 	/*
2899 	 * Get the number of user logical unit to check whether all
2900 	 * scsi_device finish initialization
2901 	 */
2902 	hpb_dev_info->num_lu = desc_buf[DEVICE_DESC_PARAM_NUM_LU];
2903 }
2904 
2905 void ufshpb_init(struct ufs_hba *hba)
2906 {
2907 	struct ufshpb_dev_info *hpb_dev_info = ufs_hba_to_hpb(hba);
2908 	int try;
2909 	int ret;
2910 
2911 	if (!ufshpb_is_allowed(hba) || !hba->dev_info.hpb_enabled)
2912 		return;
2913 
2914 	if (ufshpb_init_mem_wq(hba)) {
2915 		hpb_dev_info->hpb_disabled = true;
2916 		return;
2917 	}
2918 
2919 	atomic_set(&hpb_dev_info->slave_conf_cnt, hpb_dev_info->num_lu);
2920 	tot_active_srgn_pages = 0;
2921 	/* issue HPB reset query */
2922 	for (try = 0; try < HPB_RESET_REQ_RETRIES; try++) {
2923 		ret = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG,
2924 					QUERY_FLAG_IDN_HPB_RESET, 0, NULL);
2925 		if (!ret)
2926 			break;
2927 	}
2928 }
2929 
2930 void ufshpb_remove(struct ufs_hba *hba)
2931 {
2932 	mempool_destroy(ufshpb_page_pool);
2933 	mempool_destroy(ufshpb_mctx_pool);
2934 	kmem_cache_destroy(ufshpb_mctx_cache);
2935 
2936 	destroy_workqueue(ufshpb_wq);
2937 }
2938 
2939 module_param(ufshpb_host_map_kbytes, uint, 0644);
2940 MODULE_PARM_DESC(ufshpb_host_map_kbytes,
2941 	"ufshpb host mapping memory kilo-bytes for ufshpb memory-pool");
2942