// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Linux MegaRAID driver for SAS based RAID controllers
 *
 *  Copyright (c) 2009-2013  LSI Corporation
 *  Copyright (c) 2013-2016  Avago Technologies
 *  Copyright (c) 2016-2018  Broadcom Inc.
 *
 *  FILE: megaraid_sas_fp.c
 *
 *  Authors: Broadcom Inc.
 *           Sumant Patro
 *           Varad Talamacki
 *           Manoj Jose
 *           Kashyap Desai <kashyap.desai@broadcom.com>
 *           Sumit Saxena <sumit.saxena@broadcom.com>
 *
 *  Send feedback to: megaraidlinux.pdl@broadcom.com
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/list.h>
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/uio.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/compat.h>
#include <linux/blkdev.h>
#include <linux/poll.h>
#include <linux/irq_poll.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>

#include "megaraid_sas_fusion.h"
#include "megaraid_sas.h"
#include <asm/div64.h>

#define LB_PENDING_CMDS_DEFAULT 4
static unsigned int lb_pending_cmds = LB_PENDING_CMDS_DEFAULT;
module_param(lb_pending_cmds, int, 0444);
MODULE_PARM_DESC(lb_pending_cmds, "Change raid-1 load balancing outstanding "
	"threshold. Valid values are 1-128. Default: 4");

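/*
 * lb_pending_cmds is read-only at runtime (mode 0444), so it can only be
 * changed at module load time, e.g. (illustrative):
 *
 *	modprobe megaraid_sas lb_pending_cmds=8
 */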

#define ABS_DIFF(a, b)   (((a) > (b)) ? ((a) - (b)) : ((b) - (a)))
#define MR_LD_STATE_OPTIMAL 3

#define SPAN_ROW_SIZE(map, ld, index_) (MR_LdSpanPtrGet(ld, index_, map)->spanRowSize)
#define SPAN_ROW_DATA_SIZE(map_, ld, index_) (MR_LdSpanPtrGet(ld, index_, map_)->spanRowDataSize)
#define SPAN_INVALID  0xff

/* Prototypes */
static void mr_update_span_set(struct MR_DRV_RAID_MAP_ALL *map,
	PLD_SPAN_INFO ldSpanInfo);
static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
	u64 stripRow, u16 stripRef, struct IO_REQUEST_INFO *io_info,
	struct RAID_CONTEXT *pRAID_Context, struct MR_DRV_RAID_MAP_ALL *map);
static u64 get_row_from_strip(struct megasas_instance *instance, u32 ld,
	u64 strip, struct MR_DRV_RAID_MAP_ALL *map);

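/**
 * mega_mod64 - Remainder of a 64-bit dividend and a 32-bit divisor
 * @dividend: Dividend
 * @divisor:  Divisor
 *
 * Return: remainder of dividend / divisor
 */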
u32 mega_mod64(u64 dividend, u32 divisor)
{
	u64 d;
	u32 remainder;

	if (!divisor)
		printk(KERN_ERR "megasas : DIVISOR is zero, in mod fn\n");
	d = dividend;
	remainder = do_div(d, divisor);
	return remainder;
}

/**
 * mega_div64_32 - Divide a 64-bit dividend by a 32-bit divisor
 * @dividend: Dividend
 * @divisor:  Divisor
 *
 * Return: quotient
 */
u64 mega_div64_32(uint64_t dividend, uint32_t divisor)
{
	u32 remainder;
	u64 d;

	if (!divisor)
		printk(KERN_ERR "megasas : DIVISOR is zero, in div fn\n");

	d = dividend;
	remainder = do_div(d, divisor);

	return d;
}

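/*
 * A worked example of the divmod pair (illustrative values): for
 * logical block 1000005 on an LD with 64 blocks per strip,
 *
 *	mega_div64_32(1000005, 64) == 15625   (strip number)
 *	mega_mod64(1000005, 64)    == 5       (offset within the strip)
 *
 * i.e. together they implement a 64-bit divmod on top of do_div().
 */
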
struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_DRV_RAID_MAP_ALL *map)
{
	return &map->raidMap.ldSpanMap[ld].ldRaid;
}

static struct MR_SPAN_BLOCK_INFO *MR_LdSpanInfoGet(u32 ld,
						   struct MR_DRV_RAID_MAP_ALL
						   *map)
{
	return &map->raidMap.ldSpanMap[ld].spanBlock[0];
}

static u8 MR_LdDataArmGet(u32 ld, u32 armIdx, struct MR_DRV_RAID_MAP_ALL *map)
{
	return map->raidMap.ldSpanMap[ld].dataArmMap[armIdx];
}

u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_DRV_RAID_MAP_ALL *map)
{
	return le16_to_cpu(map->raidMap.arMapInfo[ar].pd[arm]);
}

u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_DRV_RAID_MAP_ALL *map)
{
	return le16_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].span.arrayRef);
}

__le16 MR_PdDevHandleGet(u32 pd, struct MR_DRV_RAID_MAP_ALL *map)
{
	return map->raidMap.devHndlInfo[pd].curDevHdl;
}

static u8 MR_PdInterfaceTypeGet(u32 pd, struct MR_DRV_RAID_MAP_ALL *map)
{
	return map->raidMap.devHndlInfo[pd].interfaceType;
}

u16 MR_GetLDTgtId(u32 ld, struct MR_DRV_RAID_MAP_ALL *map)
{
	return le16_to_cpu(map->raidMap.ldSpanMap[ld].ldRaid.targetId);
}

u16 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_DRV_RAID_MAP_ALL *map)
{
	return map->raidMap.ldTgtIdToLd[ldTgtId];
}

static struct MR_LD_SPAN *MR_LdSpanPtrGet(u32 ld, u32 span,
					  struct MR_DRV_RAID_MAP_ALL *map)
{
	return &map->raidMap.ldSpanMap[ld].spanBlock[span].span;
}

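/*
 * The accessors above resolve entries in the driver RAID map. A typical
 * lookup chain (as used in MR_ValidateMapInfo() below) is:
 *
 *	ld   = MR_TargetIdToLdGet(ldTgtId, map);
 *	raid = MR_LdRaidGet(ld, map);
 */
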
/*
 * This function populates the driver RAID map from the firmware RAID
 * map. It handles all three firmware map formats: the dynamic map
 * (max_raid_mapsize), the extended map (supportmax256vd) and the
 * legacy map. Returns 0 on success, 1 if the firmware map is invalid.
 */
static int MR_PopulateDrvRaidMap(struct megasas_instance *instance, u64 map_id)
{
	struct fusion_context *fusion = instance->ctrl_context;
	struct MR_FW_RAID_MAP_ALL     *fw_map_old    = NULL;
	struct MR_FW_RAID_MAP         *pFwRaidMap    = NULL;
	int i, j;
	u16 ld_count;
	struct MR_FW_RAID_MAP_DYNAMIC *fw_map_dyn;
	struct MR_FW_RAID_MAP_EXT *fw_map_ext;
	struct MR_RAID_MAP_DESC_TABLE *desc_table;


	struct MR_DRV_RAID_MAP_ALL *drv_map =
			fusion->ld_drv_map[(map_id & 1)];
	struct MR_DRV_RAID_MAP *pDrvRaidMap = &drv_map->raidMap;
	void *raid_map_data = NULL;

	memset(drv_map, 0, fusion->drv_map_sz);
	memset(pDrvRaidMap->ldTgtIdToLd,
	       0xff, (sizeof(u16) * MAX_LOGICAL_DRIVES_DYN));

	if (instance->max_raid_mapsize) {
		fw_map_dyn = fusion->ld_map[(map_id & 1)];
		desc_table =
		(struct MR_RAID_MAP_DESC_TABLE *)((void *)fw_map_dyn + le32_to_cpu(fw_map_dyn->desc_table_offset));
		if (desc_table != fw_map_dyn->raid_map_desc_table)
			dev_dbg(&instance->pdev->dev, "offsets of desc table are not matching desc %p original %p\n",
				desc_table, fw_map_dyn->raid_map_desc_table);

		ld_count = (u16)le16_to_cpu(fw_map_dyn->ld_count);
		pDrvRaidMap->ldCount = (__le16)cpu_to_le16(ld_count);
		pDrvRaidMap->fpPdIoTimeoutSec =
			fw_map_dyn->fp_pd_io_timeout_sec;
		pDrvRaidMap->totalSize =
			cpu_to_le32(sizeof(struct MR_DRV_RAID_MAP_ALL));
		/* point to actual data starting point */
		raid_map_data = (void *)fw_map_dyn +
			le32_to_cpu(fw_map_dyn->desc_table_offset) +
			le32_to_cpu(fw_map_dyn->desc_table_size);

		for (i = 0; i < le32_to_cpu(fw_map_dyn->desc_table_num_elements); ++i) {
			switch (le32_to_cpu(desc_table->raid_map_desc_type)) {
			case RAID_MAP_DESC_TYPE_DEVHDL_INFO:
				fw_map_dyn->dev_hndl_info =
				(struct MR_DEV_HANDLE_INFO *)(raid_map_data + le32_to_cpu(desc_table->raid_map_desc_offset));
				memcpy(pDrvRaidMap->devHndlInfo,
					fw_map_dyn->dev_hndl_info,
					sizeof(struct MR_DEV_HANDLE_INFO) *
					le32_to_cpu(desc_table->raid_map_desc_elements));
			break;
			case RAID_MAP_DESC_TYPE_TGTID_INFO:
				fw_map_dyn->ld_tgt_id_to_ld =
					(u16 *)(raid_map_data +
					le32_to_cpu(desc_table->raid_map_desc_offset));
				for (j = 0; j < le32_to_cpu(desc_table->raid_map_desc_elements); j++) {
					pDrvRaidMap->ldTgtIdToLd[j] =
						le16_to_cpu(fw_map_dyn->ld_tgt_id_to_ld[j]);
				}
			break;
			case RAID_MAP_DESC_TYPE_ARRAY_INFO:
				fw_map_dyn->ar_map_info =
					(struct MR_ARRAY_INFO *)
					(raid_map_data + le32_to_cpu(desc_table->raid_map_desc_offset));
				memcpy(pDrvRaidMap->arMapInfo,
				       fw_map_dyn->ar_map_info,
				       sizeof(struct MR_ARRAY_INFO) *
				       le32_to_cpu(desc_table->raid_map_desc_elements));
			break;
			case RAID_MAP_DESC_TYPE_SPAN_INFO:
				fw_map_dyn->ld_span_map =
					(struct MR_LD_SPAN_MAP *)
					(raid_map_data +
					le32_to_cpu(desc_table->raid_map_desc_offset));
				memcpy(pDrvRaidMap->ldSpanMap,
				       fw_map_dyn->ld_span_map,
				       sizeof(struct MR_LD_SPAN_MAP) *
				       le32_to_cpu(desc_table->raid_map_desc_elements));
			break;
			default:
				dev_dbg(&instance->pdev->dev, "unsupported descriptor type %d\n",
					le32_to_cpu(desc_table->raid_map_desc_type));
			}
			++desc_table;
		}

	} else if (instance->supportmax256vd) {
		fw_map_ext =
			(struct MR_FW_RAID_MAP_EXT *)fusion->ld_map[(map_id & 1)];
		ld_count = (u16)le16_to_cpu(fw_map_ext->ldCount);
		if (ld_count > MAX_LOGICAL_DRIVES_EXT) {
			dev_dbg(&instance->pdev->dev, "megaraid_sas: LD count exposed in RAID map is not valid\n");
			return 1;
		}

		pDrvRaidMap->ldCount = (__le16)cpu_to_le16(ld_count);
		pDrvRaidMap->fpPdIoTimeoutSec = fw_map_ext->fpPdIoTimeoutSec;
		for (i = 0; i < (MAX_LOGICAL_DRIVES_EXT); i++)
			pDrvRaidMap->ldTgtIdToLd[i] =
				(u16)fw_map_ext->ldTgtIdToLd[i];
		memcpy(pDrvRaidMap->ldSpanMap, fw_map_ext->ldSpanMap,
		       sizeof(struct MR_LD_SPAN_MAP) * ld_count);
		memcpy(pDrvRaidMap->arMapInfo, fw_map_ext->arMapInfo,
		       sizeof(struct MR_ARRAY_INFO) * MAX_API_ARRAYS_EXT);
		memcpy(pDrvRaidMap->devHndlInfo, fw_map_ext->devHndlInfo,
		       sizeof(struct MR_DEV_HANDLE_INFO) *
		       MAX_RAIDMAP_PHYSICAL_DEVICES);

		/* New RAID map will not set totalSize, so keep expected value
		 * for legacy code in ValidateMapInfo
		 */
		pDrvRaidMap->totalSize =
			cpu_to_le32(sizeof(struct MR_FW_RAID_MAP_EXT));
	} else {
		fw_map_old = (struct MR_FW_RAID_MAP_ALL *)
				fusion->ld_map[(map_id & 1)];
		pFwRaidMap = &fw_map_old->raidMap;
		ld_count = (u16)le32_to_cpu(pFwRaidMap->ldCount);
		if (ld_count > MAX_LOGICAL_DRIVES) {
			dev_dbg(&instance->pdev->dev,
				"LD count exposed in RAID map is not valid\n");
			return 1;
		}

		pDrvRaidMap->totalSize = pFwRaidMap->totalSize;
		pDrvRaidMap->ldCount = (__le16)cpu_to_le16(ld_count);
		pDrvRaidMap->fpPdIoTimeoutSec = pFwRaidMap->fpPdIoTimeoutSec;
		for (i = 0; i < MAX_RAIDMAP_LOGICAL_DRIVES + MAX_RAIDMAP_VIEWS; i++)
			pDrvRaidMap->ldTgtIdToLd[i] =
				(u8)pFwRaidMap->ldTgtIdToLd[i];
		for (i = 0; i < ld_count; i++)
			pDrvRaidMap->ldSpanMap[i] = pFwRaidMap->ldSpanMap[i];
		memcpy(pDrvRaidMap->arMapInfo, pFwRaidMap->arMapInfo,
			sizeof(struct MR_ARRAY_INFO) * MAX_RAIDMAP_ARRAYS);
		memcpy(pDrvRaidMap->devHndlInfo, pFwRaidMap->devHndlInfo,
			sizeof(struct MR_DEV_HANDLE_INFO) *
			MAX_RAIDMAP_PHYSICAL_DEVICES);
	}

	return 0;
}

/*
 * This function validates the RAID map data provided by firmware: it
 * checks the map's totalSize against the size expected for the map
 * format in use, then refreshes the span-set and load-balance
 * bookkeeping. Returns 1 if the map is valid, 0 otherwise.
 */
u8 MR_ValidateMapInfo(struct megasas_instance *instance, u64 map_id)
{
	struct fusion_context *fusion;
	struct MR_DRV_RAID_MAP_ALL *drv_map;
	struct MR_DRV_RAID_MAP *pDrvRaidMap;
	struct LD_LOAD_BALANCE_INFO *lbInfo;
	PLD_SPAN_INFO ldSpanInfo;
	struct MR_LD_RAID         *raid;
	u16 num_lds, i;
	u16 ld;
	u32 expected_size;

	if (MR_PopulateDrvRaidMap(instance, map_id))
		return 0;

	fusion = instance->ctrl_context;
	drv_map = fusion->ld_drv_map[(map_id & 1)];
	pDrvRaidMap = &drv_map->raidMap;

	lbInfo = fusion->load_balance_info;
	ldSpanInfo = fusion->log_to_span;

	if (instance->max_raid_mapsize)
		expected_size = sizeof(struct MR_DRV_RAID_MAP_ALL);
	else if (instance->supportmax256vd)
		expected_size = sizeof(struct MR_FW_RAID_MAP_EXT);
	else
		expected_size =
			(sizeof(struct MR_FW_RAID_MAP) - sizeof(struct MR_LD_SPAN_MAP) +
			(sizeof(struct MR_LD_SPAN_MAP) * le16_to_cpu(pDrvRaidMap->ldCount)));

	if (le32_to_cpu(pDrvRaidMap->totalSize) != expected_size) {
		dev_dbg(&instance->pdev->dev, "megasas: map info structure size 0x%x",
			le32_to_cpu(pDrvRaidMap->totalSize));
		dev_dbg(&instance->pdev->dev, "is not matching expected size 0x%x\n",
			(unsigned int)expected_size);
		dev_err(&instance->pdev->dev, "megasas: span map %x, pDrvRaidMap->totalSize : %x\n",
			(unsigned int)sizeof(struct MR_LD_SPAN_MAP),
			le32_to_cpu(pDrvRaidMap->totalSize));
		return 0;
	}

	if (instance->UnevenSpanSupport)
		mr_update_span_set(drv_map, ldSpanInfo);

	if (lbInfo)
		mr_update_load_balance_params(drv_map, lbInfo);

	num_lds = le16_to_cpu(drv_map->raidMap.ldCount);

	memcpy(instance->ld_ids_prev,
	       instance->ld_ids_from_raidmap,
	       sizeof(instance->ld_ids_from_raidmap));
	memset(instance->ld_ids_from_raidmap, 0xff, MEGASAS_MAX_LD_IDS);
	/* Convert RAID capability values to CPU byte order */
	for (i = 0; (num_lds > 0) && (i < MAX_LOGICAL_DRIVES_EXT); i++) {
		ld = MR_TargetIdToLdGet(i, drv_map);

		/* For non-existent VDs, iterate to the next VD */
		if (ld >= MEGASAS_MAX_SUPPORTED_LD_IDS)
			continue;

		raid = MR_LdRaidGet(ld, drv_map);
		le32_to_cpus((u32 *)&raid->capability);
		instance->ld_ids_from_raidmap[i] = i;
		num_lds--;
	}

	return 1;
}

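/*
 * MR_GetSpanBlock - find the span containing a given row
 *
 * Walks the per-span quad elements; a row lies in a quad when
 *	logStart <= row <= logEnd && (row - logStart) % diff == 0
 * and the corresponding span-relative block is
 *	((row - logStart) / diff + offsetInSpan) << stripeShift.
 * Returns the span number and fills *span_blk, or SPAN_INVALID.
 */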
u32 MR_GetSpanBlock(u32 ld, u64 row, u64 *span_blk,
		    struct MR_DRV_RAID_MAP_ALL *map)
{
	struct MR_SPAN_BLOCK_INFO *pSpanBlock = MR_LdSpanInfoGet(ld, map);
	struct MR_QUAD_ELEMENT    *quad;
	struct MR_LD_RAID         *raid = MR_LdRaidGet(ld, map);
	u32                span, j;

	for (span = 0; span < raid->spanDepth; span++, pSpanBlock++) {

		for (j = 0; j < le32_to_cpu(pSpanBlock->block_span_info.noElements); j++) {
			quad = &pSpanBlock->block_span_info.quad[j];

			if (le32_to_cpu(quad->diff) == 0)
				return SPAN_INVALID;
			if (le64_to_cpu(quad->logStart) <= row && row <=
				le64_to_cpu(quad->logEnd) && (mega_mod64(row - le64_to_cpu(quad->logStart),
				le32_to_cpu(quad->diff))) == 0) {
				if (span_blk != NULL) {
					u64  blk, debugBlk;

					blk =  mega_div64_32((row - le64_to_cpu(quad->logStart)), le32_to_cpu(quad->diff));
					debugBlk = blk;

					blk = (blk + le64_to_cpu(quad->offsetInSpan)) << raid->stripeShift;
					*span_blk = blk;
				}
				return span;
			}
		}
	}
	return SPAN_INVALID;
}

/*
******************************************************************************
*
* This routine calculates the Span block for given row using spanset.
*
* Inputs :
*    instance - HBA instance
*    ld   - Logical drive number
*    row        - Row number
*    map    - LD map
*
* Outputs :
*
*    span          - Span number
*    block         - Absolute Block number in the physical disk
*    div_error     - Divide error code.
*/

u32 mr_spanset_get_span_block(struct megasas_instance *instance,
		u32 ld, u64 row, u64 *span_blk, struct MR_DRV_RAID_MAP_ALL *map)
{
	struct fusion_context *fusion = instance->ctrl_context;
	struct MR_LD_RAID         *raid = MR_LdRaidGet(ld, map);
	LD_SPAN_SET *span_set;
	struct MR_QUAD_ELEMENT    *quad;
	u32    span, info;
	PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span;

	for (info = 0; info < MAX_QUAD_DEPTH; info++) {
		span_set = &(ldSpanInfo[ld].span_set[info]);

		if (span_set->span_row_data_width == 0)
			break;

		if (row > span_set->data_row_end)
			continue;

		for (span = 0; span < raid->spanDepth; span++)
			if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
				block_span_info.noElements) >= info+1) {
				quad = &map->raidMap.ldSpanMap[ld].
					spanBlock[span].
					block_span_info.quad[info];
				if (le32_to_cpu(quad->diff) == 0)
					return SPAN_INVALID;
				if (le64_to_cpu(quad->logStart) <= row  &&
					row <= le64_to_cpu(quad->logEnd)  &&
					(mega_mod64(row - le64_to_cpu(quad->logStart),
						le32_to_cpu(quad->diff))) == 0) {
					if (span_blk != NULL) {
						u64  blk;

						blk = mega_div64_32
						    ((row - le64_to_cpu(quad->logStart)),
						    le32_to_cpu(quad->diff));
						blk = (blk + le64_to_cpu(quad->offsetInSpan))
							 << raid->stripeShift;
						*span_blk = blk;
					}
					return span;
				}
			}
	}
	return SPAN_INVALID;
}

/*
******************************************************************************
*
* This routine calculates the row for given strip using spanset.
*
* Inputs :
*    instance - HBA instance
*    ld   - Logical drive number
*    strip        - Strip
*    map    - LD map
*
* Outputs :
*
*    row         - row associated with strip
*/

static u64 get_row_from_strip(struct megasas_instance *instance,
	u32 ld, u64 strip, struct MR_DRV_RAID_MAP_ALL *map)
{
	struct fusion_context *fusion = instance->ctrl_context;
	struct MR_LD_RAID	*raid = MR_LdRaidGet(ld, map);
	LD_SPAN_SET	*span_set;
	PLD_SPAN_INFO	ldSpanInfo = fusion->log_to_span;
	u32		info, strip_offset, span, span_offset;
	u64		span_set_Strip, span_set_Row, retval;

	for (info = 0; info < MAX_QUAD_DEPTH; info++) {
		span_set = &(ldSpanInfo[ld].span_set[info]);

		if (span_set->span_row_data_width == 0)
			break;
		if (strip > span_set->data_strip_end)
			continue;

		span_set_Strip = strip - span_set->data_strip_start;
		strip_offset = mega_mod64(span_set_Strip,
				span_set->span_row_data_width);
		span_set_Row = mega_div64_32(span_set_Strip,
				span_set->span_row_data_width) * span_set->diff;
		for (span = 0, span_offset = 0; span < raid->spanDepth; span++)
			if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
				block_span_info.noElements) >= info+1) {
				if (strip_offset >=
					span_set->strip_offset[span])
					span_offset++;
				else
					break;
			}

		retval = (span_set->data_row_start + span_set_Row +
				(span_offset - 1));
		return retval;
	}
	return -1LLU;
}


/*
******************************************************************************
*
* This routine calculates the Start Strip for given row using spanset.
*
* Inputs :
*    instance - HBA instance
*    ld   - Logical drive number
*    row        - Row number
*    map    - LD map
*
* Outputs :
*
*    Strip         - Start strip associated with row
*/

static u64 get_strip_from_row(struct megasas_instance *instance,
		u32 ld, u64 row, struct MR_DRV_RAID_MAP_ALL *map)
{
	struct fusion_context *fusion = instance->ctrl_context;
	struct MR_LD_RAID         *raid = MR_LdRaidGet(ld, map);
	LD_SPAN_SET *span_set;
	struct MR_QUAD_ELEMENT    *quad;
	PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span;
	u32    span, info;
	u64  strip;

	for (info = 0; info < MAX_QUAD_DEPTH; info++) {
		span_set = &(ldSpanInfo[ld].span_set[info]);

		if (span_set->span_row_data_width == 0)
			break;
		if (row > span_set->data_row_end)
			continue;

		for (span = 0; span < raid->spanDepth; span++)
			if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
				block_span_info.noElements) >= info+1) {
				quad = &map->raidMap.ldSpanMap[ld].
					spanBlock[span].block_span_info.quad[info];
				if (le64_to_cpu(quad->logStart) <= row  &&
					row <= le64_to_cpu(quad->logEnd)  &&
					mega_mod64((row - le64_to_cpu(quad->logStart)),
					le32_to_cpu(quad->diff)) == 0) {
					strip = mega_div64_32
						(((row - span_set->data_row_start)
							- le64_to_cpu(quad->logStart)),
							le32_to_cpu(quad->diff));
					strip *= span_set->span_row_data_width;
					strip += span_set->data_strip_start;
					strip += span_set->strip_offset[span];
					return strip;
				}
			}
	}
	dev_err(&instance->pdev->dev, "get_strip_from_row "
		"returns invalid strip for ld=%x, row=%lx\n",
		ld, (long unsigned int)row);
	return -1;
}

/*
******************************************************************************
*
* This routine calculates the Physical Arm for given strip using spanset.
*
* Inputs :
*    instance - HBA instance
*    ld   - Logical drive number
*    strip      - Strip
*    map    - LD map
*
* Outputs :
*
*    Phys Arm         - Phys Arm associated with strip
*/

static u32 get_arm_from_strip(struct megasas_instance *instance,
	u32 ld, u64 strip, struct MR_DRV_RAID_MAP_ALL *map)
{
	struct fusion_context *fusion = instance->ctrl_context;
	struct MR_LD_RAID         *raid = MR_LdRaidGet(ld, map);
	LD_SPAN_SET *span_set;
	PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span;
	u32    info, strip_offset, span, span_offset, retval;

	for (info = 0 ; info < MAX_QUAD_DEPTH; info++) {
		span_set = &(ldSpanInfo[ld].span_set[info]);

		if (span_set->span_row_data_width == 0)
			break;
		if (strip > span_set->data_strip_end)
			continue;

		strip_offset = (uint)mega_mod64
				((strip - span_set->data_strip_start),
				span_set->span_row_data_width);

		for (span = 0, span_offset = 0; span < raid->spanDepth; span++)
			if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
				block_span_info.noElements) >= info+1) {
				if (strip_offset >=
					span_set->strip_offset[span])
					span_offset =
						span_set->strip_offset[span];
				else
					break;
			}

		retval = (strip_offset - span_offset);
		return retval;
	}

	dev_err(&instance->pdev->dev, "get_arm_from_strip "
		"returns invalid arm for ld=%x strip=%lx\n",
		ld, (long unsigned int)strip);

	return -1;
}

/* This function will return the physical arm */
u8 get_arm(struct megasas_instance *instance, u32 ld, u8 span, u64 stripe,
		struct MR_DRV_RAID_MAP_ALL *map)
{
	struct MR_LD_RAID  *raid = MR_LdRaidGet(ld, map);
	/* Need to check correct default value */
	u32    arm = 0;

	switch (raid->level) {
	case 0:
	case 5:
	case 6:
		arm = mega_mod64(stripe, SPAN_ROW_SIZE(map, ld, span));
		break;
	case 1:
		/* start with logical arm */
		arm = get_arm_from_strip(instance, ld, stripe, map);
		if (arm != -1U)
			arm *= 2;
		break;
	}

	return arm;
}
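
/*
 * Note: for RAID 1 the array map interleaves each data arm with its
 * mirror, which is why the logical arm is doubled above and why the
 * mirror arm is addressed as physArm + 1 in the phy-params helpers
 * below.
 */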


/*
******************************************************************************
*
* This routine calculates the arm, span and block for the specified stripe and
* reference in stripe using spanset
*
* Inputs :
*
*    ld   - Logical drive number
*    stripRow        - Stripe number
*    stripRef    - Reference in stripe
*
* Outputs :
*
*    span          - Span number
*    block         - Absolute Block number in the physical disk
*/
static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
		u64 stripRow, u16 stripRef, struct IO_REQUEST_INFO *io_info,
		struct RAID_CONTEXT *pRAID_Context,
		struct MR_DRV_RAID_MAP_ALL *map)
{
	struct MR_LD_RAID  *raid = MR_LdRaidGet(ld, map);
	u32     pd, arRef, r1_alt_pd;
	u8      physArm, span;
	u64     row;
	u8	retval = true;
	u64	*pdBlock = &io_info->pdBlock;
	__le16	*pDevHandle = &io_info->devHandle;
	u8	*pPdInterface = &io_info->pd_interface;
	u32	logArm, rowMod, armQ, arm;
	struct fusion_context *fusion;

	fusion = instance->ctrl_context;
	*pDevHandle = cpu_to_le16(MR_DEVHANDLE_INVALID);

	/* Get row and span from io_info for Uneven Span IO. */
	row	    = io_info->start_row;
	span	    = io_info->start_span;


	if (raid->level == 6) {
		logArm = get_arm_from_strip(instance, ld, stripRow, map);
		if (logArm == -1U)
			return false;
		rowMod = mega_mod64(row, SPAN_ROW_SIZE(map, ld, span));
		armQ = SPAN_ROW_SIZE(map, ld, span) - 1 - rowMod;
		arm = armQ + 1 + logArm;
		if (arm >= SPAN_ROW_SIZE(map, ld, span))
			arm -= SPAN_ROW_SIZE(map, ld, span);
		physArm = (u8)arm;
	} else
		/* Calculate the arm */
		physArm = get_arm(instance, ld, span, stripRow, map);
	if (physArm == 0xFF)
		return false;

	arRef       = MR_LdSpanArrayGet(ld, span, map);
	pd          = MR_ArPdGet(arRef, physArm, map);

	if (pd != MR_PD_INVALID) {
		*pDevHandle = MR_PdDevHandleGet(pd, map);
		*pPdInterface = MR_PdInterfaceTypeGet(pd, map);
		/* get second pd also for raid 1/10 fast path writes */
		if ((instance->adapter_type >= VENTURA_SERIES) &&
		    (raid->level == 1) &&
		    !io_info->isRead) {
			r1_alt_pd = MR_ArPdGet(arRef, physArm + 1, map);
			if (r1_alt_pd != MR_PD_INVALID)
				io_info->r1_alt_dev_handle =
				MR_PdDevHandleGet(r1_alt_pd, map);
		}
	} else {
		if ((raid->level >= 5) &&
			((instance->adapter_type == THUNDERBOLT_SERIES)  ||
			((instance->adapter_type == INVADER_SERIES) &&
			(raid->regTypeReqOnRead != REGION_TYPE_UNUSED))))
			pRAID_Context->reg_lock_flags = REGION_TYPE_EXCLUSIVE;
		else if (raid->level == 1) {
			physArm = physArm + 1;
			pd = MR_ArPdGet(arRef, physArm, map);
			if (pd != MR_PD_INVALID) {
				*pDevHandle = MR_PdDevHandleGet(pd, map);
				*pPdInterface = MR_PdInterfaceTypeGet(pd, map);
			}
		}
	}

	*pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk);
	if (instance->adapter_type >= VENTURA_SERIES) {
		((struct RAID_CONTEXT_G35 *)pRAID_Context)->span_arm =
			(span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
		io_info->span_arm =
			(span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
	} else {
		pRAID_Context->span_arm =
			(span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
		io_info->span_arm = pRAID_Context->span_arm;
	}
	io_info->pd_after_lb = pd;
	return retval;
}

/*
******************************************************************************
*
* This routine calculates the arm, span and block for the specified stripe and
* reference in stripe.
*
* Inputs :
*
*    ld   - Logical drive number
*    stripRow        - Stripe number
*    stripRef    - Reference in stripe
*
* Outputs :
*
*    span          - Span number
*    block         - Absolute Block number in the physical disk
*/
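/*
 * RAID-6 arm rotation, worked through with illustrative numbers: with
 * rowSize = 4 and row = 5, rowMod = 5 % 4 = 1, so the Q drive sits at
 * armQ = 4 - 1 - 1 = 2 and logical arm 0 lands on physical arm
 * (2 + 1 + 0) % 4 = 3, i.e. data always logically follows Q.
 */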
u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
		u16 stripRef, struct IO_REQUEST_INFO *io_info,
		struct RAID_CONTEXT *pRAID_Context,
		struct MR_DRV_RAID_MAP_ALL *map)
{
	struct MR_LD_RAID  *raid = MR_LdRaidGet(ld, map);
	u32         pd, arRef, r1_alt_pd;
	u8          physArm, span;
	u64         row;
	u8	    retval = true;
	u64	    *pdBlock = &io_info->pdBlock;
	__le16	    *pDevHandle = &io_info->devHandle;
	u8	    *pPdInterface = &io_info->pd_interface;
	struct fusion_context *fusion;

	fusion = instance->ctrl_context;
	*pDevHandle = cpu_to_le16(MR_DEVHANDLE_INVALID);

	row =  mega_div64_32(stripRow, raid->rowDataSize);

	if (raid->level == 6) {
		/* logical arm within row */
		u32 logArm =  mega_mod64(stripRow, raid->rowDataSize);
		u32 rowMod, armQ, arm;

		if (raid->rowSize == 0)
			return false;
		/* get logical row mod */
		rowMod = mega_mod64(row, raid->rowSize);
		armQ = raid->rowSize - 1 - rowMod; /* index of Q drive */
		arm = armQ + 1 + logArm; /* data always logically follows Q */
		if (arm >= raid->rowSize) /* handle wrap condition */
			arm -= raid->rowSize;
		physArm = (u8)arm;
	} else  {
		if (raid->modFactor == 0)
			return false;
		physArm = MR_LdDataArmGet(ld,  mega_mod64(stripRow,
							  raid->modFactor),
					  map);
	}

	if (raid->spanDepth == 1) {
		span = 0;
		*pdBlock = row << raid->stripeShift;
	} else {
		span = (u8)MR_GetSpanBlock(ld, row, pdBlock, map);
		if (span == SPAN_INVALID)
			return false;
	}

	/* Get the array on which this span is present */
	arRef       = MR_LdSpanArrayGet(ld, span, map);
	pd          = MR_ArPdGet(arRef, physArm, map); /* Get the pd */

	if (pd != MR_PD_INVALID) {
		/* Get dev handle from Pd. */
		*pDevHandle = MR_PdDevHandleGet(pd, map);
		*pPdInterface = MR_PdInterfaceTypeGet(pd, map);
		/* get second pd also for raid 1/10 fast path writes */
		if ((instance->adapter_type >= VENTURA_SERIES) &&
		    (raid->level == 1) &&
		    !io_info->isRead) {
			r1_alt_pd = MR_ArPdGet(arRef, physArm + 1, map);
			if (r1_alt_pd != MR_PD_INVALID)
				io_info->r1_alt_dev_handle =
					MR_PdDevHandleGet(r1_alt_pd, map);
		}
	} else {
		if ((raid->level >= 5) &&
			((instance->adapter_type == THUNDERBOLT_SERIES)  ||
			((instance->adapter_type == INVADER_SERIES) &&
			(raid->regTypeReqOnRead != REGION_TYPE_UNUSED))))
			pRAID_Context->reg_lock_flags = REGION_TYPE_EXCLUSIVE;
		else if (raid->level == 1) {
			/* Get alternate Pd. */
			physArm = physArm + 1;
			pd = MR_ArPdGet(arRef, physArm, map);
			if (pd != MR_PD_INVALID) {
				/* Get dev handle from Pd */
				*pDevHandle = MR_PdDevHandleGet(pd, map);
				*pPdInterface = MR_PdInterfaceTypeGet(pd, map);
			}
		}
	}

	*pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk);
	if (instance->adapter_type >= VENTURA_SERIES) {
		((struct RAID_CONTEXT_G35 *)pRAID_Context)->span_arm =
				(span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
		io_info->span_arm =
				(span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
	} else {
		pRAID_Context->span_arm =
			(span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
		io_info->span_arm = pRAID_Context->span_arm;
	}
	io_info->pd_after_lb = pd;
	return retval;
}

/*
 * mr_get_phy_params_r56_rmw - Calculate parameters for R56 CTIO write operation
 * @instance:			Adapter soft state
 * @ld:				LD index
 * @stripNo:			Strip Number
 * @io_info:			IO info structure pointer
 * @pRAID_Context:		RAID context pointer
 * @map:			RAID map pointer
 *
 * This routine calculates the logical arm, data arm, row number and parity arm
 * for an R56 CTIO write operation.
 */
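/*
 * The computed arms are packed into flow_specific.r56_arm_map: the
 * rightmost parity arm in the low bits, the P-parity arm at
 * RAID_CTX_R56_P_ARM_SHIFT and the logical arm at
 * RAID_CTX_R56_LOG_ARM_SHIFT, then converted to little endian.
 */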
static void mr_get_phy_params_r56_rmw(struct megasas_instance *instance,
			    u32 ld, u64 stripNo,
			    struct IO_REQUEST_INFO *io_info,
			    struct RAID_CONTEXT_G35 *pRAID_Context,
			    struct MR_DRV_RAID_MAP_ALL *map)
{
	struct MR_LD_RAID  *raid = MR_LdRaidGet(ld, map);
	u8          span, dataArms, arms, dataArm, logArm;
	s8          rightmostParityArm, PParityArm;
	u64         rowNum;
	u64 *pdBlock = &io_info->pdBlock;

	dataArms = raid->rowDataSize;
	arms = raid->rowSize;

	rowNum =  mega_div64_32(stripNo, dataArms);
	/* parity disk arm, first arm is 0 */
	rightmostParityArm = (arms - 1) - mega_mod64(rowNum, arms);

	/* logical arm within row */
	logArm =  mega_mod64(stripNo, dataArms);
	/* physical arm for data */
	dataArm = mega_mod64((rightmostParityArm + 1 + logArm), arms);

	if (raid->spanDepth == 1) {
		span = 0;
	} else {
		span = (u8)MR_GetSpanBlock(ld, rowNum, pdBlock, map);
		if (span == SPAN_INVALID)
			return;
	}

	if (raid->level == 6) {
		/* P parity arm; note this can go negative - adjust if so */
		PParityArm = (arms - 2) - mega_mod64(rowNum, arms);

		if (PParityArm < 0)
			PParityArm += arms;

		/* rightmostParityArm is P-parity for RAID 5 and Q-parity for RAID 6 */
		pRAID_Context->flow_specific.r56_arm_map = rightmostParityArm;
		pRAID_Context->flow_specific.r56_arm_map |=
				    (u16)(PParityArm << RAID_CTX_R56_P_ARM_SHIFT);
	} else {
		pRAID_Context->flow_specific.r56_arm_map |=
				    (u16)(rightmostParityArm << RAID_CTX_R56_P_ARM_SHIFT);
	}

	pRAID_Context->reg_lock_row_lba = cpu_to_le64(rowNum);
	pRAID_Context->flow_specific.r56_arm_map |=
				   (u16)(logArm << RAID_CTX_R56_LOG_ARM_SHIFT);
	cpu_to_le16s(&pRAID_Context->flow_specific.r56_arm_map);
	pRAID_Context->span_arm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | dataArm;
	pRAID_Context->raid_flags = (MR_RAID_FLAGS_IO_SUB_TYPE_R56_DIV_OFFLOAD <<
				    MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT);
}

/*
******************************************************************************
*
* MR_BuildRaidContext function
*
* This function initiates command processing: it calculates the start/end
* row and strip for the I/O, sets up the region lock fields in the RAID
* context, and, when the I/O is fast-path capable, resolves the physical
* parameters. Returns true when the RAID context was built successfully,
* false otherwise.
*/
u8
MR_BuildRaidContext(struct megasas_instance *instance,
		    struct IO_REQUEST_INFO *io_info,
		    struct RAID_CONTEXT *pRAID_Context,
		    struct MR_DRV_RAID_MAP_ALL *map, u8 **raidLUN)
{
	struct fusion_context *fusion;
	struct MR_LD_RAID  *raid;
	u32         stripSize, stripe_mask;
	u64         endLba, endStrip, endRow, start_row, start_strip;
	u64         regStart;
	u32         regSize;
	u8          num_strips, numRows;
	u16         ref_in_start_stripe, ref_in_end_stripe;
	u64         ldStartBlock;
	u32         numBlocks, ldTgtId;
	u8          isRead;
	u8	    retval = 0;
	u8	    startlba_span = SPAN_INVALID;
	u64 *pdBlock = &io_info->pdBlock;
	u16	    ld;

	ldStartBlock = io_info->ldStartBlock;
	numBlocks = io_info->numBlocks;
	ldTgtId = io_info->ldTgtId;
	isRead = io_info->isRead;
	io_info->IoforUnevenSpan = 0;
	io_info->start_span	= SPAN_INVALID;
	fusion = instance->ctrl_context;

	ld = MR_TargetIdToLdGet(ldTgtId, map);
	raid = MR_LdRaidGet(ld, map);
	/* check the read-ahead bit */
	io_info->ra_capable = raid->capability.ra_capable;

	/*
	 * If rowDataSize in the RAID map and spanRowDataSize in SPAN INFO
	 * are both zero, return false.
	 */
	if (raid->rowDataSize == 0) {
		if (MR_LdSpanPtrGet(ld, 0, map)->spanRowDataSize == 0)
			return false;
		else if (instance->UnevenSpanSupport) {
			io_info->IoforUnevenSpan = 1;
		} else {
			dev_info(&instance->pdev->dev,
				"raid->rowDataSize is 0, but SPAN[0] has "
				"rowDataSize = 0x%0x, "
				"and there is _NO_ UnevenSpanSupport\n",
				MR_LdSpanPtrGet(ld, 0, map)->spanRowDataSize);
			return false;
		}
	}

	stripSize = 1 << raid->stripeShift;
	stripe_mask = stripSize - 1;

	io_info->data_arms = raid->rowDataSize;

	/*
	 * calculate starting row and stripe, and number of strips and rows
	 */
	start_strip         = ldStartBlock >> raid->stripeShift;
	ref_in_start_stripe = (u16)(ldStartBlock & stripe_mask);
	endLba              = ldStartBlock + numBlocks - 1;
	ref_in_end_stripe   = (u16)(endLba & stripe_mask);
	endStrip            = endLba >> raid->stripeShift;
	num_strips          = (u8)(endStrip - start_strip + 1); /* End strip */

	if (io_info->IoforUnevenSpan) {
		start_row = get_row_from_strip(instance, ld, start_strip, map);
		endRow	  = get_row_from_strip(instance, ld, endStrip, map);
		if (start_row == -1ULL || endRow == -1ULL) {
			dev_info(&instance->pdev->dev, "return from %s %d. "
				"Send IO w/o region lock.\n",
				__func__, __LINE__);
			return false;
		}

		if (raid->spanDepth == 1) {
			startlba_span = 0;
			*pdBlock = start_row << raid->stripeShift;
		} else
			startlba_span = (u8)mr_spanset_get_span_block(instance,
						ld, start_row, pdBlock, map);
		if (startlba_span == SPAN_INVALID) {
			dev_info(&instance->pdev->dev, "return from %s %d "
				"for row 0x%llx, start strip %llx "
				"endStrip %llx\n", __func__, __LINE__,
				(unsigned long long)start_row,
				(unsigned long long)start_strip,
				(unsigned long long)endStrip);
			return false;
		}
		io_info->start_span	= startlba_span;
		io_info->start_row	= start_row;
	} else {
		start_row = mega_div64_32(start_strip, raid->rowDataSize);
		endRow    = mega_div64_32(endStrip, raid->rowDataSize);
	}
	numRows = (u8)(endRow - start_row + 1);

	/*
	 * calculate region info.
	 */

	/* assume region is at the start of the first row */
	regStart            = start_row << raid->stripeShift;
	/* assume this IO needs the full row - we'll adjust if not true */
	regSize             = stripSize;

	io_info->do_fp_rlbypass = raid->capability.fpBypassRegionLock;

	/* Check if we can send this I/O via FastPath */
	if (raid->capability.fpCapable) {
		if (isRead)
			io_info->fpOkForIo = (raid->capability.fpReadCapable &&
					      ((num_strips == 1) ||
					       raid->capability.
					       fpReadAcrossStripe));
		else
			io_info->fpOkForIo = (raid->capability.fpWriteCapable &&
					      ((num_strips == 1) ||
					       raid->capability.
					       fpWriteAcrossStripe));
	} else
		io_info->fpOkForIo = false;

	if (numRows == 1) {
		/* single-strip IOs can always lock only the data needed */
		if (num_strips == 1) {
			regStart += ref_in_start_stripe;
			regSize = numBlocks;
		}
		/* multi-strip IOs always need the full stripe locked */
	} else if (io_info->IoforUnevenSpan == 0) {
		/*
		 * For even-span region lock optimization:
		 * if the start strip is the last in the start row
		 */
		if (start_strip == (start_row + 1) * raid->rowDataSize - 1) {
			regStart += ref_in_start_stripe;
			/* initialize count to sectors from startref to end
			   of strip */
			regSize = stripSize - ref_in_start_stripe;
		}

		/* add complete rows in the middle of the transfer */
		if (numRows > 2)
			regSize += (numRows - 2) << raid->stripeShift;

		/* if IO ends within first strip of last row */
		if (endStrip == endRow * raid->rowDataSize)
			regSize += ref_in_end_stripe + 1;
		else
			regSize += stripSize;
	} else {
		/*
		 * For uneven-span region lock optimization:
		 * if the start strip is the last in the start row
		 */
		if (start_strip == (get_strip_from_row(instance, ld, start_row, map) +
				SPAN_ROW_DATA_SIZE(map, ld, startlba_span) - 1)) {
			regStart += ref_in_start_stripe;
			/* initialize count to sectors from
			 * startRef to end of strip
			 */
			regSize = stripSize - ref_in_start_stripe;
		}

		/* add complete rows in the middle of the transfer */
		if (numRows > 2)
			regSize += (numRows - 2) << raid->stripeShift;

		/* if IO ends within first strip of last row */
		if (endStrip == get_strip_from_row(instance, ld, endRow, map))
			regSize += ref_in_end_stripe + 1;
		else
			regSize += stripSize;
	}

	pRAID_Context->timeout_value =
		cpu_to_le16(raid->fpIoTimeoutForLd ?
			    raid->fpIoTimeoutForLd :
			    map->raidMap.fpPdIoTimeoutSec);
	if (instance->adapter_type == INVADER_SERIES)
		pRAID_Context->reg_lock_flags = (isRead) ?
			raid->regTypeReqOnRead : raid->regTypeReqOnWrite;
	else if (instance->adapter_type == THUNDERBOLT_SERIES)
		pRAID_Context->reg_lock_flags = (isRead) ?
			REGION_TYPE_SHARED_READ : raid->regTypeReqOnWrite;
	pRAID_Context->virtual_disk_tgt_id = raid->targetId;
	pRAID_Context->reg_lock_row_lba    = cpu_to_le64(regStart);
	pRAID_Context->reg_lock_length    = cpu_to_le32(regSize);
	pRAID_Context->config_seq_num	= raid->seqNum;
	/* save pointer to raid->LUN array */
	*raidLUN = raid->LUN;

	/* Aero R5/6 division offload for WRITE */
	if (fusion->r56_div_offload && (raid->level >= 5) && !isRead) {
		mr_get_phy_params_r56_rmw(instance, ld, start_strip, io_info,
				       (struct RAID_CONTEXT_G35 *)pRAID_Context,
				       map);
		return true;
	}

	/*
	 * Get phy params only if FP capable, or else leave it to MR
	 * firmware to do the calculation.
	 */
	if (io_info->fpOkForIo) {
		retval = io_info->IoforUnevenSpan ?
				mr_spanset_get_phy_params(instance, ld,
					start_strip, ref_in_start_stripe,
					io_info, pRAID_Context, map) :
				MR_GetPhyParams(instance, ld, start_strip,
					ref_in_start_stripe, io_info,
					pRAID_Context, map);
		/* If the IO is on an invalid PD, then FP is not possible. */
		if (io_info->devHandle == MR_DEVHANDLE_INVALID)
			io_info->fpOkForIo = false;
		return retval;
	} else if (isRead) {
		uint stripIdx;

		for (stripIdx = 0; stripIdx < num_strips; stripIdx++) {
			retval = io_info->IoforUnevenSpan ?
				mr_spanset_get_phy_params(instance, ld,
				    start_strip + stripIdx,
				    ref_in_start_stripe, io_info,
				    pRAID_Context, map) :
				MR_GetPhyParams(instance, ld,
				    start_strip + stripIdx, ref_in_start_stripe,
				    io_info, pRAID_Context, map);
			if (!retval)
				return true;
		}
	}
	return true;
}

/*
******************************************************************************
*
* This routine prepares span set info from a valid RAID map and stores it
* in the driver's per-instance copy, ldSpanInfo.
*
* Inputs :
* map    - LD map
* ldSpanInfo - ldSpanInfo per HBA instance
*
*/
void mr_update_span_set(struct MR_DRV_RAID_MAP_ALL *map,
	PLD_SPAN_INFO ldSpanInfo)
{
	u8   span, count;
	u32  element, span_row_width;
	u64  span_row;
	struct MR_LD_RAID *raid;
	LD_SPAN_SET *span_set, *span_set_prev;
	struct MR_QUAD_ELEMENT    *quad;
	int ldCount;
	u16 ld;


	for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) {
		ld = MR_TargetIdToLdGet(ldCount, map);
		if (ld >= (MAX_LOGICAL_DRIVES_EXT - 1))
			continue;
		raid = MR_LdRaidGet(ld, map);
		for (element = 0; element < MAX_QUAD_DEPTH; element++) {
			for (span = 0; span < raid->spanDepth; span++) {
				if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
					block_span_info.noElements) <
					element + 1)
					continue;
				span_set = &(ldSpanInfo[ld].span_set[element]);
				quad = &map->raidMap.ldSpanMap[ld].
					spanBlock[span].block_span_info.
					quad[element];

				span_set->diff = le32_to_cpu(quad->diff);

				for (count = 0, span_row_width = 0;
					count < raid->spanDepth; count++) {
					if (le32_to_cpu(map->raidMap.ldSpanMap[ld].
						spanBlock[count].
						block_span_info.
						noElements) >= element + 1) {
						span_set->strip_offset[count] =
							span_row_width;
						span_row_width +=
							MR_LdSpanPtrGet
							(ld, count, map)->spanRowDataSize;
					}
				}

				span_set->span_row_data_width = span_row_width;
				span_row = mega_div64_32(((le64_to_cpu(quad->logEnd) -
					le64_to_cpu(quad->logStart)) + le32_to_cpu(quad->diff)),
					le32_to_cpu(quad->diff));

				if (element == 0) {
					span_set->log_start_lba = 0;
					span_set->log_end_lba =
						((span_row << raid->stripeShift)
						* span_row_width) - 1;

					span_set->span_row_start = 0;
					span_set->span_row_end = span_row - 1;

					span_set->data_strip_start = 0;
					span_set->data_strip_end =
						(span_row * span_row_width) - 1;

					span_set->data_row_start = 0;
					span_set->data_row_end =
						(span_row * le32_to_cpu(quad->diff)) - 1;
				} else {
					span_set_prev = &(ldSpanInfo[ld].
							span_set[element - 1]);
					span_set->log_start_lba =
						span_set_prev->log_end_lba + 1;
					span_set->log_end_lba =
						span_set->log_start_lba +
						((span_row << raid->stripeShift)
						* span_row_width) - 1;

					span_set->span_row_start =
						span_set_prev->span_row_end + 1;
					span_set->span_row_end =
					span_set->span_row_start + span_row - 1;

					span_set->data_strip_start =
					span_set_prev->data_strip_end + 1;
					span_set->data_strip_end =
						span_set->data_strip_start +
						(span_row * span_row_width) - 1;

					span_set->data_row_start =
						span_set_prev->data_row_end + 1;
					span_set->data_row_end =
						span_set->data_row_start +
						(span_row * le32_to_cpu(quad->diff)) - 1;
				}
				break;
			}
			if (span == raid->spanDepth)
				break;
		}
	}
}

void mr_update_load_balance_params(struct MR_DRV_RAID_MAP_ALL *drv_map,
	struct LD_LOAD_BALANCE_INFO *lbInfo)
{
	int ldCount;
	u16 ld;
	struct MR_LD_RAID *raid;

	if (lb_pending_cmds > 128 || lb_pending_cmds < 1)
		lb_pending_cmds = LB_PENDING_CMDS_DEFAULT;

	for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) {
		ld = MR_TargetIdToLdGet(ldCount, drv_map);
		if (ld >= MAX_LOGICAL_DRIVES_EXT - 1) {
			lbInfo[ldCount].loadBalanceFlag = 0;
			continue;
		}

		raid = MR_LdRaidGet(ld, drv_map);
		if ((raid->level != 1) ||
			(raid->ldState != MR_LD_STATE_OPTIMAL)) {
			lbInfo[ldCount].loadBalanceFlag = 0;
			continue;
		}
		lbInfo[ldCount].loadBalanceFlag = 1;
	}
}

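/*
 * megasas_get_best_arm_pd - pick the best RAID-1 arm for an I/O
 *
 * Chooses between the data arm and its mirror: the arm whose head is
 * nearer the requested block wins, unless its pending-command count
 * exceeds the other arm's by more than lb_pending_cmds. Returns the
 * chosen PD index and updates io_info->span_arm and pd_after_lb.
 */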
u8 megasas_get_best_arm_pd(struct megasas_instance *instance,
			   struct LD_LOAD_BALANCE_INFO *lbInfo,
			   struct IO_REQUEST_INFO *io_info,
			   struct MR_DRV_RAID_MAP_ALL *drv_map)
{
	struct MR_LD_RAID  *raid;
	u16	pd1_dev_handle;
	u16     pend0, pend1, ld;
	u64     diff0, diff1;
	u8      bestArm, pd0, pd1, span, arm;
	u32     arRef, span_row_size;

	u64 block = io_info->ldStartBlock;
	u32 count = io_info->numBlocks;

	span = ((io_info->span_arm & RAID_CTX_SPANARM_SPAN_MASK)
			>> RAID_CTX_SPANARM_SPAN_SHIFT);
	arm = (io_info->span_arm & RAID_CTX_SPANARM_ARM_MASK);

	ld = MR_TargetIdToLdGet(io_info->ldTgtId, drv_map);
	raid = MR_LdRaidGet(ld, drv_map);
	span_row_size = instance->UnevenSpanSupport ?
			SPAN_ROW_SIZE(drv_map, ld, span) : raid->rowSize;

	arRef = MR_LdSpanArrayGet(ld, span, drv_map);
	pd0 = MR_ArPdGet(arRef, arm, drv_map);
	pd1 = MR_ArPdGet(arRef, (arm + 1) >= span_row_size ?
		(arm + 1 - span_row_size) : arm + 1, drv_map);

	/* Get PD1 Dev Handle */
	pd1_dev_handle = MR_PdDevHandleGet(pd1, drv_map);

	if (pd1_dev_handle == MR_DEVHANDLE_INVALID) {
		bestArm = arm;
	} else {
		/* get the pending cmds for the data and mirror arms */
		pend0 = atomic_read(&lbInfo->scsi_pending_cmds[pd0]);
		pend1 = atomic_read(&lbInfo->scsi_pending_cmds[pd1]);

		/* Determine the disk whose head is nearer to the req. block */
		diff0 = ABS_DIFF(block, lbInfo->last_accessed_block[pd0]);
		diff1 = ABS_DIFF(block, lbInfo->last_accessed_block[pd1]);
		bestArm = (diff0 <= diff1 ? arm : arm ^ 1);

		/* Make balance count from 16 to 4 to
		 * keep driver in sync with Firmware
		 */
		if ((bestArm == arm && pend0 > pend1 + lb_pending_cmds)  ||
		    (bestArm != arm && pend1 > pend0 + lb_pending_cmds))
			bestArm ^= 1;

		/* Update the last accessed block on the correct pd */
		io_info->span_arm =
			(span << RAID_CTX_SPANARM_SPAN_SHIFT) | bestArm;
		io_info->pd_after_lb = (bestArm == arm) ? pd0 : pd1;
	}

	lbInfo->last_accessed_block[io_info->pd_after_lb] = block + count - 1;
	return io_info->pd_after_lb;
}

__le16 get_updated_dev_handle(struct megasas_instance *instance,
			      struct LD_LOAD_BALANCE_INFO *lbInfo,
			      struct IO_REQUEST_INFO *io_info,
			      struct MR_DRV_RAID_MAP_ALL *drv_map)
{
	u8 arm_pd;
	__le16 devHandle;

	/* get best new arm (PD ID) */
	arm_pd  = megasas_get_best_arm_pd(instance, lbInfo, io_info, drv_map);
	devHandle = MR_PdDevHandleGet(arm_pd, drv_map);
	io_info->pd_interface = MR_PdInterfaceTypeGet(arm_pd, drv_map);
	atomic_inc(&lbInfo->scsi_pending_cmds[arm_pd]);

	return devHandle;
}
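
/*
 * get_updated_dev_handle() bumps the chosen arm's pending-command count;
 * the matching atomic_dec() is expected to happen in the I/O completion
 * path, so callers must issue the I/O on the returned handle.
 */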