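/*
 * MegaRAID SAS fast-path helpers for the megaraid_sas driver: firmware RAID
 * map parsing and validation, span/row/strip/arm geometry calculations, and
 * RAID-1 read load balancing.
 */
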
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/list.h>
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/uio.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/compat.h>
#include <linux/blkdev.h>
#include <linux/poll.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>

#include "megaraid_sas_fusion.h"
#include "megaraid_sas.h"
#include <asm/div64.h>

#define LB_PENDING_CMDS_DEFAULT 4
static unsigned int lb_pending_cmds = LB_PENDING_CMDS_DEFAULT;
module_param(lb_pending_cmds, int, S_IRUGO);
MODULE_PARM_DESC(lb_pending_cmds, "Change raid-1 load balancing outstanding "
        "threshold. Valid Values are 1-128. Default: 4");

#define ABS_DIFF(a, b) (((a) > (b)) ? ((a) - (b)) : ((b) - (a)))
#define MR_LD_STATE_OPTIMAL 3
#define FALSE 0
#define TRUE 1

#define SPAN_DEBUG 0
#define SPAN_ROW_SIZE(map, ld, index_) (MR_LdSpanPtrGet(ld, index_, map)->spanRowSize)
#define SPAN_ROW_DATA_SIZE(map_, ld, index_) (MR_LdSpanPtrGet(ld, index_, map_)->spanRowDataSize)
#define SPAN_INVALID 0xff

static void mr_update_span_set(struct MR_DRV_RAID_MAP_ALL *map,
        PLD_SPAN_INFO ldSpanInfo);
static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
        u64 stripRow, u16 stripRef, struct IO_REQUEST_INFO *io_info,
        struct RAID_CONTEXT *pRAID_Context, struct MR_DRV_RAID_MAP_ALL *map);
static u64 get_row_from_strip(struct megasas_instance *instance, u32 ld,
        u64 strip, struct MR_DRV_RAID_MAP_ALL *map);

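/*
 * mega_mod64 - 64-bit modulo: return dividend % divisor.  do_div() modifies
 * its first argument, so the dividend is copied before use.
 */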
u32 mega_mod64(u64 dividend, u32 divisor)
{
        u64 d;
        u32 remainder;

        if (!divisor)
                printk(KERN_ERR "megasas : DIVISOR is zero, in mod fn\n");
        d = dividend;
        remainder = do_div(d, divisor);
        return remainder;
}

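/*
 * mega_div64_32 - 64-bit by 32-bit division: return dividend / divisor.
 * The remainder produced by do_div() is discarded.
 */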
u64 mega_div64_32(uint64_t dividend, uint32_t divisor)
{
        u32 remainder;
        u64 d;

        if (!divisor)
                printk(KERN_ERR "megasas : DIVISOR is zero, in div fn\n");

        d = dividend;
        remainder = do_div(d, divisor);

        return d;
}

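/*
 * Thin accessor helpers for the driver-format RAID map.  They hide the
 * endianness conversions and the nesting of the map structures.
 */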
struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_DRV_RAID_MAP_ALL *map)
{
        return &map->raidMap.ldSpanMap[ld].ldRaid;
}

static struct MR_SPAN_BLOCK_INFO *MR_LdSpanInfoGet(u32 ld,
                struct MR_DRV_RAID_MAP_ALL *map)
{
        return &map->raidMap.ldSpanMap[ld].spanBlock[0];
}

static u8 MR_LdDataArmGet(u32 ld, u32 armIdx, struct MR_DRV_RAID_MAP_ALL *map)
{
        return map->raidMap.ldSpanMap[ld].dataArmMap[armIdx];
}

u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_DRV_RAID_MAP_ALL *map)
{
        return le16_to_cpu(map->raidMap.arMapInfo[ar].pd[arm]);
}

u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_DRV_RAID_MAP_ALL *map)
{
        return le16_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].span.arrayRef);
}

u16 MR_PdDevHandleGet(u32 pd, struct MR_DRV_RAID_MAP_ALL *map)
{
        return map->raidMap.devHndlInfo[pd].curDevHdl;
}

u16 MR_GetLDTgtId(u32 ld, struct MR_DRV_RAID_MAP_ALL *map)
{
        return le16_to_cpu(map->raidMap.ldSpanMap[ld].ldRaid.targetId);
}

u8 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_DRV_RAID_MAP_ALL *map)
{
        return map->raidMap.ldTgtIdToLd[ldTgtId];
}

static struct MR_LD_SPAN *MR_LdSpanPtrGet(u32 ld, u32 span,
                struct MR_DRV_RAID_MAP_ALL *map)
{
        return &map->raidMap.ldSpanMap[ld].spanBlock[span].span;
}

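/*
 * MR_PopulateDrvRaidMap - Convert the firmware RAID map into the driver's
 * internal (extended) format.  For controllers that support 256+ VDs the
 * firmware map already uses the extended layout and is copied verbatim;
 * otherwise the legacy map is expanded field by field.
 */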
void MR_PopulateDrvRaidMap(struct megasas_instance *instance)
{
        struct fusion_context *fusion = instance->ctrl_context;
        struct MR_FW_RAID_MAP_ALL *fw_map_old = NULL;
        struct MR_FW_RAID_MAP *pFwRaidMap = NULL;
        int i;
        u16 ld_count;

        struct MR_DRV_RAID_MAP_ALL *drv_map =
                fusion->ld_drv_map[(instance->map_id & 1)];
        struct MR_DRV_RAID_MAP *pDrvRaidMap = &drv_map->raidMap;

        if (instance->supportmax256vd) {
                memcpy(fusion->ld_drv_map[instance->map_id & 1],
                       fusion->ld_map[instance->map_id & 1],
                       fusion->current_map_sz);
                /*
                 * The extended map does not carry totalSize, so set it to the
                 * value MR_ValidateMapInfo() expects.
                 */
                pDrvRaidMap->totalSize =
                        cpu_to_le32(sizeof(struct MR_FW_RAID_MAP_EXT));
        } else {
                fw_map_old = (struct MR_FW_RAID_MAP_ALL *)
                        fusion->ld_map[(instance->map_id & 1)];
                pFwRaidMap = &fw_map_old->raidMap;
                ld_count = (u16)le32_to_cpu(pFwRaidMap->ldCount);

#if VD_EXT_DEBUG
                for (i = 0; i < ld_count; i++) {
                        dev_dbg(&instance->pdev->dev, "(%d) :Index 0x%x "
                                "Target Id 0x%x Seq Num 0x%x Size 0x%llx\n",
                                instance->unique_id, i,
                                fw_map_old->raidMap.ldSpanMap[i].ldRaid.targetId,
                                fw_map_old->raidMap.ldSpanMap[i].ldRaid.seqNum,
                                fw_map_old->raidMap.ldSpanMap[i].ldRaid.size);
                }
#endif

                memset(drv_map, 0, fusion->drv_map_sz);
                pDrvRaidMap->totalSize = pFwRaidMap->totalSize;
                pDrvRaidMap->ldCount = (__le16)cpu_to_le16(ld_count);
                pDrvRaidMap->fpPdIoTimeoutSec = pFwRaidMap->fpPdIoTimeoutSec;
                for (i = 0; i < MAX_RAIDMAP_LOGICAL_DRIVES + MAX_RAIDMAP_VIEWS; i++)
                        pDrvRaidMap->ldTgtIdToLd[i] =
                                (u8)pFwRaidMap->ldTgtIdToLd[i];
                for (i = (MAX_RAIDMAP_LOGICAL_DRIVES + MAX_RAIDMAP_VIEWS);
                     i < MAX_LOGICAL_DRIVES_EXT; i++)
                        pDrvRaidMap->ldTgtIdToLd[i] = 0xff;
                for (i = 0; i < ld_count; i++) {
                        pDrvRaidMap->ldSpanMap[i] = pFwRaidMap->ldSpanMap[i];
#if VD_EXT_DEBUG
                        dev_dbg(&instance->pdev->dev,
                                "pFwRaidMap->ldSpanMap[%d].ldRaid.targetId 0x%x "
                                "pFwRaidMap->ldSpanMap[%d].ldRaid.seqNum 0x%x "
                                "size 0x%x\n", i, i,
                                pFwRaidMap->ldSpanMap[i].ldRaid.targetId,
                                pFwRaidMap->ldSpanMap[i].ldRaid.seqNum,
                                (u32)pFwRaidMap->ldSpanMap[i].ldRaid.rowSize);
                        dev_dbg(&instance->pdev->dev,
                                "pDrvRaidMap->ldSpanMap[%d].ldRaid.targetId 0x%x "
                                "pDrvRaidMap->ldSpanMap[%d].ldRaid.seqNum 0x%x "
                                "size 0x%x\n", i, i,
                                pDrvRaidMap->ldSpanMap[i].ldRaid.targetId,
                                pDrvRaidMap->ldSpanMap[i].ldRaid.seqNum,
                                (u32)pDrvRaidMap->ldSpanMap[i].ldRaid.rowSize);
                        dev_dbg(&instance->pdev->dev, "Driver raid map all %p "
                                "raid map %p LD RAID MAP %p/%p\n", drv_map,
                                pDrvRaidMap, &pFwRaidMap->ldSpanMap[i].ldRaid,
                                &pDrvRaidMap->ldSpanMap[i].ldRaid);
#endif
                }
                memcpy(pDrvRaidMap->arMapInfo, pFwRaidMap->arMapInfo,
                       sizeof(struct MR_ARRAY_INFO) * MAX_RAIDMAP_ARRAYS);
                memcpy(pDrvRaidMap->devHndlInfo, pFwRaidMap->devHndlInfo,
                       sizeof(struct MR_DEV_HANDLE_INFO) *
                       MAX_RAIDMAP_PHYSICAL_DEVICES);
        }
}

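/*
 * MR_ValidateMapInfo - Sanity-check the RAID map reported by firmware.
 * Populates the driver map, verifies that totalSize matches the size implied
 * by the LD count, and refreshes the span-set and load-balancing state.
 * Returns 1 if the map is usable, 0 otherwise.
 */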
u8 MR_ValidateMapInfo(struct megasas_instance *instance)
{
        struct fusion_context *fusion;
        struct MR_DRV_RAID_MAP_ALL *drv_map;
        struct MR_DRV_RAID_MAP *pDrvRaidMap;
        struct LD_LOAD_BALANCE_INFO *lbInfo;
        PLD_SPAN_INFO ldSpanInfo;
        struct MR_LD_RAID *raid;
        u16 ldCount, num_lds;
        u16 ld;
        u32 expected_size;

        MR_PopulateDrvRaidMap(instance);

        fusion = instance->ctrl_context;
        drv_map = fusion->ld_drv_map[(instance->map_id & 1)];
        pDrvRaidMap = &drv_map->raidMap;

        lbInfo = fusion->load_balance_info;
        ldSpanInfo = fusion->log_to_span;

        if (instance->supportmax256vd)
                expected_size = sizeof(struct MR_FW_RAID_MAP_EXT);
        else
                expected_size =
                        (sizeof(struct MR_FW_RAID_MAP) - sizeof(struct MR_LD_SPAN_MAP) +
                        (sizeof(struct MR_LD_SPAN_MAP) * le16_to_cpu(pDrvRaidMap->ldCount)));

        if (le32_to_cpu(pDrvRaidMap->totalSize) != expected_size) {
                dev_err(&instance->pdev->dev, "map info structure size 0x%x is not matching with ld count\n",
                        (unsigned int)expected_size);
                dev_err(&instance->pdev->dev, "megasas: span map %x, pDrvRaidMap->totalSize : %x\n",
                        (unsigned int)sizeof(struct MR_LD_SPAN_MAP),
                        le32_to_cpu(pDrvRaidMap->totalSize));
                return 0;
        }

        if (instance->UnevenSpanSupport)
                mr_update_span_set(drv_map, ldSpanInfo);

        mr_update_load_balance_params(drv_map, lbInfo);

        num_lds = le16_to_cpu(drv_map->raidMap.ldCount);

        /* Convert the per-LD capability bits to host endianness once. */
        for (ldCount = 0; ldCount < num_lds; ldCount++) {
                ld = MR_TargetIdToLdGet(ldCount, drv_map);
                raid = MR_LdRaidGet(ld, drv_map);
                le32_to_cpus((u32 *)&raid->capability);
        }

        return 1;
}

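/*
 * MR_GetSpanBlock - For an evenly-spanned LD, find the span containing the
 * given row and (optionally) the corresponding start block within that span.
 * Returns the span index, or SPAN_INVALID on failure.
 */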
u32 MR_GetSpanBlock(u32 ld, u64 row, u64 *span_blk,
                struct MR_DRV_RAID_MAP_ALL *map)
{
        struct MR_SPAN_BLOCK_INFO *pSpanBlock = MR_LdSpanInfoGet(ld, map);
        struct MR_QUAD_ELEMENT *quad;
        struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
        u32 span, j;

        for (span = 0; span < raid->spanDepth; span++, pSpanBlock++) {

                for (j = 0; j < le32_to_cpu(pSpanBlock->block_span_info.noElements); j++) {
                        quad = &pSpanBlock->block_span_info.quad[j];

                        if (le32_to_cpu(quad->diff) == 0)
                                return SPAN_INVALID;
                        if (le64_to_cpu(quad->logStart) <= row && row <=
                                le64_to_cpu(quad->logEnd) &&
                                (mega_mod64(row - le64_to_cpu(quad->logStart),
                                le32_to_cpu(quad->diff))) == 0) {
                                if (span_blk != NULL) {
                                        u64 blk;

                                        blk = mega_div64_32((row - le64_to_cpu(quad->logStart)),
                                                le32_to_cpu(quad->diff));
                                        blk = (blk + le64_to_cpu(quad->offsetInSpan)) << raid->stripeShift;
                                        *span_blk = blk;
                                }
                                return span;
                        }
                }
        }
        return SPAN_INVALID;
}

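/*
 * getSpanInfo - Debug-only helper (compiled under SPAN_DEBUG) that dumps the
 * per-LD span-set geometry derived from the RAID map.
 */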
#if SPAN_DEBUG
static int getSpanInfo(struct MR_DRV_RAID_MAP_ALL *map,
                PLD_SPAN_INFO ldSpanInfo)
{

        u8 span;
        u32 element;
        struct MR_LD_RAID *raid;
        LD_SPAN_SET *span_set;
        struct MR_QUAD_ELEMENT *quad;
        int ldCount;
        u16 ld;

        for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) {
                ld = MR_TargetIdToLdGet(ldCount, map);
                if (ld >= (MAX_LOGICAL_DRIVES_EXT - 1))
                        continue;
                raid = MR_LdRaidGet(ld, map);
                dev_dbg(&instance->pdev->dev, "LD %x: span_depth=%x\n",
                        ld, raid->spanDepth);
                for (span = 0; span < raid->spanDepth; span++)
                        dev_dbg(&instance->pdev->dev, "Span=%x,"
                                " number of quads=%x\n", span,
                                le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
                                block_span_info.noElements));
                for (element = 0; element < MAX_QUAD_DEPTH; element++) {
                        span_set = &(ldSpanInfo[ld].span_set[element]);
                        if (span_set->span_row_data_width == 0)
                                break;

                        dev_dbg(&instance->pdev->dev, "Span Set %x: "
                                "width=%x, diff=%x\n", element,
                                (unsigned int)span_set->span_row_data_width,
                                (unsigned int)span_set->diff);
                        dev_dbg(&instance->pdev->dev, "logical LBA "
                                "start=0x%08lx, end=0x%08lx\n",
                                (long unsigned int)span_set->log_start_lba,
                                (long unsigned int)span_set->log_end_lba);
                        dev_dbg(&instance->pdev->dev, "span row start=0x%08lx,"
                                " end=0x%08lx\n",
                                (long unsigned int)span_set->span_row_start,
                                (long unsigned int)span_set->span_row_end);
                        dev_dbg(&instance->pdev->dev, "data row start=0x%08lx,"
                                " end=0x%08lx\n",
                                (long unsigned int)span_set->data_row_start,
                                (long unsigned int)span_set->data_row_end);
                        dev_dbg(&instance->pdev->dev, "data strip start=0x%08lx,"
                                " end=0x%08lx\n",
                                (long unsigned int)span_set->data_strip_start,
                                (long unsigned int)span_set->data_strip_end);

                        for (span = 0; span < raid->spanDepth; span++) {
                                if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
                                        block_span_info.noElements) >=
                                        element + 1) {
                                        quad = &map->raidMap.ldSpanMap[ld].
                                                spanBlock[span].block_span_info.
                                                quad[element];
                                        dev_dbg(&instance->pdev->dev, "Span=%x, "
                                                "Quad=%x, diff=%x\n", span,
                                                element, le32_to_cpu(quad->diff));
                                        dev_dbg(&instance->pdev->dev,
                                                "offset_in_span=0x%08lx\n",
                                                (long unsigned int)le64_to_cpu(quad->offsetInSpan));
                                        dev_dbg(&instance->pdev->dev,
                                                "logical start=0x%08lx, end=0x%08lx\n",
                                                (long unsigned int)le64_to_cpu(quad->logStart),
                                                (long unsigned int)le64_to_cpu(quad->logEnd));
                                }
                        }
                }
        }
        return 0;
}
#endif

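/*
 * mr_spanset_get_span_block - Uneven-span variant of MR_GetSpanBlock(): walk
 * the per-LD span sets to find the span containing @row and, if requested,
 * the start block inside that span.  Returns the span index or SPAN_INVALID.
 */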
u32 mr_spanset_get_span_block(struct megasas_instance *instance,
                u32 ld, u64 row, u64 *span_blk, struct MR_DRV_RAID_MAP_ALL *map)
{
        struct fusion_context *fusion = instance->ctrl_context;
        struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
        LD_SPAN_SET *span_set;
        struct MR_QUAD_ELEMENT *quad;
        u32 span, info;
        PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span;

        for (info = 0; info < MAX_QUAD_DEPTH; info++) {
                span_set = &(ldSpanInfo[ld].span_set[info]);

                if (span_set->span_row_data_width == 0)
                        break;

                if (row > span_set->data_row_end)
                        continue;

                for (span = 0; span < raid->spanDepth; span++)
                        if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
                                block_span_info.noElements) >= info + 1) {
                                quad = &map->raidMap.ldSpanMap[ld].
                                        spanBlock[span].
                                        block_span_info.quad[info];
                                if (le32_to_cpu(quad->diff) == 0)
                                        return SPAN_INVALID;
                                if (le64_to_cpu(quad->logStart) <= row &&
                                        row <= le64_to_cpu(quad->logEnd) &&
                                        (mega_mod64(row - le64_to_cpu(quad->logStart),
                                        le32_to_cpu(quad->diff))) == 0) {
                                        if (span_blk != NULL) {
                                                u64 blk;

                                                blk = mega_div64_32
                                                        ((row - le64_to_cpu(quad->logStart)),
                                                        le32_to_cpu(quad->diff));
                                                blk = (blk + le64_to_cpu(quad->offsetInSpan))
                                                        << raid->stripeShift;
                                                *span_blk = blk;
                                        }
                                        return span;
                                }
                        }
        }
        return SPAN_INVALID;
}

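/*
 * get_row_from_strip - Uneven-span geometry: translate a strip number of an
 * LD into the row number it falls in.  Returns -1LLU if the strip does not
 * belong to any known span set.
 */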
static u64 get_row_from_strip(struct megasas_instance *instance,
                u32 ld, u64 strip, struct MR_DRV_RAID_MAP_ALL *map)
{
        struct fusion_context *fusion = instance->ctrl_context;
        struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
        LD_SPAN_SET *span_set;
        PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span;
        u32 info, strip_offset, span, span_offset;
        u64 span_set_Strip, span_set_Row, retval;

        for (info = 0; info < MAX_QUAD_DEPTH; info++) {
                span_set = &(ldSpanInfo[ld].span_set[info]);

                if (span_set->span_row_data_width == 0)
                        break;
                if (strip > span_set->data_strip_end)
                        continue;

                span_set_Strip = strip - span_set->data_strip_start;
                strip_offset = mega_mod64(span_set_Strip,
                                span_set->span_row_data_width);
                span_set_Row = mega_div64_32(span_set_Strip,
                                span_set->span_row_data_width) * span_set->diff;
                for (span = 0, span_offset = 0; span < raid->spanDepth; span++)
                        if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
                                block_span_info.noElements) >= info + 1) {
                                if (strip_offset >=
                                        span_set->strip_offset[span])
                                        span_offset++;
                                else
                                        break;
                        }
#if SPAN_DEBUG
                dev_info(&instance->pdev->dev, "Strip 0x%llx, "
                        "span_set_Strip 0x%llx, span_set_Row 0x%llx, "
                        "data width 0x%llx, span offset 0x%x\n", strip,
                        (unsigned long long)span_set_Strip,
                        (unsigned long long)span_set_Row,
                        (unsigned long long)span_set->span_row_data_width,
                        span_offset);
                dev_info(&instance->pdev->dev, "For strip 0x%llx "
                        "row is 0x%llx\n", strip,
                        (unsigned long long)span_set->data_row_start +
                        (unsigned long long)span_set_Row + (span_offset - 1));
#endif
                retval = (span_set->data_row_start + span_set_Row +
                        (span_offset - 1));
                return retval;
        }
        return -1LLU;
}

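/*
 * get_strip_from_row - Uneven-span geometry: translate a row number of an LD
 * back into the first data strip of that row.  Returns -1 if no span set
 * covers the row.
 */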
static u64 get_strip_from_row(struct megasas_instance *instance,
                u32 ld, u64 row, struct MR_DRV_RAID_MAP_ALL *map)
{
        struct fusion_context *fusion = instance->ctrl_context;
        struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
        LD_SPAN_SET *span_set;
        struct MR_QUAD_ELEMENT *quad;
        PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span;
        u32 span, info;
        u64 strip;

        for (info = 0; info < MAX_QUAD_DEPTH; info++) {
                span_set = &(ldSpanInfo[ld].span_set[info]);

                if (span_set->span_row_data_width == 0)
                        break;
                if (row > span_set->data_row_end)
                        continue;

                for (span = 0; span < raid->spanDepth; span++)
                        if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
                                block_span_info.noElements) >= info + 1) {
                                quad = &map->raidMap.ldSpanMap[ld].
                                        spanBlock[span].block_span_info.quad[info];
                                if (le64_to_cpu(quad->logStart) <= row &&
                                        row <= le64_to_cpu(quad->logEnd) &&
                                        mega_mod64((row - le64_to_cpu(quad->logStart)),
                                        le32_to_cpu(quad->diff)) == 0) {
                                        strip = mega_div64_32
                                                (((row - span_set->data_row_start)
                                                        - le64_to_cpu(quad->logStart)),
                                                        le32_to_cpu(quad->diff));
                                        strip *= span_set->span_row_data_width;
                                        strip += span_set->data_strip_start;
                                        strip += span_set->strip_offset[span];
                                        return strip;
                                }
                        }
        }
        dev_err(&instance->pdev->dev, "get_strip_from_row: "
                "returns invalid strip for ld=%x, row=%lx\n",
                ld, (long unsigned int)row);
        return -1;
}

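/*
 * get_arm_from_strip - Uneven-span geometry: return the physical arm (drive
 * position within the span row) that holds the given strip of an LD, or -1
 * if the strip is outside every span set.
 */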
static u32 get_arm_from_strip(struct megasas_instance *instance,
                u32 ld, u64 strip, struct MR_DRV_RAID_MAP_ALL *map)
{
        struct fusion_context *fusion = instance->ctrl_context;
        struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
        LD_SPAN_SET *span_set;
        PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span;
        u32 info, strip_offset, span, span_offset, retval;

        for (info = 0; info < MAX_QUAD_DEPTH; info++) {
                span_set = &(ldSpanInfo[ld].span_set[info]);

                if (span_set->span_row_data_width == 0)
                        break;
                if (strip > span_set->data_strip_end)
                        continue;

                strip_offset = (uint)mega_mod64
                                ((strip - span_set->data_strip_start),
                                span_set->span_row_data_width);

                for (span = 0, span_offset = 0; span < raid->spanDepth; span++)
                        if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
                                block_span_info.noElements) >= info + 1) {
                                if (strip_offset >=
                                        span_set->strip_offset[span])
                                        span_offset =
                                                span_set->strip_offset[span];
                                else
                                        break;
                        }
#if SPAN_DEBUG
                dev_info(&instance->pdev->dev, "get_arm_from_strip: "
                        "for ld=0x%x strip=0x%lx arm is 0x%x\n", ld,
                        (long unsigned int)strip, (strip_offset - span_offset));
#endif
                retval = (strip_offset - span_offset);
                return retval;
        }

        dev_err(&instance->pdev->dev, "get_arm_from_strip: "
                "returns invalid arm for ld=%x strip=%lx\n",
                ld, (long unsigned int)strip);

        return -1;
}

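/*
 * get_arm - Return the physical arm for a stripe of an LD.  RAID 0/5/6 use a
 * simple modulo over the span row size; for RAID 1 the strip's logical arm is
 * doubled to select the primary arm of the mirror pair.
 */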
u8 get_arm(struct megasas_instance *instance, u32 ld, u8 span, u64 stripe,
                struct MR_DRV_RAID_MAP_ALL *map)
{
        struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);

        u32 arm = 0;

        switch (raid->level) {
        case 0:
        case 5:
        case 6:
                arm = mega_mod64(stripe, SPAN_ROW_SIZE(map, ld, span));
                break;
        case 1:
                /* Start with the logical arm, then map to the primary arm. */
                arm = get_arm_from_strip(instance, ld, stripe, map);
                if (arm != -1U)
                        arm *= 2;
                break;
        }

        return arm;
}

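/*
 * mr_spanset_get_phy_params - Uneven-span variant of MR_GetPhyParams(): map a
 * strip of an LD to the physical device handle and physical block, and fill
 * the span/arm fields of the RAID context.  Returns TRUE on success, FALSE if
 * the strip cannot be mapped.
 */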
static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
                u64 stripRow, u16 stripRef, struct IO_REQUEST_INFO *io_info,
                struct RAID_CONTEXT *pRAID_Context,
                struct MR_DRV_RAID_MAP_ALL *map)
{
        struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
        u32 pd, arRef;
        u8 physArm, span;
        u64 row;
        u8 retval = TRUE;
        u8 do_invader = 0;
        u64 *pdBlock = &io_info->pdBlock;
        u16 *pDevHandle = &io_info->devHandle;
        u32 logArm, rowMod, armQ, arm;

        if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER ||
                instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
                do_invader = 1;

        /* Row and span were already computed for uneven-span IO. */
        row = io_info->start_row;
        span = io_info->start_span;

        if (raid->level == 6) {
                /* RAID 6: rotate the logical arm past the Q arm of this row. */
                logArm = get_arm_from_strip(instance, ld, stripRow, map);
                if (logArm == -1U)
                        return FALSE;
                rowMod = mega_mod64(row, SPAN_ROW_SIZE(map, ld, span));
                armQ = SPAN_ROW_SIZE(map, ld, span) - 1 - rowMod;
                arm = armQ + 1 + logArm;
                if (arm >= SPAN_ROW_SIZE(map, ld, span))
                        arm -= SPAN_ROW_SIZE(map, ld, span);
                physArm = (u8)arm;
        } else
                /* Calculate the arm. */
                physArm = get_arm(instance, ld, span, stripRow, map);
        if (physArm == 0xFF)
                return FALSE;

        /* Get the array on which this span is present. */
        arRef = MR_LdSpanArrayGet(ld, span, map);
        pd = MR_ArPdGet(arRef, physArm, map);

        if (pd != MR_PD_INVALID)
                /* Get the dev handle from the pd. */
                *pDevHandle = MR_PdDevHandleGet(pd, map);
        else {
                /* Set the dev handle as invalid. */
                *pDevHandle = MR_PD_INVALID;
                if ((raid->level >= 5) &&
                        (!do_invader || (do_invader &&
                        (raid->regTypeReqOnRead != REGION_TYPE_UNUSED))))
                        pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
                else if (raid->level == 1) {
                        /* Try the alternate (mirror) pd. */
                        pd = MR_ArPdGet(arRef, physArm + 1, map);
                        if (pd != MR_PD_INVALID)
                                *pDevHandle = MR_PdDevHandleGet(pd, map);
                }
        }

        *pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk);
        pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) |
                physArm;
        io_info->span_arm = pRAID_Context->spanArm;
        return retval;
}

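/*
 * MR_GetPhyParams - Map a strip of an evenly-spanned LD to the physical
 * device handle and physical block, and fill the span/arm fields of the RAID
 * context.  Returns TRUE on success, FALSE if the strip cannot be mapped.
 */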
u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
                u16 stripRef, struct IO_REQUEST_INFO *io_info,
                struct RAID_CONTEXT *pRAID_Context,
                struct MR_DRV_RAID_MAP_ALL *map)
{
        struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
        u32 pd, arRef;
        u8 physArm, span;
        u64 row;
        u8 retval = TRUE;
        u8 do_invader = 0;
        u64 *pdBlock = &io_info->pdBlock;
        u16 *pDevHandle = &io_info->devHandle;

        if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER ||
                instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
                do_invader = 1;

        row = mega_div64_32(stripRow, raid->rowDataSize);

        if (raid->level == 6) {
                /* Logical arm within the row. */
                u32 logArm = mega_mod64(stripRow, raid->rowDataSize);
                u32 rowMod, armQ, arm;

                if (raid->rowSize == 0)
                        return FALSE;

                /* Rotate the logical arm past the Q arm of this row. */
                rowMod = mega_mod64(row, raid->rowSize);
                armQ = raid->rowSize - 1 - rowMod;
                arm = armQ + 1 + logArm;
                if (arm >= raid->rowSize)
                        arm -= raid->rowSize;
                physArm = (u8)arm;
        } else {
                if (raid->modFactor == 0)
                        return FALSE;
                physArm = MR_LdDataArmGet(ld, mega_mod64(stripRow,
                                raid->modFactor),
                                map);
        }

        if (raid->spanDepth == 1) {
                span = 0;
                *pdBlock = row << raid->stripeShift;
        } else {
                span = (u8)MR_GetSpanBlock(ld, row, pdBlock, map);
                if (span == SPAN_INVALID)
                        return FALSE;
        }

        /* Get the array on which this span is present. */
        arRef = MR_LdSpanArrayGet(ld, span, map);
        pd = MR_ArPdGet(arRef, physArm, map);

        if (pd != MR_PD_INVALID)
                /* Get the dev handle from the pd. */
                *pDevHandle = MR_PdDevHandleGet(pd, map);
        else {
                /* Set the dev handle as invalid. */
                *pDevHandle = MR_PD_INVALID;
                if ((raid->level >= 5) &&
                        (!do_invader || (do_invader &&
                        (raid->regTypeReqOnRead != REGION_TYPE_UNUSED))))
                        pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
                else if (raid->level == 1) {
                        /* Try the alternate (mirror) pd. */
                        pd = MR_ArPdGet(arRef, physArm + 1, map);
                        if (pd != MR_PD_INVALID)
                                *pDevHandle = MR_PdDevHandleGet(pd, map);
                }
        }

        *pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk);
        pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) |
                physArm;
        io_info->span_arm = pRAID_Context->spanArm;
        return retval;
}

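/*
 * MR_BuildRaidContext - Build the RAID context for an IO: compute the
 * start/end strip and row, the region-lock start and length, and decide
 * whether the request qualifies for the fast path.  Returns FALSE if the IO
 * cannot be mapped against the current RAID map, otherwise TRUE (with
 * io_info and pRAID_Context filled in).
 */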
u8
MR_BuildRaidContext(struct megasas_instance *instance,
                struct IO_REQUEST_INFO *io_info,
                struct RAID_CONTEXT *pRAID_Context,
                struct MR_DRV_RAID_MAP_ALL *map, u8 **raidLUN)
{
        struct MR_LD_RAID *raid;
        u32 ld, stripSize, stripe_mask;
        u64 endLba, endStrip, endRow, start_row, start_strip;
        u64 regStart;
        u32 regSize;
        u8 num_strips, numRows;
        u16 ref_in_start_stripe, ref_in_end_stripe;
        u64 ldStartBlock;
        u32 numBlocks, ldTgtId;
        u8 isRead;
        u8 retval = 0;
        u8 startlba_span = SPAN_INVALID;
        u64 *pdBlock = &io_info->pdBlock;

        ldStartBlock = io_info->ldStartBlock;
        numBlocks = io_info->numBlocks;
        ldTgtId = io_info->ldTgtId;
        isRead = io_info->isRead;
        io_info->IoforUnevenSpan = 0;
        io_info->start_span = SPAN_INVALID;

        ld = MR_TargetIdToLdGet(ldTgtId, map);
        raid = MR_LdRaidGet(ld, map);

        /*
         * A zero rowDataSize with a zero spanRowDataSize means the map is
         * unusable; a zero rowDataSize alone marks an uneven-span LD.
         */
        if (raid->rowDataSize == 0) {
                if (MR_LdSpanPtrGet(ld, 0, map)->spanRowDataSize == 0)
                        return FALSE;
                else if (instance->UnevenSpanSupport) {
                        io_info->IoforUnevenSpan = 1;
                } else {
                        dev_info(&instance->pdev->dev,
                                "raid->rowDataSize is 0, but SPAN[0] "
                                "rowDataSize = 0x%0x, "
                                "and there is _NO_ UnevenSpanSupport\n",
                                MR_LdSpanPtrGet(ld, 0, map)->spanRowDataSize);
                        return FALSE;
                }
        }

        stripSize = 1 << raid->stripeShift;
        stripe_mask = stripSize - 1;

        /*
         * Calculate the starting row and stripe, and the number of strips
         * and rows covered by the IO.
         */
        start_strip = ldStartBlock >> raid->stripeShift;
        ref_in_start_stripe = (u16)(ldStartBlock & stripe_mask);
        endLba = ldStartBlock + numBlocks - 1;
        ref_in_end_stripe = (u16)(endLba & stripe_mask);
        endStrip = endLba >> raid->stripeShift;
        num_strips = (u8)(endStrip - start_strip + 1);

        if (io_info->IoforUnevenSpan) {
                start_row = get_row_from_strip(instance, ld, start_strip, map);
                endRow = get_row_from_strip(instance, ld, endStrip, map);
                if (start_row == -1ULL || endRow == -1ULL) {
                        dev_info(&instance->pdev->dev, "return from %s %d. "
                                "Send IO w/o region lock.\n",
                                __func__, __LINE__);
                        return FALSE;
                }

                if (raid->spanDepth == 1) {
                        startlba_span = 0;
                        *pdBlock = start_row << raid->stripeShift;
                } else
                        startlba_span = (u8)mr_spanset_get_span_block(instance,
                                ld, start_row, pdBlock, map);
                if (startlba_span == SPAN_INVALID) {
                        dev_info(&instance->pdev->dev, "return from %s %d "
                                "for row 0x%llx, start strip %llx, "
                                "end strip %llx\n", __func__, __LINE__,
                                (unsigned long long)start_row,
                                (unsigned long long)start_strip,
                                (unsigned long long)endStrip);
                        return FALSE;
                }
                io_info->start_span = startlba_span;
                io_info->start_row = start_row;
#if SPAN_DEBUG
                dev_dbg(&instance->pdev->dev, "Check Span number from %s %d "
                        "for row 0x%llx, start strip 0x%llx end strip 0x%llx"
                        " span 0x%x\n", __func__, __LINE__,
                        (unsigned long long)start_row,
                        (unsigned long long)start_strip,
                        (unsigned long long)endStrip, startlba_span);
                dev_dbg(&instance->pdev->dev, "start_row 0x%llx endRow 0x%llx "
                        "Start span 0x%x\n", (unsigned long long)start_row,
                        (unsigned long long)endRow, startlba_span);
#endif
        } else {
                start_row = mega_div64_32(start_strip, raid->rowDataSize);
                endRow = mega_div64_32(endStrip, raid->rowDataSize);
        }
        numRows = (u8)(endRow - start_row + 1);

        /*
         * Calculate the region-lock start and length.  Start by assuming the
         * region covers exactly one full stripe at the start row.
         */
        regStart = start_row << raid->stripeShift;
        regSize = stripSize;

        /* Check whether this IO may be sent via the fast path. */
        if (raid->capability.fpCapable) {
                if (isRead)
                        io_info->fpOkForIo = (raid->capability.fpReadCapable &&
                                ((num_strips == 1) ||
                                raid->capability.fpReadAcrossStripe));
                else
                        io_info->fpOkForIo = (raid->capability.fpWriteCapable &&
                                ((num_strips == 1) ||
                                raid->capability.fpWriteAcrossStripe));
        } else
                io_info->fpOkForIo = FALSE;

        if (numRows == 1) {
                /* Single-strip IO: trim the region to the blocks accessed. */
                if (num_strips == 1) {
                        regStart += ref_in_start_stripe;
                        regSize = numBlocks;
                }
                /* Multiple strips, but all on a single row: keep the stripe. */
        } else if (io_info->IoforUnevenSpan == 0) {
                /*
                 * Multi-row IO on an even-span LD.  If the start strip is the
                 * last in its row, the region starts mid-stripe.
                 */
                if (start_strip == (start_row + 1) * raid->rowDataSize - 1) {
                        regStart += ref_in_start_stripe;
                        /* Initial region is the tail of the first stripe. */
                        regSize = stripSize - ref_in_start_stripe;
                }

                /* Add the complete rows in the middle of the transfer. */
                if (numRows > 2)
                        regSize += (numRows - 2) << raid->stripeShift;

                /* Last row: partial if the IO ends within its first strip. */
                if (endStrip == endRow * raid->rowDataSize)
                        regSize += ref_in_end_stripe + 1;
                else
                        regSize += stripSize;
        } else {
                /*
                 * Multi-row IO on an uneven-span LD: same logic as above, but
                 * the row boundaries come from the span-set geometry.
                 */
                if (start_strip == (get_strip_from_row(instance, ld, start_row, map) +
                                SPAN_ROW_DATA_SIZE(map, ld, startlba_span) - 1)) {
                        regStart += ref_in_start_stripe;
                        /* Initial region is the tail of the first stripe. */
                        regSize = stripSize - ref_in_start_stripe;
                }

                /* Add the complete rows in the middle of the transfer. */
                if (numRows > 2)
                        regSize += (numRows - 2) << raid->stripeShift;

                /* Last row: partial if the IO ends within its first strip. */
                if (endStrip == get_strip_from_row(instance, ld, endRow, map))
                        regSize += ref_in_end_stripe + 1;
                else
                        regSize += stripSize;
        }

        pRAID_Context->timeoutValue =
                cpu_to_le16(raid->fpIoTimeoutForLd ?
                        raid->fpIoTimeoutForLd :
                        map->raidMap.fpPdIoTimeoutSec);
        if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
                (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
                pRAID_Context->regLockFlags = (isRead) ?
                        raid->regTypeReqOnRead : raid->regTypeReqOnWrite;
        else
                pRAID_Context->regLockFlags = (isRead) ?
                        REGION_TYPE_SHARED_READ : raid->regTypeReqOnWrite;
        pRAID_Context->VirtualDiskTgtId = raid->targetId;
        pRAID_Context->regLockRowLBA = cpu_to_le64(regStart);
        pRAID_Context->regLockLength = cpu_to_le32(regSize);
        pRAID_Context->configSeqNum = raid->seqNum;

        *raidLUN = raid->LUN;

        /*
         * Get the physical parameters for the starting strip if the IO is
         * fast-path capable; for non-fast-path reads, walk every strip.
         */
        if (io_info->fpOkForIo) {
                retval = io_info->IoforUnevenSpan ?
                        mr_spanset_get_phy_params(instance, ld,
                                start_strip, ref_in_start_stripe,
                                io_info, pRAID_Context, map) :
                        MR_GetPhyParams(instance, ld, start_strip,
                                ref_in_start_stripe, io_info,
                                pRAID_Context, map);
                /* An IO on an invalid pd cannot use the fast path. */
                if (io_info->devHandle == MR_PD_INVALID)
                        io_info->fpOkForIo = FALSE;
                return retval;
        } else if (isRead) {
                uint stripIdx;

                for (stripIdx = 0; stripIdx < num_strips; stripIdx++) {
                        retval = io_info->IoforUnevenSpan ?
                                mr_spanset_get_phy_params(instance, ld,
                                        start_strip + stripIdx,
                                        ref_in_start_stripe, io_info,
                                        pRAID_Context, map) :
                                MR_GetPhyParams(instance, ld,
                                        start_strip + stripIdx, ref_in_start_stripe,
                                        io_info, pRAID_Context, map);
                        if (!retval)
                                return TRUE;
                }
        }

#if SPAN_DEBUG
        /* Log the arm computed for the starting strip. */
        if (io_info->IoforUnevenSpan)
                get_arm_from_strip(instance, ld, start_strip, map);
#endif
        return TRUE;
}

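/*
 * mr_update_span_set - Build the per-LD span-set bookkeeping (LBA, row and
 * strip ranges plus per-span strip offsets) from the quad elements in the
 * RAID map.  Used only when uneven spans are supported.
 */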
void mr_update_span_set(struct MR_DRV_RAID_MAP_ALL *map,
                PLD_SPAN_INFO ldSpanInfo)
{
        u8 span, count;
        u32 element, span_row_width;
        u64 span_row;
        struct MR_LD_RAID *raid;
        LD_SPAN_SET *span_set, *span_set_prev;
        struct MR_QUAD_ELEMENT *quad;
        int ldCount;
        u16 ld;

        for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) {
                ld = MR_TargetIdToLdGet(ldCount, map);
                if (ld >= (MAX_LOGICAL_DRIVES_EXT - 1))
                        continue;
                raid = MR_LdRaidGet(ld, map);
                for (element = 0; element < MAX_QUAD_DEPTH; element++) {
                        for (span = 0; span < raid->spanDepth; span++) {
                                if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
                                        block_span_info.noElements) <
                                        element + 1)
                                        continue;
                                span_set = &(ldSpanInfo[ld].span_set[element]);
                                quad = &map->raidMap.ldSpanMap[ld].
                                        spanBlock[span].block_span_info.
                                        quad[element];

                                span_set->diff = le32_to_cpu(quad->diff);

                                for (count = 0, span_row_width = 0;
                                        count < raid->spanDepth; count++) {
                                        if (le32_to_cpu(map->raidMap.ldSpanMap[ld].
                                                spanBlock[count].
                                                block_span_info.
                                                noElements) >= element + 1) {
                                                span_set->strip_offset[count] =
                                                        span_row_width;
                                                span_row_width +=
                                                        MR_LdSpanPtrGet
                                                        (ld, count, map)->spanRowDataSize;
                                                printk(KERN_INFO "megasas: "
                                                        "span %x rowDataSize %x\n",
                                                        count, MR_LdSpanPtrGet
                                                        (ld, count, map)->spanRowDataSize);
                                        }
                                }

                                span_set->span_row_data_width = span_row_width;
                                span_row = mega_div64_32(((le64_to_cpu(quad->logEnd) -
                                        le64_to_cpu(quad->logStart)) + le32_to_cpu(quad->diff)),
                                        le32_to_cpu(quad->diff));

                                if (element == 0) {
                                        span_set->log_start_lba = 0;
                                        span_set->log_end_lba =
                                                ((span_row << raid->stripeShift)
                                                * span_row_width) - 1;

                                        span_set->span_row_start = 0;
                                        span_set->span_row_end = span_row - 1;

                                        span_set->data_strip_start = 0;
                                        span_set->data_strip_end =
                                                (span_row * span_row_width) - 1;

                                        span_set->data_row_start = 0;
                                        span_set->data_row_end =
                                                (span_row * le32_to_cpu(quad->diff)) - 1;
                                } else {
                                        span_set_prev = &(ldSpanInfo[ld].
                                                span_set[element - 1]);
                                        span_set->log_start_lba =
                                                span_set_prev->log_end_lba + 1;
                                        span_set->log_end_lba =
                                                span_set->log_start_lba +
                                                ((span_row << raid->stripeShift)
                                                * span_row_width) - 1;

                                        span_set->span_row_start =
                                                span_set_prev->span_row_end + 1;
                                        span_set->span_row_end =
                                                span_set->span_row_start + span_row - 1;

                                        span_set->data_strip_start =
                                                span_set_prev->data_strip_end + 1;
                                        span_set->data_strip_end =
                                                span_set->data_strip_start +
                                                (span_row * span_row_width) - 1;

                                        span_set->data_row_start =
                                                span_set_prev->data_row_end + 1;
                                        span_set->data_row_end =
                                                span_set->data_row_start +
                                                (span_row * le32_to_cpu(quad->diff)) - 1;
                                }
                                break;
                        }
                        if (span == raid->spanDepth)
                                break;
                }
        }
#if SPAN_DEBUG
        getSpanInfo(map, ldSpanInfo);
#endif
}

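/*
 * mr_update_load_balance_params - Mark which LDs are eligible for RAID-1
 * read load balancing (only optimal RAID-1 LDs qualify) and reset an
 * out-of-range lb_pending_cmds module parameter back to its default.
 */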
void mr_update_load_balance_params(struct MR_DRV_RAID_MAP_ALL *drv_map,
                struct LD_LOAD_BALANCE_INFO *lbInfo)
{
        int ldCount;
        u16 ld;
        struct MR_LD_RAID *raid;

        if (lb_pending_cmds > 128 || lb_pending_cmds < 1)
                lb_pending_cmds = LB_PENDING_CMDS_DEFAULT;

        for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) {
                ld = MR_TargetIdToLdGet(ldCount, drv_map);
                if (ld >= MAX_LOGICAL_DRIVES_EXT) {
                        lbInfo[ldCount].loadBalanceFlag = 0;
                        continue;
                }

                raid = MR_LdRaidGet(ld, drv_map);
                if ((raid->level != 1) ||
                        (raid->ldState != MR_LD_STATE_OPTIMAL)) {
                        lbInfo[ldCount].loadBalanceFlag = 0;
                        continue;
                }
                lbInfo[ldCount].loadBalanceFlag = 1;
        }
}

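/*
 * megasas_get_best_arm_pd - For a RAID-1 read, pick the mirror arm expected
 * to serve the IO faster, based on outstanding command counts and the
 * distance from each arm's last accessed block.  Returns the chosen pd index
 * and updates io_info->span_arm accordingly.
 */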
u8 megasas_get_best_arm_pd(struct megasas_instance *instance,
                struct LD_LOAD_BALANCE_INFO *lbInfo, struct IO_REQUEST_INFO *io_info)
{
        struct fusion_context *fusion;
        struct MR_LD_RAID *raid;
        struct MR_DRV_RAID_MAP_ALL *drv_map;
        u16 pend0, pend1, ld;
        u64 diff0, diff1;
        u8 bestArm, pd0, pd1, span, arm;
        u32 arRef, span_row_size;

        u64 block = io_info->ldStartBlock;
        u32 count = io_info->numBlocks;

        span = ((io_info->span_arm & RAID_CTX_SPANARM_SPAN_MASK)
                        >> RAID_CTX_SPANARM_SPAN_SHIFT);
        arm = (io_info->span_arm & RAID_CTX_SPANARM_ARM_MASK);

        fusion = instance->ctrl_context;
        drv_map = fusion->ld_drv_map[(instance->map_id & 1)];
        ld = MR_TargetIdToLdGet(io_info->ldTgtId, drv_map);
        raid = MR_LdRaidGet(ld, drv_map);
        span_row_size = instance->UnevenSpanSupport ?
                SPAN_ROW_SIZE(drv_map, ld, span) : raid->rowSize;

        arRef = MR_LdSpanArrayGet(ld, span, drv_map);
        pd0 = MR_ArPdGet(arRef, arm, drv_map);
        pd1 = MR_ArPdGet(arRef, (arm + 1) >= span_row_size ?
                (arm + 1 - span_row_size) : arm + 1, drv_map);

        /* Get the pending command counts for the data and mirror arms. */
        pend0 = atomic_read(&lbInfo->scsi_pending_cmds[pd0]);
        pend1 = atomic_read(&lbInfo->scsi_pending_cmds[pd1]);

        /* Determine the disk whose head is nearer to the requested block. */
        diff0 = ABS_DIFF(block, lbInfo->last_accessed_block[pd0]);
        diff1 = ABS_DIFF(block, lbInfo->last_accessed_block[pd1]);
        bestArm = (diff0 <= diff1 ? arm : arm ^ 1);

        /*
         * Switch arms if the chosen one already has lb_pending_cmds more
         * outstanding commands than the other.
         */
        if ((bestArm == arm && pend0 > pend1 + lb_pending_cmds) ||
                (bestArm != arm && pend1 > pend0 + lb_pending_cmds))
                bestArm ^= 1;

        /* Record the last accessed block on the chosen pd. */
        io_info->pd_after_lb = (bestArm == arm) ? pd0 : pd1;
        lbInfo->last_accessed_block[io_info->pd_after_lb] = block + count - 1;
        io_info->span_arm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | bestArm;
#if SPAN_DEBUG
        if (arm != bestArm)
                dev_dbg(&instance->pdev->dev, "LSI Debug R1 Load balance "
                        "occur - span 0x%x arm 0x%x bestArm 0x%x "
                        "io_info->span_arm 0x%x\n",
                        span, arm, bestArm, io_info->span_arm);
#endif
        return io_info->pd_after_lb;
}

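/*
 * get_updated_dev_handle - Return the device handle for the load-balanced
 * arm of a RAID-1 read and account for the newly issued command.
 */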
u16 get_updated_dev_handle(struct megasas_instance *instance,
                struct LD_LOAD_BALANCE_INFO *lbInfo, struct IO_REQUEST_INFO *io_info)
{
        u8 arm_pd;
        u16 devHandle;
        struct fusion_context *fusion;
        struct MR_DRV_RAID_MAP_ALL *drv_map;

        fusion = instance->ctrl_context;
        drv_map = fusion->ld_drv_map[(instance->map_id & 1)];

        /* Get the best arm (pd index), then its device handle. */
        arm_pd = megasas_get_best_arm_pd(instance, lbInfo, io_info);
        devHandle = MR_PdDevHandleGet(arm_pd, drv_map);
        atomic_inc(&lbInfo->scsi_pending_cmds[arm_pd]);
        return devHandle;
}