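/*
 * SCSI to NVMe translation layer: services SG_IO requests issued against an
 * NVMe namespace by translating SCSI CDBs into NVMe admin and I/O commands.
 */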
20#include <linux/nvme.h>
21#include <linux/bio.h>
22#include <linux/bitops.h>
23#include <linux/blkdev.h>
24#include <linux/compat.h>
25#include <linux/delay.h>
26#include <linux/errno.h>
27#include <linux/fs.h>
28#include <linux/genhd.h>
29#include <linux/idr.h>
30#include <linux/init.h>
31#include <linux/interrupt.h>
32#include <linux/io.h>
33#include <linux/kdev_t.h>
34#include <linux/kthread.h>
35#include <linux/kernel.h>
36#include <linux/mm.h>
37#include <linux/module.h>
38#include <linux/moduleparam.h>
39#include <linux/pci.h>
40#include <linux/poison.h>
41#include <linux/sched.h>
42#include <linux/slab.h>
43#include <linux/types.h>
44#include <scsi/sg.h>
45#include <scsi/scsi.h>
46
47
48static int sg_version_num = 30534;
49
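/* Internal return codes used by the SCSI-NVMe translation routines */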
50#define SNTI_TRANSLATION_SUCCESS 0
51#define SNTI_INTERNAL_ERROR 1
52
53
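/* VPD page codes accepted by the INQUIRY translation */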
54#define VPD_SUPPORTED_PAGES 0x00
55#define VPD_SERIAL_NUMBER 0x80
56#define VPD_DEVICE_IDENTIFIERS 0x83
57#define VPD_EXTENDED_INQUIRY 0x86
58#define VPD_BLOCK_DEV_CHARACTERISTICS 0xB1
59
60
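/* CDB byte offsets and bit masks used when decoding SCSI commands */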
61#define REPORT_LUNS_CDB_ALLOC_LENGTH_OFFSET 6
62#define REPORT_LUNS_SR_OFFSET 2
63#define READ_CAP_16_CDB_ALLOC_LENGTH_OFFSET 10
64#define REQUEST_SENSE_CDB_ALLOC_LENGTH_OFFSET 4
65#define REQUEST_SENSE_DESC_OFFSET 1
66#define REQUEST_SENSE_DESC_MASK 0x01
67#define DESCRIPTOR_FORMAT_SENSE_DATA_TYPE 1
68#define INQUIRY_EVPD_BYTE_OFFSET 1
69#define INQUIRY_PAGE_CODE_BYTE_OFFSET 2
70#define INQUIRY_EVPD_BIT_MASK 1
71#define INQUIRY_CDB_ALLOCATION_LENGTH_OFFSET 3
72#define START_STOP_UNIT_CDB_IMMED_OFFSET 1
73#define START_STOP_UNIT_CDB_IMMED_MASK 0x1
74#define START_STOP_UNIT_CDB_POWER_COND_MOD_OFFSET 3
75#define START_STOP_UNIT_CDB_POWER_COND_MOD_MASK 0xF
76#define START_STOP_UNIT_CDB_POWER_COND_OFFSET 4
77#define START_STOP_UNIT_CDB_POWER_COND_MASK 0xF0
78#define START_STOP_UNIT_CDB_NO_FLUSH_OFFSET 4
79#define START_STOP_UNIT_CDB_NO_FLUSH_MASK 0x4
80#define START_STOP_UNIT_CDB_START_OFFSET 4
81#define START_STOP_UNIT_CDB_START_MASK 0x1
82#define WRITE_BUFFER_CDB_MODE_OFFSET 1
83#define WRITE_BUFFER_CDB_MODE_MASK 0x1F
84#define WRITE_BUFFER_CDB_BUFFER_ID_OFFSET 2
85#define WRITE_BUFFER_CDB_BUFFER_OFFSET_OFFSET 3
86#define WRITE_BUFFER_CDB_PARM_LIST_LENGTH_OFFSET 6
87#define FORMAT_UNIT_CDB_FORMAT_PROT_INFO_OFFSET 1
88#define FORMAT_UNIT_CDB_FORMAT_PROT_INFO_MASK 0xC0
89#define FORMAT_UNIT_CDB_FORMAT_PROT_INFO_SHIFT 6
90#define FORMAT_UNIT_CDB_LONG_LIST_OFFSET 1
91#define FORMAT_UNIT_CDB_LONG_LIST_MASK 0x20
92#define FORMAT_UNIT_CDB_FORMAT_DATA_OFFSET 1
93#define FORMAT_UNIT_CDB_FORMAT_DATA_MASK 0x10
94#define FORMAT_UNIT_SHORT_PARM_LIST_LEN 4
95#define FORMAT_UNIT_LONG_PARM_LIST_LEN 8
96#define FORMAT_UNIT_PROT_INT_OFFSET 3
97#define FORMAT_UNIT_PROT_FIELD_USAGE_OFFSET 0
98#define FORMAT_UNIT_PROT_FIELD_USAGE_MASK 0x07
99#define UNMAP_CDB_PARAM_LIST_LENGTH_OFFSET 7
100
101
102#define NIBBLE_SHIFT 4
103#define FIXED_SENSE_DATA 0x70
104#define DESC_FORMAT_SENSE_DATA 0x72
105#define FIXED_SENSE_DATA_ADD_LENGTH 10
106#define LUN_ENTRY_SIZE 8
107#define LUN_DATA_HEADER_SIZE 8
108#define ALL_LUNS_RETURNED 0x02
109#define ALL_WELL_KNOWN_LUNS_RETURNED 0x01
110#define RESTRICTED_LUNS_RETURNED 0x00
111#define NVME_POWER_STATE_START_VALID 0x00
112#define NVME_POWER_STATE_ACTIVE 0x01
113#define NVME_POWER_STATE_IDLE 0x02
114#define NVME_POWER_STATE_STANDBY 0x03
115#define NVME_POWER_STATE_LU_CONTROL 0x07
116#define POWER_STATE_0 0
117#define POWER_STATE_1 1
118#define POWER_STATE_2 2
119#define POWER_STATE_3 3
120#define DOWNLOAD_SAVE_ACTIVATE 0x05
121#define DOWNLOAD_SAVE_DEFER_ACTIVATE 0x0E
122#define ACTIVATE_DEFERRED_MICROCODE 0x0F
123#define FORMAT_UNIT_IMMED_MASK 0x2
124#define FORMAT_UNIT_IMMED_OFFSET 1
125#define KELVIN_TEMP_FACTOR 273
126#define FIXED_FMT_SENSE_DATA_SIZE 18
127#define DESC_FMT_SENSE_DATA_SIZE 8
128
129
130#define INQ_STANDARD_INQUIRY_PAGE 0x00
131#define INQ_SUPPORTED_VPD_PAGES_PAGE 0x00
132#define INQ_UNIT_SERIAL_NUMBER_PAGE 0x80
133#define INQ_DEVICE_IDENTIFICATION_PAGE 0x83
134#define INQ_EXTENDED_INQUIRY_DATA_PAGE 0x86
135#define INQ_BDEV_CHARACTERISTICS_PAGE 0xB1
136#define INQ_SERIAL_NUMBER_LENGTH 0x14
137#define INQ_NUM_SUPPORTED_VPD_PAGES 5
138#define VERSION_SPC_4 0x06
139#define ACA_UNSUPPORTED 0
140#define STANDARD_INQUIRY_LENGTH 36
141#define ADDITIONAL_STD_INQ_LENGTH 31
142#define EXTENDED_INQUIRY_DATA_PAGE_LENGTH 0x3C
143#define RESERVED_FIELD 0
144
145
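/* READ/WRITE (6/10/12/16) CDB field offsets and masks */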
146#define IO_CDB_WP_MASK 0xE0
147#define IO_CDB_WP_SHIFT 5
148#define IO_CDB_FUA_MASK 0x8
149#define IO_6_CDB_LBA_OFFSET 0
150#define IO_6_CDB_LBA_MASK 0x001FFFFF
151#define IO_6_CDB_TX_LEN_OFFSET 4
152#define IO_6_DEFAULT_TX_LEN 256
153#define IO_10_CDB_LBA_OFFSET 2
154#define IO_10_CDB_TX_LEN_OFFSET 7
155#define IO_10_CDB_WP_OFFSET 1
156#define IO_10_CDB_FUA_OFFSET 1
157#define IO_12_CDB_LBA_OFFSET 2
158#define IO_12_CDB_TX_LEN_OFFSET 6
159#define IO_12_CDB_WP_OFFSET 1
160#define IO_12_CDB_FUA_OFFSET 1
161#define IO_16_CDB_FUA_OFFSET 1
162#define IO_16_CDB_WP_OFFSET 1
163#define IO_16_CDB_LBA_OFFSET 2
164#define IO_16_CDB_TX_LEN_OFFSET 10
165
166
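/* Mode page codes, lengths and CDB offsets for MODE SENSE / MODE SELECT */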
167#define MODE_PAGE_INFO_EXCEP 0x1C
168#define MODE_PAGE_CACHING 0x08
169#define MODE_PAGE_CONTROL 0x0A
170#define MODE_PAGE_POWER_CONDITION 0x1A
171#define MODE_PAGE_RETURN_ALL 0x3F
172#define MODE_PAGE_BLK_DES_LEN 0x08
173#define MODE_PAGE_LLBAA_BLK_DES_LEN 0x10
174#define MODE_PAGE_CACHING_LEN 0x14
175#define MODE_PAGE_CONTROL_LEN 0x0C
176#define MODE_PAGE_POW_CND_LEN 0x28
177#define MODE_PAGE_INF_EXC_LEN 0x0C
178#define MODE_PAGE_ALL_LEN 0x54
179#define MODE_SENSE6_MPH_SIZE 4
180#define MODE_SENSE6_ALLOC_LEN_OFFSET 4
181#define MODE_SENSE_PAGE_CONTROL_OFFSET 2
182#define MODE_SENSE_PAGE_CONTROL_MASK 0xC0
183#define MODE_SENSE_PAGE_CODE_OFFSET 2
184#define MODE_SENSE_PAGE_CODE_MASK 0x3F
185#define MODE_SENSE_LLBAA_OFFSET 1
186#define MODE_SENSE_LLBAA_MASK 0x10
187#define MODE_SENSE_LLBAA_SHIFT 4
188#define MODE_SENSE_DBD_OFFSET 1
189#define MODE_SENSE_DBD_MASK 8
190#define MODE_SENSE_DBD_SHIFT 3
191#define MODE_SENSE10_MPH_SIZE 8
192#define MODE_SENSE10_ALLOC_LEN_OFFSET 7
193#define MODE_SELECT_CDB_PAGE_FORMAT_OFFSET 1
194#define MODE_SELECT_CDB_SAVE_PAGES_OFFSET 1
195#define MODE_SELECT_6_CDB_PARAM_LIST_LENGTH_OFFSET 4
196#define MODE_SELECT_10_CDB_PARAM_LIST_LENGTH_OFFSET 7
197#define MODE_SELECT_CDB_PAGE_FORMAT_MASK 0x10
198#define MODE_SELECT_CDB_SAVE_PAGES_MASK 0x1
199#define MODE_SELECT_6_BD_OFFSET 3
200#define MODE_SELECT_10_BD_OFFSET 6
201#define MODE_SELECT_10_LLBAA_OFFSET 4
202#define MODE_SELECT_10_LLBAA_MASK 1
203#define MODE_SELECT_6_MPH_SIZE 4
204#define MODE_SELECT_10_MPH_SIZE 8
205#define CACHING_MODE_PAGE_WCE_MASK 0x04
206#define MODE_SENSE_BLK_DESC_ENABLED 0
207#define MODE_SENSE_BLK_DESC_COUNT 1
208#define MODE_SELECT_PAGE_CODE_MASK 0x3F
209#define SHORT_DESC_BLOCK 8
210#define LONG_DESC_BLOCK 16
211#define MODE_PAGE_POW_CND_LEN_FIELD 0x26
212#define MODE_PAGE_INF_EXC_LEN_FIELD 0x0A
213#define MODE_PAGE_CACHING_LEN_FIELD 0x12
214#define MODE_PAGE_CONTROL_LEN_FIELD 0x0A
215#define MODE_SENSE_PC_CURRENT_VALUES 0
216
217
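/* Log page codes and CDB offsets for LOG SENSE */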
218#define LOG_PAGE_SUPPORTED_LOG_PAGES_PAGE 0x00
219#define LOG_PAGE_SUPPORTED_LOG_PAGES_LENGTH 0x07
220#define LOG_PAGE_INFORMATIONAL_EXCEPTIONS_PAGE 0x2F
221#define LOG_PAGE_TEMPERATURE_PAGE 0x0D
222#define LOG_SENSE_CDB_SP_OFFSET 1
223#define LOG_SENSE_CDB_SP_NOT_ENABLED 0
224#define LOG_SENSE_CDB_PC_OFFSET 2
225#define LOG_SENSE_CDB_PC_MASK 0xC0
226#define LOG_SENSE_CDB_PC_SHIFT 6
227#define LOG_SENSE_CDB_PC_CUMULATIVE_VALUES 1
228#define LOG_SENSE_CDB_PAGE_CODE_MASK 0x3F
229#define LOG_SENSE_CDB_ALLOC_LENGTH_OFFSET 7
230#define REMAINING_INFO_EXCP_PAGE_LENGTH 0x8
231#define LOG_INFO_EXCP_PAGE_LENGTH 0xC
232#define REMAINING_TEMP_PAGE_LENGTH 0xC
233#define LOG_TEMP_PAGE_LENGTH 0x10
234#define LOG_TEMP_UNKNOWN 0xFF
235#define SUPPORTED_LOG_PAGES_PAGE_LENGTH 0x3
236
237
238#define READ_CAP_10_RESP_SIZE 8
239#define READ_CAP_16_RESP_SIZE 32
240
241
242#define BYTES_TO_DWORDS 4
243#define NVME_MAX_FIRMWARE_SLOT 7
244
245
246#define REPORT_LUNS_FIRST_LUN_OFFSET 8
247
248
249
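/* SCSI additional sense codes (ASC) used when building sense data */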
250#define SCSI_ASC_NO_SENSE 0x00
251#define SCSI_ASC_PERIPHERAL_DEV_WRITE_FAULT 0x03
252#define SCSI_ASC_LUN_NOT_READY 0x04
253#define SCSI_ASC_WARNING 0x0B
254#define SCSI_ASC_LOG_BLOCK_GUARD_CHECK_FAILED 0x10
255#define SCSI_ASC_LOG_BLOCK_APPTAG_CHECK_FAILED 0x10
256#define SCSI_ASC_LOG_BLOCK_REFTAG_CHECK_FAILED 0x10
257#define SCSI_ASC_UNRECOVERED_READ_ERROR 0x11
258#define SCSI_ASC_MISCOMPARE_DURING_VERIFY 0x1D
259#define SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID 0x20
260#define SCSI_ASC_ILLEGAL_COMMAND 0x20
261#define SCSI_ASC_ILLEGAL_BLOCK 0x21
262#define SCSI_ASC_INVALID_CDB 0x24
263#define SCSI_ASC_INVALID_LUN 0x25
264#define SCSI_ASC_INVALID_PARAMETER 0x26
265#define SCSI_ASC_FORMAT_COMMAND_FAILED 0x31
266#define SCSI_ASC_INTERNAL_TARGET_FAILURE 0x44
267
268
269
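/* SCSI additional sense code qualifiers (ASCQ) */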
270#define SCSI_ASCQ_CAUSE_NOT_REPORTABLE 0x00
271#define SCSI_ASCQ_FORMAT_COMMAND_FAILED 0x01
272#define SCSI_ASCQ_LOG_BLOCK_GUARD_CHECK_FAILED 0x01
273#define SCSI_ASCQ_LOG_BLOCK_APPTAG_CHECK_FAILED 0x02
274#define SCSI_ASCQ_LOG_BLOCK_REFTAG_CHECK_FAILED 0x03
275#define SCSI_ASCQ_FORMAT_IN_PROGRESS 0x04
276#define SCSI_ASCQ_POWER_LOSS_EXPECTED 0x08
277#define SCSI_ASCQ_INVALID_LUN_ID 0x09
278
279
280
281
282
283#define DEVICE_SPECIFIC_PARAMETER 0
284#define VPD_ID_DESCRIPTOR_LENGTH sizeof(VPD_IDENTIFICATION_DESCRIPTOR)
285
286
287
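/*
 * CDB field extraction macros: SCSI CDBs carry multi-byte fields in
 * big-endian order, so GET_U16/U24/U32/U64_FROM_CDB assemble them one byte
 * at a time.  For example, the LBA of a READ(10)/WRITE(10) is fetched as
 * GET_U32_FROM_CDB(cdb, IO_10_CDB_LBA_OFFSET).
 */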
#define GET_OPCODE(cdb) cdb[0]

#define GET_U8_FROM_CDB(cdb, index) (cdb[index] << 0)

#define GET_U16_FROM_CDB(cdb, index) ((cdb[index] << 8) | (cdb[index + 1] << 0))

#define GET_U24_FROM_CDB(cdb, index) ((cdb[index] << 16) | \
				(cdb[index + 1] << 8) | \
				(cdb[index + 2] << 0))

#define GET_U32_FROM_CDB(cdb, index) ((cdb[index] << 24) | \
				(cdb[index + 1] << 16) | \
				(cdb[index + 2] << 8) | \
				(cdb[index + 3] << 0))

#define GET_U64_FROM_CDB(cdb, index) ((((u64)cdb[index]) << 56) | \
				(((u64)cdb[index + 1]) << 48) | \
				(((u64)cdb[index + 2]) << 40) | \
				(((u64)cdb[index + 3]) << 32) | \
				(((u64)cdb[index + 4]) << 24) | \
				(((u64)cdb[index + 5]) << 16) | \
				(((u64)cdb[index + 6]) << 8) | \
				(((u64)cdb[index + 7]) << 0))

#define GET_INQ_EVPD_BIT(cdb) \
	((GET_U8_FROM_CDB(cdb, INQUIRY_EVPD_BYTE_OFFSET) & \
	INQUIRY_EVPD_BIT_MASK) ? 1 : 0)

#define GET_INQ_PAGE_CODE(cdb) \
	(GET_U8_FROM_CDB(cdb, INQUIRY_PAGE_CODE_BYTE_OFFSET))

#define GET_INQ_ALLOC_LENGTH(cdb) \
	(GET_U16_FROM_CDB(cdb, INQUIRY_CDB_ALLOCATION_LENGTH_OFFSET))

#define GET_REPORT_LUNS_ALLOC_LENGTH(cdb) \
	(GET_U32_FROM_CDB(cdb, REPORT_LUNS_CDB_ALLOC_LENGTH_OFFSET))

#define GET_READ_CAP_16_ALLOC_LENGTH(cdb) \
	(GET_U32_FROM_CDB(cdb, READ_CAP_16_CDB_ALLOC_LENGTH_OFFSET))

#define IS_READ_CAP_16(cdb) \
	((cdb[0] == SERVICE_ACTION_IN_16 && cdb[1] == SAI_READ_CAPACITY_16) ? 1 : 0)

#define GET_REQUEST_SENSE_ALLOC_LENGTH(cdb) \
	(GET_U8_FROM_CDB(cdb, REQUEST_SENSE_CDB_ALLOC_LENGTH_OFFSET))

#define GET_MODE_SENSE_DBD(cdb) \
	((GET_U8_FROM_CDB(cdb, MODE_SENSE_DBD_OFFSET) & MODE_SENSE_DBD_MASK) >> \
	MODE_SENSE_DBD_SHIFT)

#define GET_MODE_SENSE_LLBAA(cdb) \
	((GET_U8_FROM_CDB(cdb, MODE_SENSE_LLBAA_OFFSET) & \
	MODE_SENSE_LLBAA_MASK) >> MODE_SENSE_LLBAA_SHIFT)

#define GET_MODE_SENSE_MPH_SIZE(cdb10) \
	(cdb10 ? MODE_SENSE10_MPH_SIZE : MODE_SENSE6_MPH_SIZE)
349
350
351
352
353
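/* Fields decoded from a SCSI READ/WRITE (6/10/12/16) CDB */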
354struct nvme_trans_io_cdb {
355 u8 fua;
356 u8 prot_info;
357 u64 lba;
358 u32 xfer_len;
359};
360
361
362
363
364
365
366
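/*
 * Copy a translated response back to user space, walking the caller's
 * iovec list when the sg_io_hdr specifies one.
 */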
367static int nvme_trans_copy_to_user(struct sg_io_hdr *hdr, void *from,
368 unsigned long n)
369{
370 int res = SNTI_TRANSLATION_SUCCESS;
371 unsigned long not_copied;
372 int i;
373 void *index = from;
374 size_t remaining = n;
375 size_t xfer_len;
376
377 if (hdr->iovec_count > 0) {
378 struct sg_iovec sgl;
379
380 for (i = 0; i < hdr->iovec_count; i++) {
381 not_copied = copy_from_user(&sgl, hdr->dxferp +
382 i * sizeof(struct sg_iovec),
383 sizeof(struct sg_iovec));
384 if (not_copied)
385 return -EFAULT;
386 xfer_len = min(remaining, sgl.iov_len);
387 not_copied = copy_to_user(sgl.iov_base, index,
388 xfer_len);
389 if (not_copied) {
390 res = -EFAULT;
391 break;
392 }
393 index += xfer_len;
394 remaining -= xfer_len;
395 if (remaining == 0)
396 break;
397 }
398 return res;
399 }
400 not_copied = copy_to_user(hdr->dxferp, from, n);
401 if (not_copied)
402 res = -EFAULT;
403 return res;
404}
405
406
407
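/*
 * Copy a parameter list in from user space, walking the caller's iovec
 * list when the sg_io_hdr specifies one.
 */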
408static int nvme_trans_copy_from_user(struct sg_io_hdr *hdr, void *to,
409 unsigned long n)
410{
411 int res = SNTI_TRANSLATION_SUCCESS;
412 unsigned long not_copied;
413 int i;
414 void *index = to;
415 size_t remaining = n;
416 size_t xfer_len;
417
418 if (hdr->iovec_count > 0) {
419 struct sg_iovec sgl;
420
421 for (i = 0; i < hdr->iovec_count; i++) {
422 not_copied = copy_from_user(&sgl, hdr->dxferp +
423 i * sizeof(struct sg_iovec),
424 sizeof(struct sg_iovec));
425 if (not_copied)
426 return -EFAULT;
427 xfer_len = min(remaining, sgl.iov_len);
428 not_copied = copy_from_user(index, sgl.iov_base,
429 xfer_len);
430 if (not_copied) {
431 res = -EFAULT;
432 break;
433 }
434 index += xfer_len;
435 remaining -= xfer_len;
436 if (remaining == 0)
437 break;
438 }
439 return res;
440 }
441
442 not_copied = copy_from_user(to, hdr->dxferp, n);
443 if (not_copied)
444 res = -EFAULT;
445 return res;
446}
447
448
449
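/*
 * Fill in the sg_io_hdr status fields and, for a failed command, build
 * descriptor-format sense data from the given sense key, ASC and ASCQ.
 */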
450static int nvme_trans_completion(struct sg_io_hdr *hdr, u8 status, u8 sense_key,
451 u8 asc, u8 ascq)
452{
453 int res = SNTI_TRANSLATION_SUCCESS;
454 u8 xfer_len;
455 u8 resp[DESC_FMT_SENSE_DATA_SIZE];
456
457 if (scsi_status_is_good(status)) {
458 hdr->status = SAM_STAT_GOOD;
459 hdr->masked_status = GOOD;
460 hdr->host_status = DID_OK;
461 hdr->driver_status = DRIVER_OK;
462 hdr->sb_len_wr = 0;
463 } else {
464 hdr->status = status;
465 hdr->masked_status = status >> 1;
466 hdr->host_status = DID_OK;
467 hdr->driver_status = DRIVER_OK;
468
469 memset(resp, 0, DESC_FMT_SENSE_DATA_SIZE);
470 resp[0] = DESC_FORMAT_SENSE_DATA;
471 resp[1] = sense_key;
472 resp[2] = asc;
473 resp[3] = ascq;
474
475 xfer_len = min_t(u8, hdr->mx_sb_len, DESC_FMT_SENSE_DATA_SIZE);
476 hdr->sb_len_wr = xfer_len;
477 if (copy_to_user(hdr->sbp, resp, xfer_len) > 0)
478 res = -EFAULT;
479 }
480
481 return res;
482}
483
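/*
 * Map an NVMe command status code to the SCSI status, sense key, ASC and
 * ASCQ reported back through nvme_trans_completion().
 */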
484static int nvme_trans_status_code(struct sg_io_hdr *hdr, int nvme_sc)
485{
486 u8 status, sense_key, asc, ascq;
487 int res = SNTI_TRANSLATION_SUCCESS;
488
489
490 if (nvme_sc < 0)
491 return nvme_sc;
492
493
494 nvme_sc &= 0x7FF;
495
496 switch (nvme_sc) {
497
498 case NVME_SC_SUCCESS:
499 status = SAM_STAT_GOOD;
500 sense_key = NO_SENSE;
501 asc = SCSI_ASC_NO_SENSE;
502 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
503 break;
504 case NVME_SC_INVALID_OPCODE:
505 status = SAM_STAT_CHECK_CONDITION;
506 sense_key = ILLEGAL_REQUEST;
507 asc = SCSI_ASC_ILLEGAL_COMMAND;
508 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
509 break;
510 case NVME_SC_INVALID_FIELD:
511 status = SAM_STAT_CHECK_CONDITION;
512 sense_key = ILLEGAL_REQUEST;
513 asc = SCSI_ASC_INVALID_CDB;
514 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
515 break;
516 case NVME_SC_DATA_XFER_ERROR:
517 status = SAM_STAT_CHECK_CONDITION;
518 sense_key = MEDIUM_ERROR;
519 asc = SCSI_ASC_NO_SENSE;
520 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
521 break;
522 case NVME_SC_POWER_LOSS:
523 status = SAM_STAT_TASK_ABORTED;
524 sense_key = ABORTED_COMMAND;
525 asc = SCSI_ASC_WARNING;
526 ascq = SCSI_ASCQ_POWER_LOSS_EXPECTED;
527 break;
528 case NVME_SC_INTERNAL:
529 status = SAM_STAT_CHECK_CONDITION;
530 sense_key = HARDWARE_ERROR;
531 asc = SCSI_ASC_INTERNAL_TARGET_FAILURE;
532 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
533 break;
534 case NVME_SC_ABORT_REQ:
535 status = SAM_STAT_TASK_ABORTED;
536 sense_key = ABORTED_COMMAND;
537 asc = SCSI_ASC_NO_SENSE;
538 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
539 break;
540 case NVME_SC_ABORT_QUEUE:
541 status = SAM_STAT_TASK_ABORTED;
542 sense_key = ABORTED_COMMAND;
543 asc = SCSI_ASC_NO_SENSE;
544 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
545 break;
546 case NVME_SC_FUSED_FAIL:
547 status = SAM_STAT_TASK_ABORTED;
548 sense_key = ABORTED_COMMAND;
549 asc = SCSI_ASC_NO_SENSE;
550 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
551 break;
552 case NVME_SC_FUSED_MISSING:
553 status = SAM_STAT_TASK_ABORTED;
554 sense_key = ABORTED_COMMAND;
555 asc = SCSI_ASC_NO_SENSE;
556 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
557 break;
558 case NVME_SC_INVALID_NS:
559 status = SAM_STAT_CHECK_CONDITION;
560 sense_key = ILLEGAL_REQUEST;
561 asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
562 ascq = SCSI_ASCQ_INVALID_LUN_ID;
563 break;
564 case NVME_SC_LBA_RANGE:
565 status = SAM_STAT_CHECK_CONDITION;
566 sense_key = ILLEGAL_REQUEST;
567 asc = SCSI_ASC_ILLEGAL_BLOCK;
568 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
569 break;
570 case NVME_SC_CAP_EXCEEDED:
571 status = SAM_STAT_CHECK_CONDITION;
572 sense_key = MEDIUM_ERROR;
573 asc = SCSI_ASC_NO_SENSE;
574 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
575 break;
576 case NVME_SC_NS_NOT_READY:
577 status = SAM_STAT_CHECK_CONDITION;
578 sense_key = NOT_READY;
579 asc = SCSI_ASC_LUN_NOT_READY;
580 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
581 break;
582
583
584 case NVME_SC_INVALID_FORMAT:
585 status = SAM_STAT_CHECK_CONDITION;
586 sense_key = ILLEGAL_REQUEST;
587 asc = SCSI_ASC_FORMAT_COMMAND_FAILED;
588 ascq = SCSI_ASCQ_FORMAT_COMMAND_FAILED;
589 break;
590 case NVME_SC_BAD_ATTRIBUTES:
591 status = SAM_STAT_CHECK_CONDITION;
592 sense_key = ILLEGAL_REQUEST;
593 asc = SCSI_ASC_INVALID_CDB;
594 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
595 break;
596
597
598 case NVME_SC_WRITE_FAULT:
599 status = SAM_STAT_CHECK_CONDITION;
600 sense_key = MEDIUM_ERROR;
601 asc = SCSI_ASC_PERIPHERAL_DEV_WRITE_FAULT;
602 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
603 break;
604 case NVME_SC_READ_ERROR:
605 status = SAM_STAT_CHECK_CONDITION;
606 sense_key = MEDIUM_ERROR;
607 asc = SCSI_ASC_UNRECOVERED_READ_ERROR;
608 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
609 break;
610 case NVME_SC_GUARD_CHECK:
611 status = SAM_STAT_CHECK_CONDITION;
612 sense_key = MEDIUM_ERROR;
613 asc = SCSI_ASC_LOG_BLOCK_GUARD_CHECK_FAILED;
614 ascq = SCSI_ASCQ_LOG_BLOCK_GUARD_CHECK_FAILED;
615 break;
616 case NVME_SC_APPTAG_CHECK:
617 status = SAM_STAT_CHECK_CONDITION;
618 sense_key = MEDIUM_ERROR;
619 asc = SCSI_ASC_LOG_BLOCK_APPTAG_CHECK_FAILED;
620 ascq = SCSI_ASCQ_LOG_BLOCK_APPTAG_CHECK_FAILED;
621 break;
622 case NVME_SC_REFTAG_CHECK:
623 status = SAM_STAT_CHECK_CONDITION;
624 sense_key = MEDIUM_ERROR;
625 asc = SCSI_ASC_LOG_BLOCK_REFTAG_CHECK_FAILED;
626 ascq = SCSI_ASCQ_LOG_BLOCK_REFTAG_CHECK_FAILED;
627 break;
628 case NVME_SC_COMPARE_FAILED:
629 status = SAM_STAT_CHECK_CONDITION;
630 sense_key = MISCOMPARE;
631 asc = SCSI_ASC_MISCOMPARE_DURING_VERIFY;
632 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
633 break;
634 case NVME_SC_ACCESS_DENIED:
635 status = SAM_STAT_CHECK_CONDITION;
636 sense_key = ILLEGAL_REQUEST;
637 asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
638 ascq = SCSI_ASCQ_INVALID_LUN_ID;
639 break;
640
641
642 case NVME_SC_CMDID_CONFLICT:
643 case NVME_SC_CMD_SEQ_ERROR:
644 case NVME_SC_CQ_INVALID:
645 case NVME_SC_QID_INVALID:
646 case NVME_SC_QUEUE_SIZE:
647 case NVME_SC_ABORT_LIMIT:
648 case NVME_SC_ABORT_MISSING:
649 case NVME_SC_ASYNC_LIMIT:
650 case NVME_SC_FIRMWARE_SLOT:
651 case NVME_SC_FIRMWARE_IMAGE:
652 case NVME_SC_INVALID_VECTOR:
653 case NVME_SC_INVALID_LOG_PAGE:
654 default:
655 status = SAM_STAT_CHECK_CONDITION;
656 sense_key = ILLEGAL_REQUEST;
657 asc = SCSI_ASC_NO_SENSE;
658 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
659 break;
660 }
661
662 res = nvme_trans_completion(hdr, status, sense_key, asc, ascq);
663
664 return res;
665}
666
667
668
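/* INQUIRY translation: standard INQUIRY data and the supported VPD pages */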
669static int nvme_trans_standard_inquiry_page(struct nvme_ns *ns,
670 struct sg_io_hdr *hdr, u8 *inq_response,
671 int alloc_len)
672{
673 struct nvme_dev *dev = ns->dev;
674 dma_addr_t dma_addr;
675 void *mem;
676 struct nvme_id_ns *id_ns;
677 int res = SNTI_TRANSLATION_SUCCESS;
678 int nvme_sc;
679 int xfer_len;
680 u8 resp_data_format = 0x02;
681 u8 protect;
682 u8 cmdque = 0x01 << 1;
683 u8 fw_offset = sizeof(dev->firmware_rev);
684
685 mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns),
686 &dma_addr, GFP_KERNEL);
687 if (mem == NULL) {
688 res = -ENOMEM;
689 goto out_dma;
690 }
691
692
693 nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr);
694 res = nvme_trans_status_code(hdr, nvme_sc);
695
696
697
698
699
700
701
702 if (res)
703 goto out_free;
704 if (nvme_sc) {
705 res = nvme_sc;
706 goto out_free;
707 }
708 id_ns = mem;
 protect = id_ns->dps ? 0x01 : 0;
710
711 memset(inq_response, 0, STANDARD_INQUIRY_LENGTH);
712 inq_response[2] = VERSION_SPC_4;
713 inq_response[3] = resp_data_format;
714 inq_response[4] = ADDITIONAL_STD_INQ_LENGTH;
715 inq_response[5] = protect;
716 inq_response[7] = cmdque;
 strncpy(&inq_response[8], "NVMe    ", 8); /* 8-byte T10 vendor ID, space padded */
718 strncpy(&inq_response[16], dev->model, 16);
719
720 while (dev->firmware_rev[fw_offset - 1] == ' ' && fw_offset > 4)
721 fw_offset--;
722 fw_offset -= 4;
723 strncpy(&inq_response[32], dev->firmware_rev + fw_offset, 4);
724
725 xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH);
726 res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);
727
728 out_free:
729 dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), mem,
730 dma_addr);
731 out_dma:
732 return res;
733}
734
735static int nvme_trans_supported_vpd_pages(struct nvme_ns *ns,
736 struct sg_io_hdr *hdr, u8 *inq_response,
737 int alloc_len)
738{
739 int res = SNTI_TRANSLATION_SUCCESS;
740 int xfer_len;
741
742 memset(inq_response, 0, STANDARD_INQUIRY_LENGTH);
743 inq_response[1] = INQ_SUPPORTED_VPD_PAGES_PAGE;
744 inq_response[3] = INQ_NUM_SUPPORTED_VPD_PAGES;
745 inq_response[4] = INQ_SUPPORTED_VPD_PAGES_PAGE;
746 inq_response[5] = INQ_UNIT_SERIAL_NUMBER_PAGE;
747 inq_response[6] = INQ_DEVICE_IDENTIFICATION_PAGE;
748 inq_response[7] = INQ_EXTENDED_INQUIRY_DATA_PAGE;
749 inq_response[8] = INQ_BDEV_CHARACTERISTICS_PAGE;
750
751 xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH);
752 res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);
753
754 return res;
755}
756
757static int nvme_trans_unit_serial_page(struct nvme_ns *ns,
758 struct sg_io_hdr *hdr, u8 *inq_response,
759 int alloc_len)
760{
761 struct nvme_dev *dev = ns->dev;
762 int res = SNTI_TRANSLATION_SUCCESS;
763 int xfer_len;
764
765 memset(inq_response, 0, STANDARD_INQUIRY_LENGTH);
766 inq_response[1] = INQ_UNIT_SERIAL_NUMBER_PAGE;
767 inq_response[3] = INQ_SERIAL_NUMBER_LENGTH;
768 strncpy(&inq_response[4], dev->serial, INQ_SERIAL_NUMBER_LENGTH);
769
770 xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH);
771 res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);
772
773 return res;
774}
775
776static int nvme_trans_device_id_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
777 u8 *inq_response, int alloc_len)
778{
779 struct nvme_dev *dev = ns->dev;
780 dma_addr_t dma_addr;
781 void *mem;
782 struct nvme_id_ctrl *id_ctrl;
783 int res = SNTI_TRANSLATION_SUCCESS;
784 int nvme_sc;
785 u8 ieee[4];
786 int xfer_len;
787 __be32 tmp_id = cpu_to_be32(ns->ns_id);
788
789 mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns),
790 &dma_addr, GFP_KERNEL);
791 if (mem == NULL) {
792 res = -ENOMEM;
793 goto out_dma;
794 }
795
796
797 nvme_sc = nvme_identify(dev, 0, 1, dma_addr);
798 res = nvme_trans_status_code(hdr, nvme_sc);
799 if (res)
800 goto out_free;
801 if (nvme_sc) {
802 res = nvme_sc;
803 goto out_free;
804 }
805 id_ctrl = mem;
806
807
808 ieee[0] = id_ctrl->ieee[0] << 4;
809 ieee[1] = id_ctrl->ieee[0] >> 4 | id_ctrl->ieee[1] << 4;
810 ieee[2] = id_ctrl->ieee[1] >> 4 | id_ctrl->ieee[2] << 4;
811 ieee[3] = id_ctrl->ieee[2] >> 4;
812
813 memset(inq_response, 0, STANDARD_INQUIRY_LENGTH);
814 inq_response[1] = INQ_DEVICE_IDENTIFICATION_PAGE;
815 inq_response[3] = 20;
816
817 inq_response[4] = 0x01;
818 inq_response[5] = 0x03;
819 inq_response[6] = 0x00;
820 inq_response[7] = 16;
821
822 inq_response[8] = 0x60 | ieee[3];
823 inq_response[9] = ieee[2];
824 inq_response[10] = ieee[1];
825 inq_response[11] = ieee[0];
826 inq_response[12] = (dev->pci_dev->vendor & 0xFF00) >> 8;
827 inq_response[13] = (dev->pci_dev->vendor & 0x00FF);
828 inq_response[14] = dev->serial[0];
829 inq_response[15] = dev->serial[1];
830 inq_response[16] = dev->model[0];
831 inq_response[17] = dev->model[1];
832 memcpy(&inq_response[18], &tmp_id, sizeof(u32));
833
834
835 xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH);
836 res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);
837
838 out_free:
839 dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), mem,
840 dma_addr);
841 out_dma:
842 return res;
843}
844
845static int nvme_trans_ext_inq_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
846 int alloc_len)
847{
848 u8 *inq_response;
849 int res = SNTI_TRANSLATION_SUCCESS;
850 int nvme_sc;
851 struct nvme_dev *dev = ns->dev;
852 dma_addr_t dma_addr;
853 void *mem;
854 struct nvme_id_ctrl *id_ctrl;
855 struct nvme_id_ns *id_ns;
856 int xfer_len;
857 u8 microcode = 0x80;
858 u8 spt;
859 u8 spt_lut[8] = {0, 0, 2, 1, 4, 6, 5, 7};
860 u8 grd_chk, app_chk, ref_chk, protect;
861 u8 uask_sup = 0x20;
862 u8 v_sup;
863 u8 luiclr = 0x01;
864
865 inq_response = kmalloc(EXTENDED_INQUIRY_DATA_PAGE_LENGTH, GFP_KERNEL);
866 if (inq_response == NULL) {
867 res = -ENOMEM;
868 goto out_mem;
869 }
870
871 mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns),
872 &dma_addr, GFP_KERNEL);
873 if (mem == NULL) {
874 res = -ENOMEM;
875 goto out_dma;
876 }
877
878
879 nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr);
880 res = nvme_trans_status_code(hdr, nvme_sc);
881 if (res)
882 goto out_free;
883 if (nvme_sc) {
884 res = nvme_sc;
885 goto out_free;
886 }
887 id_ns = mem;
888 spt = spt_lut[(id_ns->dpc) & 0x07] << 3;
 protect = id_ns->dps ? 0x01 : 0;
890 grd_chk = protect << 2;
891 app_chk = protect << 1;
892 ref_chk = protect;
893
894
895 nvme_sc = nvme_identify(dev, 0, 1, dma_addr);
896 res = nvme_trans_status_code(hdr, nvme_sc);
897 if (res)
898 goto out_free;
899 if (nvme_sc) {
900 res = nvme_sc;
901 goto out_free;
902 }
903 id_ctrl = mem;
904 v_sup = id_ctrl->vwc;
905
906 memset(inq_response, 0, EXTENDED_INQUIRY_DATA_PAGE_LENGTH);
907 inq_response[1] = INQ_EXTENDED_INQUIRY_DATA_PAGE;
908 inq_response[2] = 0x00;
909 inq_response[3] = 0x3C;
910 inq_response[4] = microcode | spt | grd_chk | app_chk | ref_chk;
911 inq_response[5] = uask_sup;
912 inq_response[6] = v_sup;
913 inq_response[7] = luiclr;
914 inq_response[8] = 0;
915 inq_response[9] = 0;
916
917 xfer_len = min(alloc_len, EXTENDED_INQUIRY_DATA_PAGE_LENGTH);
918 res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);
919
920 out_free:
921 dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), mem,
922 dma_addr);
923 out_dma:
924 kfree(inq_response);
925 out_mem:
926 return res;
927}
928
929static int nvme_trans_bdev_char_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
930 int alloc_len)
931{
932 u8 *inq_response;
933 int res = SNTI_TRANSLATION_SUCCESS;
934 int xfer_len;
935
936 inq_response = kzalloc(EXTENDED_INQUIRY_DATA_PAGE_LENGTH, GFP_KERNEL);
937 if (inq_response == NULL) {
938 res = -ENOMEM;
939 goto out_mem;
940 }
941
942 inq_response[1] = INQ_BDEV_CHARACTERISTICS_PAGE;
943 inq_response[2] = 0x00;
944 inq_response[3] = 0x3C;
945 inq_response[4] = 0x00;
946 inq_response[5] = 0x01;
947 inq_response[6] = 0x00;
948
949 xfer_len = min(alloc_len, EXTENDED_INQUIRY_DATA_PAGE_LENGTH);
950 res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);
951
952 kfree(inq_response);
953 out_mem:
954 return res;
955}
956
957
958
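/*
 * LOG SENSE translation: supported log pages, informational exceptions and
 * temperature pages built from the NVMe SMART / Health Information log.
 */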
959static int nvme_trans_log_supp_pages(struct nvme_ns *ns, struct sg_io_hdr *hdr,
960 int alloc_len)
961{
962 int res = SNTI_TRANSLATION_SUCCESS;
963 int xfer_len;
964 u8 *log_response;
965
966 log_response = kzalloc(LOG_PAGE_SUPPORTED_LOG_PAGES_LENGTH, GFP_KERNEL);
967 if (log_response == NULL) {
968 res = -ENOMEM;
969 goto out_mem;
970 }
971
972 log_response[0] = LOG_PAGE_SUPPORTED_LOG_PAGES_PAGE;
973
974 log_response[3] = SUPPORTED_LOG_PAGES_PAGE_LENGTH;
975 log_response[4] = LOG_PAGE_SUPPORTED_LOG_PAGES_PAGE;
976 log_response[5] = LOG_PAGE_INFORMATIONAL_EXCEPTIONS_PAGE;
977 log_response[6] = LOG_PAGE_TEMPERATURE_PAGE;
978
979 xfer_len = min(alloc_len, LOG_PAGE_SUPPORTED_LOG_PAGES_LENGTH);
980 res = nvme_trans_copy_to_user(hdr, log_response, xfer_len);
981
982 kfree(log_response);
983 out_mem:
984 return res;
985}
986
987static int nvme_trans_log_info_exceptions(struct nvme_ns *ns,
988 struct sg_io_hdr *hdr, int alloc_len)
989{
990 int res = SNTI_TRANSLATION_SUCCESS;
991 int xfer_len;
992 u8 *log_response;
993 struct nvme_command c;
994 struct nvme_dev *dev = ns->dev;
995 struct nvme_smart_log *smart_log;
996 dma_addr_t dma_addr;
997 void *mem;
998 u8 temp_c;
999 u16 temp_k;
1000
1001 log_response = kzalloc(LOG_INFO_EXCP_PAGE_LENGTH, GFP_KERNEL);
1002 if (log_response == NULL) {
1003 res = -ENOMEM;
1004 goto out_mem;
1005 }
1006
1007 mem = dma_alloc_coherent(&dev->pci_dev->dev,
1008 sizeof(struct nvme_smart_log),
1009 &dma_addr, GFP_KERNEL);
1010 if (mem == NULL) {
1011 res = -ENOMEM;
1012 goto out_dma;
1013 }
1014
1015
1016 memset(&c, 0, sizeof(c));
1017 c.common.opcode = nvme_admin_get_log_page;
1018 c.common.nsid = cpu_to_le32(0xFFFFFFFF);
1019 c.common.prp1 = cpu_to_le64(dma_addr);
1020 c.common.cdw10[0] = cpu_to_le32((((sizeof(struct nvme_smart_log) /
1021 BYTES_TO_DWORDS) - 1) << 16) | NVME_LOG_SMART);
1022 res = nvme_submit_admin_cmd(dev, &c, NULL);
1023 if (res != NVME_SC_SUCCESS) {
1024 temp_c = LOG_TEMP_UNKNOWN;
1025 } else {
1026 smart_log = mem;
1027 temp_k = (smart_log->temperature[1] << 8) +
1028 (smart_log->temperature[0]);
1029 temp_c = temp_k - KELVIN_TEMP_FACTOR;
1030 }
1031
1032 log_response[0] = LOG_PAGE_INFORMATIONAL_EXCEPTIONS_PAGE;
1033
1034 log_response[3] = REMAINING_INFO_EXCP_PAGE_LENGTH;
1035
1036
1037 log_response[6] = 0x23;
1038 log_response[7] = 0x04;
1039
1040
1041 log_response[10] = temp_c;
1042
1043 xfer_len = min(alloc_len, LOG_INFO_EXCP_PAGE_LENGTH);
1044 res = nvme_trans_copy_to_user(hdr, log_response, xfer_len);
1045
1046 dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_smart_log),
1047 mem, dma_addr);
1048 out_dma:
1049 kfree(log_response);
1050 out_mem:
1051 return res;
1052}
1053
1054static int nvme_trans_log_temperature(struct nvme_ns *ns, struct sg_io_hdr *hdr,
1055 int alloc_len)
1056{
1057 int res = SNTI_TRANSLATION_SUCCESS;
1058 int xfer_len;
1059 u8 *log_response;
1060 struct nvme_command c;
1061 struct nvme_dev *dev = ns->dev;
1062 struct nvme_smart_log *smart_log;
1063 dma_addr_t dma_addr;
1064 void *mem;
1065 u32 feature_resp;
1066 u8 temp_c_cur, temp_c_thresh;
1067 u16 temp_k;
1068
1069 log_response = kzalloc(LOG_TEMP_PAGE_LENGTH, GFP_KERNEL);
1070 if (log_response == NULL) {
1071 res = -ENOMEM;
1072 goto out_mem;
1073 }
1074
1075 mem = dma_alloc_coherent(&dev->pci_dev->dev,
1076 sizeof(struct nvme_smart_log),
1077 &dma_addr, GFP_KERNEL);
1078 if (mem == NULL) {
1079 res = -ENOMEM;
1080 goto out_dma;
1081 }
1082
1083
1084 memset(&c, 0, sizeof(c));
1085 c.common.opcode = nvme_admin_get_log_page;
1086 c.common.nsid = cpu_to_le32(0xFFFFFFFF);
1087 c.common.prp1 = cpu_to_le64(dma_addr);
1088 c.common.cdw10[0] = cpu_to_le32((((sizeof(struct nvme_smart_log) /
1089 BYTES_TO_DWORDS) - 1) << 16) | NVME_LOG_SMART);
1090 res = nvme_submit_admin_cmd(dev, &c, NULL);
1091 if (res != NVME_SC_SUCCESS) {
1092 temp_c_cur = LOG_TEMP_UNKNOWN;
1093 } else {
1094 smart_log = mem;
1095 temp_k = (smart_log->temperature[1] << 8) +
1096 (smart_log->temperature[0]);
1097 temp_c_cur = temp_k - KELVIN_TEMP_FACTOR;
1098 }
1099
1100
1101 res = nvme_get_features(dev, NVME_FEAT_TEMP_THRESH, 0, 0,
1102 &feature_resp);
1103 if (res != NVME_SC_SUCCESS)
1104 temp_c_thresh = LOG_TEMP_UNKNOWN;
1105 else
1106 temp_c_thresh = (feature_resp & 0xFFFF) - KELVIN_TEMP_FACTOR;
1107
1108 log_response[0] = LOG_PAGE_TEMPERATURE_PAGE;
1109
1110 log_response[3] = REMAINING_TEMP_PAGE_LENGTH;
1111
1112
1113 log_response[6] = 0x01;
1114 log_response[7] = 0x02;
1115
1116 log_response[9] = temp_c_cur;
1117
1118 log_response[11] = 0x01;
1119 log_response[12] = 0x01;
1120 log_response[13] = 0x02;
1121
1122 log_response[15] = temp_c_thresh;
1123
1124 xfer_len = min(alloc_len, LOG_TEMP_PAGE_LENGTH);
1125 res = nvme_trans_copy_to_user(hdr, log_response, xfer_len);
1126
1127 dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_smart_log),
1128 mem, dma_addr);
1129 out_dma:
1130 kfree(log_response);
1131 out_mem:
1132 return res;
1133}
1134
1135
1136
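/*
 * MODE SENSE translation helpers: parameter header, block descriptor and
 * the individual mode pages built from Identify and Get Features data.
 */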
1137static int nvme_trans_fill_mode_parm_hdr(u8 *resp, int len, u8 cdb10, u8 llbaa,
1138 u16 mode_data_length, u16 blk_desc_len)
1139{
1140
1141 if ((cdb10 && len < 8) || (!cdb10 && len < 4))
1142 return SNTI_INTERNAL_ERROR;
1143
1144 if (cdb10) {
1145 resp[0] = (mode_data_length & 0xFF00) >> 8;
1146 resp[1] = (mode_data_length & 0x00FF);
1147
1148 resp[4] = llbaa;
1149 resp[5] = RESERVED_FIELD;
1150 resp[6] = (blk_desc_len & 0xFF00) >> 8;
1151 resp[7] = (blk_desc_len & 0x00FF);
1152 } else {
1153 resp[0] = (mode_data_length & 0x00FF);
1154
1155 resp[3] = (blk_desc_len & 0x00FF);
1156 }
1157
1158 return SNTI_TRANSLATION_SUCCESS;
1159}
1160
1161static int nvme_trans_fill_blk_desc(struct nvme_ns *ns, struct sg_io_hdr *hdr,
1162 u8 *resp, int len, u8 llbaa)
1163{
1164 int res = SNTI_TRANSLATION_SUCCESS;
1165 int nvme_sc;
1166 struct nvme_dev *dev = ns->dev;
1167 dma_addr_t dma_addr;
1168 void *mem;
1169 struct nvme_id_ns *id_ns;
1170 u8 flbas;
1171 u32 lba_length;
1172
1173 if (llbaa == 0 && len < MODE_PAGE_BLK_DES_LEN)
1174 return SNTI_INTERNAL_ERROR;
1175 else if (llbaa > 0 && len < MODE_PAGE_LLBAA_BLK_DES_LEN)
1176 return SNTI_INTERNAL_ERROR;
1177
1178 mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns),
1179 &dma_addr, GFP_KERNEL);
1180 if (mem == NULL) {
1181 res = -ENOMEM;
1182 goto out;
1183 }
1184
1185
1186 nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr);
1187 res = nvme_trans_status_code(hdr, nvme_sc);
1188 if (res)
1189 goto out_dma;
1190 if (nvme_sc) {
1191 res = nvme_sc;
1192 goto out_dma;
1193 }
1194 id_ns = mem;
1195 flbas = (id_ns->flbas) & 0x0F;
1196 lba_length = (1 << (id_ns->lbaf[flbas].ds));
1197
1198 if (llbaa == 0) {
1199 __be32 tmp_cap = cpu_to_be32(le64_to_cpu(id_ns->ncap));
1200
1201 __be32 tmp_len = cpu_to_be32(lba_length & 0x00FFFFFF);
1202
1203 memcpy(resp, &tmp_cap, sizeof(u32));
1204 memcpy(&resp[4], &tmp_len, sizeof(u32));
1205 } else {
1206 __be64 tmp_cap = cpu_to_be64(le64_to_cpu(id_ns->ncap));
1207 __be32 tmp_len = cpu_to_be32(lba_length);
1208
1209 memcpy(resp, &tmp_cap, sizeof(u64));
1210
1211 memcpy(&resp[12], &tmp_len, sizeof(u32));
1212 }
1213
1214 out_dma:
1215 dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), mem,
1216 dma_addr);
1217 out:
1218 return res;
1219}
1220
1221static int nvme_trans_fill_control_page(struct nvme_ns *ns,
1222 struct sg_io_hdr *hdr, u8 *resp,
1223 int len)
1224{
1225 if (len < MODE_PAGE_CONTROL_LEN)
1226 return SNTI_INTERNAL_ERROR;
1227
1228 resp[0] = MODE_PAGE_CONTROL;
1229 resp[1] = MODE_PAGE_CONTROL_LEN_FIELD;
1230 resp[2] = 0x0E;
1231
1232 resp[3] = 0x12;
1233
1234 resp[5] = 0x40;
1235
1236 resp[8] = 0xFF;
1237 resp[9] = 0xFF;
1238
1239
1240 return SNTI_TRANSLATION_SUCCESS;
1241}
1242
1243static int nvme_trans_fill_caching_page(struct nvme_ns *ns,
1244 struct sg_io_hdr *hdr,
1245 u8 *resp, int len)
1246{
1247 int res = SNTI_TRANSLATION_SUCCESS;
1248 int nvme_sc;
1249 struct nvme_dev *dev = ns->dev;
1250 u32 feature_resp;
1251 u8 vwc;
1252
1253 if (len < MODE_PAGE_CACHING_LEN)
1254 return SNTI_INTERNAL_ERROR;
1255
1256 nvme_sc = nvme_get_features(dev, NVME_FEAT_VOLATILE_WC, 0, 0,
1257 &feature_resp);
1258 res = nvme_trans_status_code(hdr, nvme_sc);
1259 if (res)
1260 goto out;
1261 if (nvme_sc) {
1262 res = nvme_sc;
1263 goto out;
1264 }
1265 vwc = feature_resp & 0x00000001;
1266
1267 resp[0] = MODE_PAGE_CACHING;
1268 resp[1] = MODE_PAGE_CACHING_LEN_FIELD;
1269 resp[2] = vwc << 2;
1270
1271 out:
1272 return res;
1273}
1274
1275static int nvme_trans_fill_pow_cnd_page(struct nvme_ns *ns,
1276 struct sg_io_hdr *hdr, u8 *resp,
1277 int len)
1278{
1279 int res = SNTI_TRANSLATION_SUCCESS;
1280
1281 if (len < MODE_PAGE_POW_CND_LEN)
1282 return SNTI_INTERNAL_ERROR;
1283
1284 resp[0] = MODE_PAGE_POWER_CONDITION;
1285 resp[1] = MODE_PAGE_POW_CND_LEN_FIELD;
1286
1287
1288 return res;
1289}
1290
1291static int nvme_trans_fill_inf_exc_page(struct nvme_ns *ns,
1292 struct sg_io_hdr *hdr, u8 *resp,
1293 int len)
1294{
1295 int res = SNTI_TRANSLATION_SUCCESS;
1296
1297 if (len < MODE_PAGE_INF_EXC_LEN)
1298 return SNTI_INTERNAL_ERROR;
1299
1300 resp[0] = MODE_PAGE_INFO_EXCEP;
1301 resp[1] = MODE_PAGE_INF_EXC_LEN_FIELD;
1302 resp[2] = 0x88;
1303
1304
1305 return res;
1306}
1307
1308static int nvme_trans_fill_all_pages(struct nvme_ns *ns, struct sg_io_hdr *hdr,
1309 u8 *resp, int len)
1310{
1311 int res = SNTI_TRANSLATION_SUCCESS;
1312 u16 mode_pages_offset_1 = 0;
1313 u16 mode_pages_offset_2, mode_pages_offset_3, mode_pages_offset_4;
1314
1315 mode_pages_offset_2 = mode_pages_offset_1 + MODE_PAGE_CACHING_LEN;
1316 mode_pages_offset_3 = mode_pages_offset_2 + MODE_PAGE_CONTROL_LEN;
1317 mode_pages_offset_4 = mode_pages_offset_3 + MODE_PAGE_POW_CND_LEN;
1318
1319 res = nvme_trans_fill_caching_page(ns, hdr, &resp[mode_pages_offset_1],
1320 MODE_PAGE_CACHING_LEN);
1321 if (res != SNTI_TRANSLATION_SUCCESS)
1322 goto out;
1323 res = nvme_trans_fill_control_page(ns, hdr, &resp[mode_pages_offset_2],
1324 MODE_PAGE_CONTROL_LEN);
1325 if (res != SNTI_TRANSLATION_SUCCESS)
1326 goto out;
1327 res = nvme_trans_fill_pow_cnd_page(ns, hdr, &resp[mode_pages_offset_3],
1328 MODE_PAGE_POW_CND_LEN);
1329 if (res != SNTI_TRANSLATION_SUCCESS)
1330 goto out;
1331 res = nvme_trans_fill_inf_exc_page(ns, hdr, &resp[mode_pages_offset_4],
1332 MODE_PAGE_INF_EXC_LEN);
1333 if (res != SNTI_TRANSLATION_SUCCESS)
1334 goto out;
1335
1336 out:
1337 return res;
1338}
1339
1340static inline int nvme_trans_get_blk_desc_len(u8 dbd, u8 llbaa)
1341{
1342 if (dbd == MODE_SENSE_BLK_DESC_ENABLED) {
1343
1344 return 8 * (llbaa + 1) * MODE_SENSE_BLK_DESC_COUNT;
1345 } else {
1346 return 0;
1347 }
1348}
1349
1350static int nvme_trans_mode_page_create(struct nvme_ns *ns,
1351 struct sg_io_hdr *hdr, u8 *cmd,
1352 u16 alloc_len, u8 cdb10,
1353 int (*mode_page_fill_func)
1354 (struct nvme_ns *,
1355 struct sg_io_hdr *hdr, u8 *, int),
1356 u16 mode_pages_tot_len)
1357{
1358 int res = SNTI_TRANSLATION_SUCCESS;
1359 int xfer_len;
1360 u8 *response;
1361 u8 dbd, llbaa;
1362 u16 resp_size;
1363 int mph_size;
1364 u16 mode_pages_offset_1;
1365 u16 blk_desc_len, blk_desc_offset, mode_data_length;
1366
1367 dbd = GET_MODE_SENSE_DBD(cmd);
1368 llbaa = GET_MODE_SENSE_LLBAA(cmd);
1369 mph_size = GET_MODE_SENSE_MPH_SIZE(cdb10);
1370 blk_desc_len = nvme_trans_get_blk_desc_len(dbd, llbaa);
1371
1372 resp_size = mph_size + blk_desc_len + mode_pages_tot_len;
1373
1374 mode_data_length = 3 + (3 * cdb10) + blk_desc_len + mode_pages_tot_len;
1375
1376 blk_desc_offset = mph_size;
1377 mode_pages_offset_1 = blk_desc_offset + blk_desc_len;
1378
1379 response = kzalloc(resp_size, GFP_KERNEL);
1380 if (response == NULL) {
1381 res = -ENOMEM;
1382 goto out_mem;
1383 }
1384
1385 res = nvme_trans_fill_mode_parm_hdr(&response[0], mph_size, cdb10,
1386 llbaa, mode_data_length, blk_desc_len);
1387 if (res != SNTI_TRANSLATION_SUCCESS)
1388 goto out_free;
1389 if (blk_desc_len > 0) {
1390 res = nvme_trans_fill_blk_desc(ns, hdr,
1391 &response[blk_desc_offset],
1392 blk_desc_len, llbaa);
1393 if (res != SNTI_TRANSLATION_SUCCESS)
1394 goto out_free;
1395 }
1396 res = mode_page_fill_func(ns, hdr, &response[mode_pages_offset_1],
1397 mode_pages_tot_len);
1398 if (res != SNTI_TRANSLATION_SUCCESS)
1399 goto out_free;
1400
1401 xfer_len = min(alloc_len, resp_size);
1402 res = nvme_trans_copy_to_user(hdr, response, xfer_len);
1403
1404 out_free:
1405 kfree(response);
1406 out_mem:
1407 return res;
1408}
1409
1410
1411
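/* Build READ CAPACITY (10) or (16) response data from Identify Namespace */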
1412static void nvme_trans_fill_read_cap(u8 *response, struct nvme_id_ns *id_ns,
1413 u8 cdb16)
1414{
1415 u8 flbas;
1416 u32 lba_length;
1417 u64 rlba;
1418 u8 prot_en;
1419 u8 p_type_lut[4] = {0, 0, 1, 2};
1420 __be64 tmp_rlba;
1421 __be32 tmp_rlba_32;
1422 __be32 tmp_len;
1423
1424 flbas = (id_ns->flbas) & 0x0F;
1425 lba_length = (1 << (id_ns->lbaf[flbas].ds));
1426 rlba = le64_to_cpup(&id_ns->nsze) - 1;
 prot_en = id_ns->dps ? 0x01 : 0;
1428
1429 if (!cdb16) {
1430 if (rlba > 0xFFFFFFFF)
1431 rlba = 0xFFFFFFFF;
1432 tmp_rlba_32 = cpu_to_be32(rlba);
1433 tmp_len = cpu_to_be32(lba_length);
1434 memcpy(response, &tmp_rlba_32, sizeof(u32));
1435 memcpy(&response[4], &tmp_len, sizeof(u32));
1436 } else {
1437 tmp_rlba = cpu_to_be64(rlba);
1438 tmp_len = cpu_to_be32(lba_length);
1439 memcpy(response, &tmp_rlba, sizeof(u64));
1440 memcpy(&response[8], &tmp_len, sizeof(u32));
1441 response[12] = (p_type_lut[id_ns->dps & 0x3] << 1) | prot_en;
1442
1443
1444
1445 }
1446}
1447
1448
1449
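/*
 * START STOP UNIT translation: map the SCSI power condition to an NVMe
 * power state and apply it with Set Features (Power Management).
 */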
1450static int nvme_trans_power_state(struct nvme_ns *ns, struct sg_io_hdr *hdr,
1451 u8 pc, u8 pcmod, u8 start)
1452{
1453 int res = SNTI_TRANSLATION_SUCCESS;
1454 int nvme_sc;
1455 struct nvme_dev *dev = ns->dev;
1456 dma_addr_t dma_addr;
1457 void *mem;
1458 struct nvme_id_ctrl *id_ctrl;
1459 int lowest_pow_st;
1460 unsigned ps_desired = 0;
1461
1462
1463 mem = dma_alloc_coherent(&dev->pci_dev->dev,
1464 sizeof(struct nvme_id_ctrl),
1465 &dma_addr, GFP_KERNEL);
1466 if (mem == NULL) {
1467 res = -ENOMEM;
1468 goto out;
1469 }
1470 nvme_sc = nvme_identify(dev, 0, 1, dma_addr);
1471 res = nvme_trans_status_code(hdr, nvme_sc);
1472 if (res)
1473 goto out_dma;
1474 if (nvme_sc) {
1475 res = nvme_sc;
1476 goto out_dma;
1477 }
1478 id_ctrl = mem;
1479 lowest_pow_st = max(POWER_STATE_0, (int)(id_ctrl->npss - 1));
1480
1481 switch (pc) {
1482 case NVME_POWER_STATE_START_VALID:
1483
1484 if (pcmod == 0 && start == 0x1)
1485 ps_desired = POWER_STATE_0;
1486 if (pcmod == 0 && start == 0x0)
1487 ps_desired = lowest_pow_st;
1488 break;
1489 case NVME_POWER_STATE_ACTIVE:
1490
1491 if (pcmod == 0)
1492 ps_desired = POWER_STATE_0;
1493 break;
1494 case NVME_POWER_STATE_IDLE:
1495
1496 if (pcmod == 0x0)
1497 ps_desired = POWER_STATE_1;
1498 else if (pcmod == 0x1)
1499 ps_desired = POWER_STATE_2;
1500 else if (pcmod == 0x2)
1501 ps_desired = POWER_STATE_3;
1502 break;
1503 case NVME_POWER_STATE_STANDBY:
1504
1505 if (pcmod == 0x0)
1506 ps_desired = max(POWER_STATE_0, (lowest_pow_st - 2));
1507 else if (pcmod == 0x1)
1508 ps_desired = max(POWER_STATE_0, (lowest_pow_st - 1));
1509 break;
1510 case NVME_POWER_STATE_LU_CONTROL:
1511 default:
1512 res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
1513 ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
1514 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
1515 break;
1516 }
1517 nvme_sc = nvme_set_features(dev, NVME_FEAT_POWER_MGMT, ps_desired, 0,
1518 NULL);
1519 res = nvme_trans_status_code(hdr, nvme_sc);
1520 if (res)
1521 goto out_dma;
1522 if (nvme_sc)
1523 res = nvme_sc;
1524 out_dma:
1525 dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ctrl), mem,
1526 dma_addr);
1527 out:
1528 return res;
1529}
1530
1531
1532
1533
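/*
 * WRITE BUFFER translation: download and/or activate firmware via the NVMe
 * Firmware Image Download and Firmware Activate admin commands.
 */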
1534static int nvme_trans_send_fw_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr,
1535 u8 opcode, u32 tot_len, u32 offset,
1536 u8 buffer_id)
1537{
1538 int res = SNTI_TRANSLATION_SUCCESS;
1539 int nvme_sc;
1540 struct nvme_dev *dev = ns->dev;
1541 struct nvme_command c;
1542 struct nvme_iod *iod = NULL;
1543 unsigned length;
1544
1545 memset(&c, 0, sizeof(c));
1546 c.common.opcode = opcode;
1547 if (opcode == nvme_admin_download_fw) {
1548 if (hdr->iovec_count > 0) {
1549
1550 res = nvme_trans_completion(hdr,
1551 SAM_STAT_CHECK_CONDITION,
1552 ILLEGAL_REQUEST,
1553 SCSI_ASC_INVALID_CDB,
1554 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
1555 goto out;
1556 }
1557 iod = nvme_map_user_pages(dev, DMA_TO_DEVICE,
1558 (unsigned long)hdr->dxferp, tot_len);
1559 if (IS_ERR(iod)) {
1560 res = PTR_ERR(iod);
1561 goto out;
1562 }
1563 length = nvme_setup_prps(dev, iod, tot_len, GFP_KERNEL);
1564 if (length != tot_len) {
1565 res = -ENOMEM;
1566 goto out_unmap;
1567 }
1568
1569 c.dlfw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
1570 c.dlfw.prp2 = cpu_to_le64(iod->first_dma);
1571 c.dlfw.numd = cpu_to_le32((tot_len/BYTES_TO_DWORDS) - 1);
1572 c.dlfw.offset = cpu_to_le32(offset/BYTES_TO_DWORDS);
1573 } else if (opcode == nvme_admin_activate_fw) {
1574 u32 cdw10 = buffer_id | NVME_FWACT_REPL_ACTV;
1575 c.common.cdw10[0] = cpu_to_le32(cdw10);
1576 }
1577
1578 nvme_sc = nvme_submit_admin_cmd(dev, &c, NULL);
1579 res = nvme_trans_status_code(hdr, nvme_sc);
1580 if (res)
1581 goto out_unmap;
1582 if (nvme_sc)
1583 res = nvme_sc;
1584
1585 out_unmap:
1586 if (opcode == nvme_admin_download_fw) {
1587 nvme_unmap_user_pages(dev, DMA_TO_DEVICE, iod);
1588 nvme_free_iod(dev, iod);
1589 }
1590 out:
1591 return res;
1592}
1593
1594
1595
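/*
 * MODE SELECT translation helpers: parse the block descriptor and apply the
 * caching, control and power condition mode pages.
 */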
1596static inline void nvme_trans_modesel_get_bd_len(u8 *parm_list, u8 cdb10,
1597 u16 *bd_len, u8 *llbaa)
1598{
1599 if (cdb10) {
1600
1601 *bd_len = (parm_list[MODE_SELECT_10_BD_OFFSET] << 8) +
1602 parm_list[MODE_SELECT_10_BD_OFFSET + 1];
  *llbaa = parm_list[MODE_SELECT_10_LLBAA_OFFSET] &
    MODE_SELECT_10_LLBAA_MASK;
1605 } else {
1606
1607 *bd_len = parm_list[MODE_SELECT_6_BD_OFFSET];
1608 }
1609}
1610
1611static void nvme_trans_modesel_save_bd(struct nvme_ns *ns, u8 *parm_list,
1612 u16 idx, u16 bd_len, u8 llbaa)
1613{
1614 u16 bd_num;
1615
1616 bd_num = bd_len / ((llbaa == 0) ?
1617 SHORT_DESC_BLOCK : LONG_DESC_BLOCK);
1618
1619
1620 if (llbaa == 0) {
1621
1622 ns->mode_select_num_blocks =
1623 (parm_list[idx + 1] << 16) +
1624 (parm_list[idx + 2] << 8) +
1625 (parm_list[idx + 3]);
1626
1627 ns->mode_select_block_len =
1628 (parm_list[idx + 5] << 16) +
1629 (parm_list[idx + 6] << 8) +
1630 (parm_list[idx + 7]);
1631 } else {
1632
1633 ns->mode_select_num_blocks =
1634 (((u64)parm_list[idx + 0]) << 56) +
1635 (((u64)parm_list[idx + 1]) << 48) +
1636 (((u64)parm_list[idx + 2]) << 40) +
1637 (((u64)parm_list[idx + 3]) << 32) +
1638 (((u64)parm_list[idx + 4]) << 24) +
1639 (((u64)parm_list[idx + 5]) << 16) +
1640 (((u64)parm_list[idx + 6]) << 8) +
1641 ((u64)parm_list[idx + 7]);
1642
1643 ns->mode_select_block_len =
1644 (parm_list[idx + 12] << 24) +
1645 (parm_list[idx + 13] << 16) +
1646 (parm_list[idx + 14] << 8) +
1647 (parm_list[idx + 15]);
1648 }
1649}
1650
1651static int nvme_trans_modesel_get_mp(struct nvme_ns *ns, struct sg_io_hdr *hdr,
1652 u8 *mode_page, u8 page_code)
1653{
1654 int res = SNTI_TRANSLATION_SUCCESS;
1655 int nvme_sc;
1656 struct nvme_dev *dev = ns->dev;
1657 unsigned dword11;
1658
1659 switch (page_code) {
1660 case MODE_PAGE_CACHING:
1661 dword11 = ((mode_page[2] & CACHING_MODE_PAGE_WCE_MASK) ? 1 : 0);
1662 nvme_sc = nvme_set_features(dev, NVME_FEAT_VOLATILE_WC, dword11,
1663 0, NULL);
1664 res = nvme_trans_status_code(hdr, nvme_sc);
1665 if (res)
1666 break;
1667 if (nvme_sc) {
1668 res = nvme_sc;
1669 break;
1670 }
1671 break;
1672 case MODE_PAGE_CONTROL:
1673 break;
1674 case MODE_PAGE_POWER_CONDITION:
1675
1676 if ((mode_page[2] & 0x01) != 0 || (mode_page[3] & 0x0F) != 0) {
1677 res = nvme_trans_completion(hdr,
1678 SAM_STAT_CHECK_CONDITION,
1679 ILLEGAL_REQUEST,
1680 SCSI_ASC_INVALID_PARAMETER,
1681 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
1682 if (!res)
1683 res = SNTI_INTERNAL_ERROR;
1684 break;
1685 }
1686 break;
1687 default:
1688 res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
1689 ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
1690 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
1691 if (!res)
1692 res = SNTI_INTERNAL_ERROR;
1693 break;
1694 }
1695
1696 return res;
1697}
1698
1699static int nvme_trans_modesel_data(struct nvme_ns *ns, struct sg_io_hdr *hdr,
1700 u8 *cmd, u16 parm_list_len, u8 pf,
1701 u8 sp, u8 cdb10)
1702{
1703 int res = SNTI_TRANSLATION_SUCCESS;
1704 u8 *parm_list;
1705 u16 bd_len;
1706 u8 llbaa = 0;
1707 u16 index, saved_index;
1708 u8 page_code;
1709 u16 mp_size;
1710
1711
1712 parm_list = kmalloc(parm_list_len, GFP_KERNEL);
1713 if (parm_list == NULL) {
1714 res = -ENOMEM;
1715 goto out;
1716 }
1717
1718 res = nvme_trans_copy_from_user(hdr, parm_list, parm_list_len);
1719 if (res != SNTI_TRANSLATION_SUCCESS)
1720 goto out_mem;
1721
1722 nvme_trans_modesel_get_bd_len(parm_list, cdb10, &bd_len, &llbaa);
1723 index = (cdb10) ? (MODE_SELECT_10_MPH_SIZE) : (MODE_SELECT_6_MPH_SIZE);
1724
1725 if (bd_len != 0) {
1726
1727 nvme_trans_modesel_save_bd(ns, parm_list, index, bd_len, llbaa);
1728 index += bd_len;
1729 }
1730 saved_index = index;
1731
1732
1733
1734 do {
1735 page_code = parm_list[index] & MODE_SELECT_PAGE_CODE_MASK;
1736 mp_size = parm_list[index + 1] + 2;
1737 if ((page_code != MODE_PAGE_CACHING) &&
1738 (page_code != MODE_PAGE_CONTROL) &&
1739 (page_code != MODE_PAGE_POWER_CONDITION)) {
1740 res = nvme_trans_completion(hdr,
1741 SAM_STAT_CHECK_CONDITION,
1742 ILLEGAL_REQUEST,
1743 SCSI_ASC_INVALID_CDB,
1744 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
1745 goto out_mem;
1746 }
1747 index += mp_size;
1748 } while (index < parm_list_len);
1749
1750
1751 index = saved_index;
1752 do {
1753 page_code = parm_list[index] & MODE_SELECT_PAGE_CODE_MASK;
1754 mp_size = parm_list[index + 1] + 2;
1755 res = nvme_trans_modesel_get_mp(ns, hdr, &parm_list[index],
1756 page_code);
1757 if (res != SNTI_TRANSLATION_SUCCESS)
1758 break;
1759 index += mp_size;
1760 } while (index < parm_list_len);
1761
1762 out_mem:
1763 kfree(parm_list);
1764 out:
1765 return res;
1766}
1767
1768
1769
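/*
 * FORMAT UNIT translation helpers: capture the current block size and count,
 * validate the parameter list and issue the NVMe Format NVM admin command.
 */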
1770static int nvme_trans_fmt_set_blk_size_count(struct nvme_ns *ns,
1771 struct sg_io_hdr *hdr)
1772{
1773 int res = SNTI_TRANSLATION_SUCCESS;
1774 int nvme_sc;
1775 struct nvme_dev *dev = ns->dev;
1776 dma_addr_t dma_addr;
1777 void *mem;
1778 struct nvme_id_ns *id_ns;
1779 u8 flbas;
1780
1781
1782
1783
1784
1785
1786
1787
1788 if (ns->mode_select_num_blocks == 0 || ns->mode_select_block_len == 0) {
1789 mem = dma_alloc_coherent(&dev->pci_dev->dev,
1790 sizeof(struct nvme_id_ns), &dma_addr, GFP_KERNEL);
1791 if (mem == NULL) {
1792 res = -ENOMEM;
1793 goto out;
1794 }
1795
1796 nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr);
1797 res = nvme_trans_status_code(hdr, nvme_sc);
1798 if (res)
1799 goto out_dma;
1800 if (nvme_sc) {
1801 res = nvme_sc;
1802 goto out_dma;
1803 }
1804 id_ns = mem;
1805
1806 if (ns->mode_select_num_blocks == 0)
1807 ns->mode_select_num_blocks = le64_to_cpu(id_ns->ncap);
1808 if (ns->mode_select_block_len == 0) {
1809 flbas = (id_ns->flbas) & 0x0F;
1810 ns->mode_select_block_len =
1811 (1 << (id_ns->lbaf[flbas].ds));
1812 }
1813 out_dma:
1814 dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns),
1815 mem, dma_addr);
1816 }
1817 out:
1818 return res;
1819}
1820
1821static int nvme_trans_fmt_get_parm_header(struct sg_io_hdr *hdr, u8 len,
1822 u8 format_prot_info, u8 *nvme_pf_code)
1823{
1824 int res = SNTI_TRANSLATION_SUCCESS;
1825 u8 *parm_list;
1826 u8 pf_usage, pf_code;
1827
1828 parm_list = kmalloc(len, GFP_KERNEL);
1829 if (parm_list == NULL) {
1830 res = -ENOMEM;
1831 goto out;
1832 }
1833 res = nvme_trans_copy_from_user(hdr, parm_list, len);
1834 if (res != SNTI_TRANSLATION_SUCCESS)
1835 goto out_mem;
1836
1837 if ((parm_list[FORMAT_UNIT_IMMED_OFFSET] &
1838 FORMAT_UNIT_IMMED_MASK) != 0) {
1839 res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
1840 ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
1841 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
1842 goto out_mem;
1843 }
1844
1845 if (len == FORMAT_UNIT_LONG_PARM_LIST_LEN &&
1846 (parm_list[FORMAT_UNIT_PROT_INT_OFFSET] & 0x0F) != 0) {
1847 res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
1848 ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
1849 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
1850 goto out_mem;
1851 }
1852 pf_usage = parm_list[FORMAT_UNIT_PROT_FIELD_USAGE_OFFSET] &
1853 FORMAT_UNIT_PROT_FIELD_USAGE_MASK;
1854 pf_code = (pf_usage << 2) | format_prot_info;
1855 switch (pf_code) {
1856 case 0:
1857 *nvme_pf_code = 0;
1858 break;
1859 case 2:
1860 *nvme_pf_code = 1;
1861 break;
1862 case 3:
1863 *nvme_pf_code = 2;
1864 break;
1865 case 7:
1866 *nvme_pf_code = 3;
1867 break;
1868 default:
1869 res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
1870 ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
1871 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
1872 break;
1873 }
1874
1875 out_mem:
1876 kfree(parm_list);
1877 out:
1878 return res;
1879}
1880
1881static int nvme_trans_fmt_send_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr,
1882 u8 prot_info)
1883{
1884 int res = SNTI_TRANSLATION_SUCCESS;
1885 int nvme_sc;
1886 struct nvme_dev *dev = ns->dev;
1887 dma_addr_t dma_addr;
1888 void *mem;
1889 struct nvme_id_ns *id_ns;
1890 u8 i;
1891 u8 flbas, nlbaf;
1892 u8 selected_lbaf = 0xFF;
1893 u32 cdw10 = 0;
1894 struct nvme_command c;
1895
1896
1897 mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns),
1898 &dma_addr, GFP_KERNEL);
1899 if (mem == NULL) {
1900 res = -ENOMEM;
1901 goto out;
1902 }
1903
1904 nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr);
1905 res = nvme_trans_status_code(hdr, nvme_sc);
1906 if (res)
1907 goto out_dma;
1908 if (nvme_sc) {
1909 res = nvme_sc;
1910 goto out_dma;
1911 }
1912 id_ns = mem;
1913 flbas = (id_ns->flbas) & 0x0F;
1914 nlbaf = id_ns->nlbaf;
1915
 for (i = 0; i <= nlbaf; i++) { /* NLBAF is a 0's based count of LBA formats */
1917 if (ns->mode_select_block_len == (1 << (id_ns->lbaf[i].ds))) {
1918 selected_lbaf = i;
1919 break;
1920 }
1921 }
1922 if (selected_lbaf > 0x0F) {
1923 res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
1924 ILLEGAL_REQUEST, SCSI_ASC_INVALID_PARAMETER,
1925 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
1926 }
1927 if (ns->mode_select_num_blocks != le64_to_cpu(id_ns->ncap)) {
1928 res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
1929 ILLEGAL_REQUEST, SCSI_ASC_INVALID_PARAMETER,
1930 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
1931 }
1932
1933 cdw10 |= prot_info << 5;
1934 cdw10 |= selected_lbaf & 0x0F;
1935 memset(&c, 0, sizeof(c));
1936 c.format.opcode = nvme_admin_format_nvm;
1937 c.format.nsid = cpu_to_le32(ns->ns_id);
1938 c.format.cdw10 = cpu_to_le32(cdw10);
1939
1940 nvme_sc = nvme_submit_admin_cmd(dev, &c, NULL);
1941 res = nvme_trans_status_code(hdr, nvme_sc);
1942 if (res)
1943 goto out_dma;
1944 if (nvme_sc)
1945 res = nvme_sc;
1946
1947 out_dma:
1948 dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), mem,
1949 dma_addr);
1950 out:
1951 return res;
1952}
1953
1954
1955
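/*
 * Decode the LBA, transfer length, FUA and protection information bits from
 * the 6-, 10-, 12- and 16-byte read/write CDB variants.
 */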
1956static inline void nvme_trans_get_io_cdb6(u8 *cmd,
1957 struct nvme_trans_io_cdb *cdb_info)
1958{
1959 cdb_info->fua = 0;
1960 cdb_info->prot_info = 0;
1961 cdb_info->lba = GET_U32_FROM_CDB(cmd, IO_6_CDB_LBA_OFFSET) &
1962 IO_6_CDB_LBA_MASK;
1963 cdb_info->xfer_len = GET_U8_FROM_CDB(cmd, IO_6_CDB_TX_LEN_OFFSET);
1964
1965
1966 if (cdb_info->xfer_len == 0)
1967 cdb_info->xfer_len = IO_6_DEFAULT_TX_LEN;
1968}
1969
1970static inline void nvme_trans_get_io_cdb10(u8 *cmd,
1971 struct nvme_trans_io_cdb *cdb_info)
1972{
1973 cdb_info->fua = GET_U8_FROM_CDB(cmd, IO_10_CDB_FUA_OFFSET) &
1974 IO_CDB_FUA_MASK;
 cdb_info->prot_info = (GET_U8_FROM_CDB(cmd, IO_10_CDB_WP_OFFSET) &
     IO_CDB_WP_MASK) >> IO_CDB_WP_SHIFT; /* mask before shifting */
1977 cdb_info->lba = GET_U32_FROM_CDB(cmd, IO_10_CDB_LBA_OFFSET);
1978 cdb_info->xfer_len = GET_U16_FROM_CDB(cmd, IO_10_CDB_TX_LEN_OFFSET);
1979}
1980
1981static inline void nvme_trans_get_io_cdb12(u8 *cmd,
1982 struct nvme_trans_io_cdb *cdb_info)
1983{
1984 cdb_info->fua = GET_U8_FROM_CDB(cmd, IO_12_CDB_FUA_OFFSET) &
1985 IO_CDB_FUA_MASK;
 cdb_info->prot_info = (GET_U8_FROM_CDB(cmd, IO_12_CDB_WP_OFFSET) &
     IO_CDB_WP_MASK) >> IO_CDB_WP_SHIFT;
1988 cdb_info->lba = GET_U32_FROM_CDB(cmd, IO_12_CDB_LBA_OFFSET);
1989 cdb_info->xfer_len = GET_U32_FROM_CDB(cmd, IO_12_CDB_TX_LEN_OFFSET);
1990}
1991
1992static inline void nvme_trans_get_io_cdb16(u8 *cmd,
1993 struct nvme_trans_io_cdb *cdb_info)
1994{
1995 cdb_info->fua = GET_U8_FROM_CDB(cmd, IO_16_CDB_FUA_OFFSET) &
1996 IO_CDB_FUA_MASK;
 cdb_info->prot_info = (GET_U8_FROM_CDB(cmd, IO_16_CDB_WP_OFFSET) &
     IO_CDB_WP_MASK) >> IO_CDB_WP_SHIFT;
1999 cdb_info->lba = GET_U64_FROM_CDB(cmd, IO_16_CDB_LBA_OFFSET);
2000 cdb_info->xfer_len = GET_U32_FROM_CDB(cmd, IO_16_CDB_TX_LEN_OFFSET);
2001}
2002
2003static inline u32 nvme_trans_io_get_num_cmds(struct sg_io_hdr *hdr,
2004 struct nvme_trans_io_cdb *cdb_info,
2005 u32 max_blocks)
2006{
2007
2008 if (hdr->iovec_count > 0)
2009 return hdr->iovec_count;
2010 else if (cdb_info->xfer_len > max_blocks)
2011 return ((cdb_info->xfer_len - 1) / max_blocks) + 1;
2012 else
2013 return 1;
2014}
2015
2016static u16 nvme_trans_io_get_control(struct nvme_ns *ns,
2017 struct nvme_trans_io_cdb *cdb_info)
2018{
2019 u16 control = 0;
2020
2021
2022
2023 if (cdb_info->fua > 0)
2024 control |= NVME_RW_FUA;
2025
2026 return control;
2027}
2028
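/*
 * Issue the translated read/write as one or more NVMe I/O commands, one per
 * iovec entry or per chunk bounded by the queue's maximum transfer size.
 */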
2029static int nvme_trans_do_nvme_io(struct nvme_ns *ns, struct sg_io_hdr *hdr,
2030 struct nvme_trans_io_cdb *cdb_info, u8 is_write)
2031{
2032 int res = SNTI_TRANSLATION_SUCCESS;
2033 int nvme_sc;
2034 struct nvme_dev *dev = ns->dev;
2035 u32 num_cmds;
2036 struct nvme_iod *iod;
2037 u64 unit_len;
2038 u64 unit_num_blocks;
2039 u32 retcode;
2040 u32 i = 0;
2041 u64 nvme_offset = 0;
2042 void __user *next_mapping_addr;
2043 struct nvme_command c;
2044 u8 opcode = (is_write ? nvme_cmd_write : nvme_cmd_read);
2045 u16 control;
2046 u32 max_blocks = queue_max_hw_sectors(ns->queue);
2047
2048 num_cmds = nvme_trans_io_get_num_cmds(hdr, cdb_info, max_blocks);
2049
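	/*
	 * Each loop iteration builds and submits one NVMe read/write command.
	 * With an iovec list, iov_base/iov_len of the current entry describe
	 * the mapping and length; with a plain buffer, the transfer is split
	 * into chunks of at most max_blocks and the starting LBA and buffer
	 * offset advance by the blocks already issued.
	 */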
2059 for (i = 0; i < num_cmds; i++) {
2060 memset(&c, 0, sizeof(c));
2061 if (hdr->iovec_count > 0) {
2062 struct sg_iovec sgl;
2063
2064 retcode = copy_from_user(&sgl, hdr->dxferp +
2065 i * sizeof(struct sg_iovec),
2066 sizeof(struct sg_iovec));
2067 if (retcode)
2068 return -EFAULT;
2069 unit_len = sgl.iov_len;
2070 unit_num_blocks = unit_len >> ns->lba_shift;
2071 next_mapping_addr = sgl.iov_base;
2072 } else {
2073 unit_num_blocks = min((u64)max_blocks,
2074 (cdb_info->xfer_len - nvme_offset));
2075 unit_len = unit_num_blocks << ns->lba_shift;
2076 next_mapping_addr = hdr->dxferp +
2077 ((1 << ns->lba_shift) * nvme_offset);
2078 }
2079
2080 c.rw.opcode = opcode;
2081 c.rw.nsid = cpu_to_le32(ns->ns_id);
2082 c.rw.slba = cpu_to_le64(cdb_info->lba + nvme_offset);
2083 c.rw.length = cpu_to_le16(unit_num_blocks - 1);
2084 control = nvme_trans_io_get_control(ns, cdb_info);
2085 c.rw.control = cpu_to_le16(control);
2086
2087 iod = nvme_map_user_pages(dev,
2088 (is_write) ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
2089 (unsigned long)next_mapping_addr, unit_len);
2090 if (IS_ERR(iod)) {
2091 res = PTR_ERR(iod);
2092 goto out;
2093 }
2094 retcode = nvme_setup_prps(dev, iod, unit_len, GFP_KERNEL);
2095 if (retcode != unit_len) {
2096 nvme_unmap_user_pages(dev,
2097 (is_write) ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
2098 iod);
2099 nvme_free_iod(dev, iod);
2100 res = -ENOMEM;
2101 goto out;
2102 }
2103 c.rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
2104 c.rw.prp2 = cpu_to_le64(iod->first_dma);
2105
2106 nvme_offset += unit_num_blocks;
2107
2108 nvme_sc = nvme_submit_io_cmd(dev, ns, &c, NULL);
2109 if (nvme_sc != NVME_SC_SUCCESS) {
2110 nvme_unmap_user_pages(dev,
2111 (is_write) ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
2112 iod);
2113 nvme_free_iod(dev, iod);
2114 res = nvme_trans_status_code(hdr, nvme_sc);
2115 goto out;
2116 }
2117 nvme_unmap_user_pages(dev,
2118 (is_write) ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
2119 iod);
2120 nvme_free_iod(dev, iod);
2121 }
2122 res = nvme_trans_status_code(hdr, NVME_SC_SUCCESS);
2123
2124 out:
2125 return res;
2126}
2127
2128
2129
2130
2131static int nvme_trans_io(struct nvme_ns *ns, struct sg_io_hdr *hdr, u8 is_write,
2132 u8 *cmd)
2133{
2134 int res = SNTI_TRANSLATION_SUCCESS;
2135 struct nvme_trans_io_cdb cdb_info;
2136 u8 opcode = cmd[0];
2137 u64 xfer_bytes;
2138 u64 sum_iov_len = 0;
2139 struct sg_iovec sgl;
2140 int i;
2141 size_t not_copied;
2142
2143
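	/* Decode LBA, transfer length, FUA and protection fields from the CDB */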
2144 switch (opcode) {
2145 case WRITE_6:
2146 case READ_6:
2147 nvme_trans_get_io_cdb6(cmd, &cdb_info);
2148 break;
2149 case WRITE_10:
2150 case READ_10:
2151 nvme_trans_get_io_cdb10(cmd, &cdb_info);
2152 break;
2153 case WRITE_12:
2154 case READ_12:
2155 nvme_trans_get_io_cdb12(cmd, &cdb_info);
2156 break;
2157 case WRITE_16:
2158 case READ_16:
2159 nvme_trans_get_io_cdb16(cmd, &cdb_info);
2160 break;
2161 default:
2162
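		/* Unreachable: nvme_scsi_translate() only routes READ/WRITE here */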
2163 res = SNTI_INTERNAL_ERROR;
2164 goto out;
2165 }
2166
2167
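	/* Sum the iovec lengths; each must be a multiple of the block size */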
2168 if (hdr->iovec_count > 0) {
2169 for (i = 0; i < hdr->iovec_count; i++) {
2170 not_copied = copy_from_user(&sgl, hdr->dxferp +
2171 i * sizeof(struct sg_iovec),
2172 sizeof(struct sg_iovec));
2173 if (not_copied)
2174 return -EFAULT;
2175 sum_iov_len += sgl.iov_len;
2176
2177 if (sgl.iov_len % (1 << ns->lba_shift) != 0) {
2178 res = nvme_trans_completion(hdr,
2179 SAM_STAT_CHECK_CONDITION,
2180 ILLEGAL_REQUEST,
2181 SCSI_ASC_INVALID_PARAMETER,
2182 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
2183 goto out;
2184 }
2185 }
2186 } else {
2187 sum_iov_len = hdr->dxfer_len;
2188 }
2189
2190
2191 xfer_bytes = min(((u64)hdr->dxfer_len), sum_iov_len);
2192
2193
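	/* The block count in the CDB must match the supplied buffer size */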
2194 if (xfer_bytes != (cdb_info.xfer_len << ns->lba_shift)) {
2195 res = -EINVAL;
2196 goto out;
2197 }
2198
2199
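	/* A zero-length transfer is not an error; there is nothing to do */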
2200 if (cdb_info.xfer_len == 0)
2201 goto out;
2202
2203
2204 res = nvme_trans_do_nvme_io(ns, hdr, &cdb_info, is_write);
2205 if (res != SNTI_TRANSLATION_SUCCESS)
2206 goto out;
2207
2208 out:
2209 return res;
2210}
2211
2212static int nvme_trans_inquiry(struct nvme_ns *ns, struct sg_io_hdr *hdr,
2213 u8 *cmd)
2214{
2215 int res = SNTI_TRANSLATION_SUCCESS;
2216 u8 evpd;
2217 u8 page_code;
2218 int alloc_len;
2219 u8 *inq_response;
2220
2221 evpd = GET_INQ_EVPD_BIT(cmd);
2222 page_code = GET_INQ_PAGE_CODE(cmd);
2223 alloc_len = GET_INQ_ALLOC_LENGTH(cmd);
2224
2225 inq_response = kmalloc(STANDARD_INQUIRY_LENGTH, GFP_KERNEL);
2226 if (inq_response == NULL) {
2227 res = -ENOMEM;
2228 goto out_mem;
2229 }
2230
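	/* EVPD clear: standard INQUIRY only; EVPD set: dispatch on VPD page code */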
2231 if (evpd == 0) {
2232 if (page_code == INQ_STANDARD_INQUIRY_PAGE) {
2233 res = nvme_trans_standard_inquiry_page(ns, hdr,
2234 inq_response, alloc_len);
2235 } else {
2236 res = nvme_trans_completion(hdr,
2237 SAM_STAT_CHECK_CONDITION,
2238 ILLEGAL_REQUEST,
2239 SCSI_ASC_INVALID_CDB,
2240 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
2241 }
2242 } else {
2243 switch (page_code) {
2244 case VPD_SUPPORTED_PAGES:
2245 res = nvme_trans_supported_vpd_pages(ns, hdr,
2246 inq_response, alloc_len);
2247 break;
2248 case VPD_SERIAL_NUMBER:
2249 res = nvme_trans_unit_serial_page(ns, hdr, inq_response,
2250 alloc_len);
2251 break;
2252 case VPD_DEVICE_IDENTIFIERS:
2253 res = nvme_trans_device_id_page(ns, hdr, inq_response,
2254 alloc_len);
2255 break;
2256 case VPD_EXTENDED_INQUIRY:
2257 res = nvme_trans_ext_inq_page(ns, hdr, alloc_len);
2258 break;
2259 case VPD_BLOCK_DEV_CHARACTERISTICS:
2260 res = nvme_trans_bdev_char_page(ns, hdr, alloc_len);
2261 break;
2262 default:
2263 res = nvme_trans_completion(hdr,
2264 SAM_STAT_CHECK_CONDITION,
2265 ILLEGAL_REQUEST,
2266 SCSI_ASC_INVALID_CDB,
2267 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
2268 break;
2269 }
2270 }
2271 kfree(inq_response);
2272 out_mem:
2273 return res;
2274}
2275
2276static int nvme_trans_log_sense(struct nvme_ns *ns, struct sg_io_hdr *hdr,
2277 u8 *cmd)
2278{
2279 int res = SNTI_TRANSLATION_SUCCESS;
2280 u16 alloc_len;
2281 u8 sp;
2282 u8 pc;
2283 u8 page_code;
2284
2285 sp = GET_U8_FROM_CDB(cmd, LOG_SENSE_CDB_SP_OFFSET);
2286 if (sp != LOG_SENSE_CDB_SP_NOT_ENABLED) {
2287 res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
2288 ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
2289 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
2290 goto out;
2291 }
2292 pc = GET_U8_FROM_CDB(cmd, LOG_SENSE_CDB_PC_OFFSET);
2293 page_code = pc & LOG_SENSE_CDB_PAGE_CODE_MASK;
2294 pc = (pc & LOG_SENSE_CDB_PC_MASK) >> LOG_SENSE_CDB_PC_SHIFT;
2295 if (pc != LOG_SENSE_CDB_PC_CUMULATIVE_VALUES) {
2296 res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
2297 ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
2298 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
2299 goto out;
2300 }
2301 alloc_len = GET_U16_FROM_CDB(cmd, LOG_SENSE_CDB_ALLOC_LENGTH_OFFSET);
2302 switch (page_code) {
2303 case LOG_PAGE_SUPPORTED_LOG_PAGES_PAGE:
2304 res = nvme_trans_log_supp_pages(ns, hdr, alloc_len);
2305 break;
2306 case LOG_PAGE_INFORMATIONAL_EXCEPTIONS_PAGE:
2307 res = nvme_trans_log_info_exceptions(ns, hdr, alloc_len);
2308 break;
2309 case LOG_PAGE_TEMPERATURE_PAGE:
2310 res = nvme_trans_log_temperature(ns, hdr, alloc_len);
2311 break;
2312 default:
2313 res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
2314 ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
2315 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
2316 break;
2317 }
2318
2319 out:
2320 return res;
2321}
2322
2323static int nvme_trans_mode_select(struct nvme_ns *ns, struct sg_io_hdr *hdr,
2324 u8 *cmd)
2325{
2326 int res = SNTI_TRANSLATION_SUCCESS;
2327 u8 cdb10 = 0;
2328 u16 parm_list_len;
2329 u8 page_format;
2330 u8 save_pages;
2331
2332 page_format = GET_U8_FROM_CDB(cmd, MODE_SELECT_CDB_PAGE_FORMAT_OFFSET);
2333 page_format &= MODE_SELECT_CDB_PAGE_FORMAT_MASK;
2334
2335 save_pages = GET_U8_FROM_CDB(cmd, MODE_SELECT_CDB_SAVE_PAGES_OFFSET);
2336 save_pages &= MODE_SELECT_CDB_SAVE_PAGES_MASK;
2337
2338 if (GET_OPCODE(cmd) == MODE_SELECT) {
2339 parm_list_len = GET_U8_FROM_CDB(cmd,
2340 MODE_SELECT_6_CDB_PARAM_LIST_LENGTH_OFFSET);
2341 } else {
2342 parm_list_len = GET_U16_FROM_CDB(cmd,
2343 MODE_SELECT_10_CDB_PARAM_LIST_LENGTH_OFFSET);
2344 cdb10 = 1;
2345 }
2346
2347 if (parm_list_len != 0) {
2348
2349
2350
2351
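		/*
		 * A parameter list length of zero is not an error; only
		 * translate mode parameter data when some was supplied.
		 */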
2352 res = nvme_trans_modesel_data(ns, hdr, cmd, parm_list_len,
2353 page_format, save_pages, cdb10);
2354 }
2355
2356 return res;
2357}
2358
2359static int nvme_trans_mode_sense(struct nvme_ns *ns, struct sg_io_hdr *hdr,
2360 u8 *cmd)
2361{
2362 int res = SNTI_TRANSLATION_SUCCESS;
2363 u16 alloc_len;
2364 u8 cdb10 = 0;
2365 u8 page_code;
2366 u8 pc;
2367
2368 if (GET_OPCODE(cmd) == MODE_SENSE) {
2369 alloc_len = GET_U8_FROM_CDB(cmd, MODE_SENSE6_ALLOC_LEN_OFFSET);
2370 } else {
2371 alloc_len = GET_U16_FROM_CDB(cmd,
2372 MODE_SENSE10_ALLOC_LEN_OFFSET);
2373 cdb10 = 1;
2374 }
2375
2376 pc = GET_U8_FROM_CDB(cmd, MODE_SENSE_PAGE_CONTROL_OFFSET) &
2377 MODE_SENSE_PAGE_CONTROL_MASK;
2378 if (pc != MODE_SENSE_PC_CURRENT_VALUES) {
2379 res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
2380 ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
2381 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
2382 goto out;
2383 }
2384
2385 page_code = GET_U8_FROM_CDB(cmd, MODE_SENSE_PAGE_CODE_OFFSET) &
2386 MODE_SENSE_PAGE_CODE_MASK;
2387 switch (page_code) {
2388 case MODE_PAGE_CACHING:
2389 res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len,
2390 cdb10,
2391 &nvme_trans_fill_caching_page,
2392 MODE_PAGE_CACHING_LEN);
2393 break;
2394 case MODE_PAGE_CONTROL:
2395 res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len,
2396 cdb10,
2397 &nvme_trans_fill_control_page,
2398 MODE_PAGE_CONTROL_LEN);
2399 break;
2400 case MODE_PAGE_POWER_CONDITION:
2401 res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len,
2402 cdb10,
2403 &nvme_trans_fill_pow_cnd_page,
2404 MODE_PAGE_POW_CND_LEN);
2405 break;
2406 case MODE_PAGE_INFO_EXCEP:
2407 res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len,
2408 cdb10,
2409 &nvme_trans_fill_inf_exc_page,
2410 MODE_PAGE_INF_EXC_LEN);
2411 break;
2412 case MODE_PAGE_RETURN_ALL:
2413 res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len,
2414 cdb10,
2415 &nvme_trans_fill_all_pages,
2416 MODE_PAGE_ALL_LEN);
2417 break;
2418 default:
2419 res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
2420 ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
2421 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
2422 break;
2423 }
2424
2425 out:
2426 return res;
2427}
2428
2429static int nvme_trans_read_capacity(struct nvme_ns *ns, struct sg_io_hdr *hdr,
2430 u8 *cmd)
2431{
2432 int res = SNTI_TRANSLATION_SUCCESS;
2433 int nvme_sc;
2434 u32 alloc_len = READ_CAP_10_RESP_SIZE;
2435 u32 resp_size = READ_CAP_10_RESP_SIZE;
2436 u32 xfer_len;
2437 u8 cdb16;
2438 struct nvme_dev *dev = ns->dev;
2439 dma_addr_t dma_addr;
2440 void *mem;
2441 struct nvme_id_ns *id_ns;
2442 u8 *response;
2443
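	/* READ CAPACITY(16) supplies an allocation length and a larger response */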
2444 cdb16 = IS_READ_CAP_16(cmd);
2445 if (cdb16) {
2446 alloc_len = GET_READ_CAP_16_ALLOC_LENGTH(cmd);
2447 resp_size = READ_CAP_16_RESP_SIZE;
2448 }
2449
2450 mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns),
2451 &dma_addr, GFP_KERNEL);
2452 if (mem == NULL) {
2453 res = -ENOMEM;
2454 goto out;
2455 }
2456
2457 nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr);
2458 res = nvme_trans_status_code(hdr, nvme_sc);
2459 if (res)
2460 goto out_dma;
2461 if (nvme_sc) {
2462 res = nvme_sc;
2463 goto out_dma;
2464 }
2465 id_ns = mem;
2466
2467 response = kzalloc(resp_size, GFP_KERNEL);
2468 if (response == NULL) {
2469 res = -ENOMEM;
2470 goto out_dma;
2471 }
2472 nvme_trans_fill_read_cap(response, id_ns, cdb16);
2473
2474 xfer_len = min(alloc_len, resp_size);
2475 res = nvme_trans_copy_to_user(hdr, response, xfer_len);
2476
2477 kfree(response);
2478 out_dma:
2479 dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), mem,
2480 dma_addr);
2481 out:
2482 return res;
2483}
2484
2485static int nvme_trans_report_luns(struct nvme_ns *ns, struct sg_io_hdr *hdr,
2486 u8 *cmd)
2487{
2488 int res = SNTI_TRANSLATION_SUCCESS;
2489 int nvme_sc;
2490 u32 alloc_len, xfer_len, resp_size;
2491 u8 select_report;
2492 u8 *response;
2493 struct nvme_dev *dev = ns->dev;
2494 dma_addr_t dma_addr;
2495 void *mem;
2496 struct nvme_id_ctrl *id_ctrl;
2497 u32 ll_length, lun_id;
2498 u8 lun_id_offset = REPORT_LUNS_FIRST_LUN_OFFSET;
2499 __be32 tmp_len;
2500
2501 alloc_len = GET_REPORT_LUNS_ALLOC_LENGTH(cmd);
2502 select_report = GET_U8_FROM_CDB(cmd, REPORT_LUNS_SR_OFFSET);
2503
2504 if ((select_report != ALL_LUNS_RETURNED) &&
2505 (select_report != ALL_WELL_KNOWN_LUNS_RETURNED) &&
2506 (select_report != RESTRICTED_LUNS_RETURNED)) {
2507 res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
2508 ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
2509 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
2510 goto out;
2511 } else {
2512
2513 mem = dma_alloc_coherent(&dev->pci_dev->dev,
2514 sizeof(struct nvme_id_ctrl),
2515 &dma_addr, GFP_KERNEL);
2516 if (mem == NULL) {
2517 res = -ENOMEM;
2518 goto out;
2519 }
2520 nvme_sc = nvme_identify(dev, 0, 1, dma_addr);
2521 res = nvme_trans_status_code(hdr, nvme_sc);
2522 if (res)
2523 goto out_dma;
2524 if (nvme_sc) {
2525 res = nvme_sc;
2526 goto out_dma;
2527 }
2528 id_ctrl = mem;
2529 ll_length = le32_to_cpu(id_ctrl->nn) * LUN_ENTRY_SIZE;
2530 resp_size = ll_length + LUN_DATA_HEADER_SIZE;
2531
2532 if (alloc_len < resp_size) {
2533 res = nvme_trans_completion(hdr,
2534 SAM_STAT_CHECK_CONDITION,
2535 ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
2536 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
2537 goto out_dma;
2538 }
2539
2540 response = kzalloc(resp_size, GFP_KERNEL);
2541 if (response == NULL) {
2542 res = -ENOMEM;
2543 goto out_dma;
2544 }
2545
2546
2547 for (lun_id = 0; lun_id < le32_to_cpu(id_ctrl->nn); lun_id++) {
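			/*
			 * Report LUN IDs 0..nn-1, each stored big-endian in
			 * its own 8-byte LUN entry.
			 */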
2552 __be64 tmp_id = cpu_to_be64(lun_id);
2553 memcpy(&response[lun_id_offset], &tmp_id, sizeof(u64));
2554 lun_id_offset += LUN_ENTRY_SIZE;
2555 }
2556 tmp_len = cpu_to_be32(ll_length);
2557 memcpy(response, &tmp_len, sizeof(u32));
2558 }
2559
2560 xfer_len = min(alloc_len, resp_size);
2561 res = nvme_trans_copy_to_user(hdr, response, xfer_len);
2562
2563 kfree(response);
2564 out_dma:
2565 dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ctrl), mem,
2566 dma_addr);
2567 out:
2568 return res;
2569}
2570
2571static int nvme_trans_request_sense(struct nvme_ns *ns, struct sg_io_hdr *hdr,
2572 u8 *cmd)
2573{
2574 int res = SNTI_TRANSLATION_SUCCESS;
2575 u8 alloc_len, xfer_len, resp_size;
2576 u8 desc_format;
2577 u8 *response;
2578
2579 alloc_len = GET_REQUEST_SENSE_ALLOC_LENGTH(cmd);
2580 desc_format = GET_U8_FROM_CDB(cmd, REQUEST_SENSE_DESC_OFFSET);
2581 desc_format &= REQUEST_SENSE_DESC_MASK;
2582
2583 resp_size = ((desc_format) ? (DESC_FMT_SENSE_DATA_SIZE) :
2584 (FIXED_FMT_SENSE_DATA_SIZE));
2585 response = kzalloc(resp_size, GFP_KERNEL);
2586 if (response == NULL) {
2587 res = -ENOMEM;
2588 goto out;
2589 }
2590
2591 if (desc_format == DESCRIPTOR_FORMAT_SENSE_DATA_TYPE) {
2592
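		/* Descriptor format sense data reporting NO SENSE */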
2593 response[0] = DESC_FORMAT_SENSE_DATA;
2594 response[1] = NO_SENSE;
2595
2596 response[2] = SCSI_ASC_NO_SENSE;
2597 response[3] = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2598
2599 } else {
2600
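		/* Fixed format sense data reporting NO SENSE */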
2601 response[0] = FIXED_SENSE_DATA;
2602
2603 response[2] = NO_SENSE;
2604
2605 response[7] = FIXED_SENSE_DATA_ADD_LENGTH;
2606
2607 response[12] = SCSI_ASC_NO_SENSE;
2608 response[13] = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2609
2610
2611 }
2612
2613 xfer_len = min(alloc_len, resp_size);
2614 res = nvme_trans_copy_to_user(hdr, response, xfer_len);
2615
2616 kfree(response);
2617 out:
2618 return res;
2619}
2620
2621static int nvme_trans_security_protocol(struct nvme_ns *ns,
2622 struct sg_io_hdr *hdr,
2623 u8 *cmd)
2624{
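	/* SECURITY PROTOCOL IN/OUT is not supported; report an illegal command */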
2625 return nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
2626 ILLEGAL_REQUEST, SCSI_ASC_ILLEGAL_COMMAND,
2627 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
2628}
2629
2630static int nvme_trans_start_stop(struct nvme_ns *ns, struct sg_io_hdr *hdr,
2631 u8 *cmd)
2632{
2633 int res = SNTI_TRANSLATION_SUCCESS;
2634 int nvme_sc;
2635 struct nvme_command c;
2636 u8 immed, pcmod, pc, no_flush, start;
2637
2638 immed = GET_U8_FROM_CDB(cmd, START_STOP_UNIT_CDB_IMMED_OFFSET);
2639 pcmod = GET_U8_FROM_CDB(cmd, START_STOP_UNIT_CDB_POWER_COND_MOD_OFFSET);
2640 pc = GET_U8_FROM_CDB(cmd, START_STOP_UNIT_CDB_POWER_COND_OFFSET);
2641 no_flush = GET_U8_FROM_CDB(cmd, START_STOP_UNIT_CDB_NO_FLUSH_OFFSET);
2642 start = GET_U8_FROM_CDB(cmd, START_STOP_UNIT_CDB_START_OFFSET);
2643
2644 immed &= START_STOP_UNIT_CDB_IMMED_MASK;
2645 pcmod &= START_STOP_UNIT_CDB_POWER_COND_MOD_MASK;
2646 pc = (pc & START_STOP_UNIT_CDB_POWER_COND_MASK) >> NIBBLE_SHIFT;
2647 no_flush &= START_STOP_UNIT_CDB_NO_FLUSH_MASK;
2648 start &= START_STOP_UNIT_CDB_START_MASK;
2649
2650 if (immed != 0) {
2651 res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
2652 ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
2653 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
2654 } else {
2655 if (no_flush == 0) {
2656
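			/* Flush the namespace before changing power state */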
2657 memset(&c, 0, sizeof(c));
2658 c.common.opcode = nvme_cmd_flush;
2659 c.common.nsid = cpu_to_le32(ns->ns_id);
2660
2661 nvme_sc = nvme_submit_io_cmd(ns->dev, ns, &c, NULL);
2662 res = nvme_trans_status_code(hdr, nvme_sc);
2663 if (res)
2664 goto out;
2665 if (nvme_sc) {
2666 res = nvme_sc;
2667 goto out;
2668 }
2669 }
2670
2671 res = nvme_trans_power_state(ns, hdr, pc, pcmod, start);
2672 }
2673
2674 out:
2675 return res;
2676}
2677
2678static int nvme_trans_synchronize_cache(struct nvme_ns *ns,
2679 struct sg_io_hdr *hdr, u8 *cmd)
2680{
2681 int res = SNTI_TRANSLATION_SUCCESS;
2682 int nvme_sc;
2683 struct nvme_command c;
2684
2685 memset(&c, 0, sizeof(c));
2686 c.common.opcode = nvme_cmd_flush;
2687 c.common.nsid = cpu_to_le32(ns->ns_id);
2688
2689 nvme_sc = nvme_submit_io_cmd(ns->dev, ns, &c, NULL);
2690
2691 res = nvme_trans_status_code(hdr, nvme_sc);
2692 if (res)
2693 goto out;
2694 if (nvme_sc)
2695 res = nvme_sc;
2696
2697 out:
2698 return res;
2699}
2700
2701static int nvme_trans_format_unit(struct nvme_ns *ns, struct sg_io_hdr *hdr,
2702 u8 *cmd)
2703{
2704 int res = SNTI_TRANSLATION_SUCCESS;
2705 u8 parm_hdr_len = 0;
2706 u8 nvme_pf_code = 0;
2707 u8 format_prot_info, long_list, format_data;
2708
2709 format_prot_info = GET_U8_FROM_CDB(cmd,
2710 FORMAT_UNIT_CDB_FORMAT_PROT_INFO_OFFSET);
2711 long_list = GET_U8_FROM_CDB(cmd, FORMAT_UNIT_CDB_LONG_LIST_OFFSET);
2712 format_data = GET_U8_FROM_CDB(cmd, FORMAT_UNIT_CDB_FORMAT_DATA_OFFSET);
2713
2714 format_prot_info = (format_prot_info &
2715 FORMAT_UNIT_CDB_FORMAT_PROT_INFO_MASK) >>
2716 FORMAT_UNIT_CDB_FORMAT_PROT_INFO_SHIFT;
2717 long_list &= FORMAT_UNIT_CDB_LONG_LIST_MASK;
2718 format_data &= FORMAT_UNIT_CDB_FORMAT_DATA_MASK;
2719
2720 if (format_data != 0) {
2721 if (format_prot_info != 0) {
2722 if (long_list == 0)
2723 parm_hdr_len = FORMAT_UNIT_SHORT_PARM_LIST_LEN;
2724 else
2725 parm_hdr_len = FORMAT_UNIT_LONG_PARM_LIST_LEN;
2726 }
	} else if (format_prot_info != 0) {
2728 res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
2729 ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
2730 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
2731 goto out;
2732 }
2733
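	/*
	 * When a parameter list header is present, read it from the data-out
	 * buffer to derive the NVMe protection information code.
	 */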
2739 if (parm_hdr_len > 0) {
2740 res = nvme_trans_fmt_get_parm_header(hdr, parm_hdr_len,
2741 format_prot_info, &nvme_pf_code);
2742 if (res != SNTI_TRANSLATION_SUCCESS)
2743 goto out;
2744 }
2745
2746
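	/* Attempt to activate any previously downloaded firmware image */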
2747 res = nvme_trans_send_fw_cmd(ns, hdr, nvme_admin_activate_fw, 0, 0, 0);
2748
2749
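	/* Set up the block size and block count to be used for the format */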
2750 res = nvme_trans_fmt_set_blk_size_count(ns, hdr);
2751 if (res != SNTI_TRANSLATION_SUCCESS)
2752 goto out;
2753
2754 res = nvme_trans_fmt_send_cmd(ns, hdr, nvme_pf_code);
2755
2756 out:
2757 return res;
2758}
2759
2760static int nvme_trans_test_unit_ready(struct nvme_ns *ns,
2761 struct sg_io_hdr *hdr,
2762 u8 *cmd)
2763{
2764 int res = SNTI_TRANSLATION_SUCCESS;
2765 struct nvme_dev *dev = ns->dev;
2766
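	/* Ready only when the controller status register reports ready */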
2767 if (!(readl(&dev->bar->csts) & NVME_CSTS_RDY))
2768 res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
2769 NOT_READY, SCSI_ASC_LUN_NOT_READY,
2770 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
2771 else
2772 res = nvme_trans_completion(hdr, SAM_STAT_GOOD, NO_SENSE, 0, 0);
2773
2774 return res;
2775}
2776
2777static int nvme_trans_write_buffer(struct nvme_ns *ns, struct sg_io_hdr *hdr,
2778 u8 *cmd)
2779{
2780 int res = SNTI_TRANSLATION_SUCCESS;
2781 u32 buffer_offset, parm_list_length;
2782 u8 buffer_id, mode;
2783
2784 parm_list_length =
2785 GET_U24_FROM_CDB(cmd, WRITE_BUFFER_CDB_PARM_LIST_LENGTH_OFFSET);
2786 if (parm_list_length % BYTES_TO_DWORDS != 0) {
2787
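		/* The firmware image length must be a whole number of dwords */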
2788 res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
2789 ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
2790 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
2791 goto out;
2792 }
2793 buffer_id = GET_U8_FROM_CDB(cmd, WRITE_BUFFER_CDB_BUFFER_ID_OFFSET);
2794 if (buffer_id > NVME_MAX_FIRMWARE_SLOT) {
2795 res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
2796 ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
2797 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
2798 goto out;
2799 }
2800 mode = GET_U8_FROM_CDB(cmd, WRITE_BUFFER_CDB_MODE_OFFSET) &
2801 WRITE_BUFFER_CDB_MODE_MASK;
2802 buffer_offset =
2803 GET_U24_FROM_CDB(cmd, WRITE_BUFFER_CDB_BUFFER_OFFSET_OFFSET);
2804
2805 switch (mode) {
2806 case DOWNLOAD_SAVE_ACTIVATE:
2807 res = nvme_trans_send_fw_cmd(ns, hdr, nvme_admin_download_fw,
2808 parm_list_length, buffer_offset,
2809 buffer_id);
2810 if (res != SNTI_TRANSLATION_SUCCESS)
2811 goto out;
2812 res = nvme_trans_send_fw_cmd(ns, hdr, nvme_admin_activate_fw,
2813 parm_list_length, buffer_offset,
2814 buffer_id);
2815 break;
2816 case DOWNLOAD_SAVE_DEFER_ACTIVATE:
2817 res = nvme_trans_send_fw_cmd(ns, hdr, nvme_admin_download_fw,
2818 parm_list_length, buffer_offset,
2819 buffer_id);
2820 break;
2821 case ACTIVATE_DEFERRED_MICROCODE:
2822 res = nvme_trans_send_fw_cmd(ns, hdr, nvme_admin_activate_fw,
2823 parm_list_length, buffer_offset,
2824 buffer_id);
2825 break;
2826 default:
2827 res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
2828 ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
2829 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
2830 break;
2831 }
2832
2833 out:
2834 return res;
2835}
2836
2837struct scsi_unmap_blk_desc {
2838 __be64 slba;
2839 __be32 nlb;
2840 u32 resv;
2841};
2842
2843struct scsi_unmap_parm_list {
2844 __be16 unmap_data_len;
2845 __be16 unmap_blk_desc_data_len;
2846 u32 resv;
2847 struct scsi_unmap_blk_desc desc[0];
2848};
2849
2850static int nvme_trans_unmap(struct nvme_ns *ns, struct sg_io_hdr *hdr,
2851 u8 *cmd)
2852{
2853 struct nvme_dev *dev = ns->dev;
2854 struct scsi_unmap_parm_list *plist;
2855 struct nvme_dsm_range *range;
2856 struct nvme_command c;
2857 int i, nvme_sc, res = -ENOMEM;
2858 u16 ndesc, list_len;
2859 dma_addr_t dma_addr;
2860
2861 list_len = GET_U16_FROM_CDB(cmd, UNMAP_CDB_PARAM_LIST_LENGTH_OFFSET);
2862 if (!list_len)
2863 return -EINVAL;
2864
2865 plist = kmalloc(list_len, GFP_KERNEL);
2866 if (!plist)
2867 return -ENOMEM;
2868
2869 res = nvme_trans_copy_from_user(hdr, plist, list_len);
2870 if (res != SNTI_TRANSLATION_SUCCESS)
2871 goto out;
2872
2873 ndesc = be16_to_cpu(plist->unmap_blk_desc_data_len) >> 4;
2874 if (!ndesc || ndesc > 256) {
2875 res = -EINVAL;
2876 goto out;
2877 }
2878
	range = dma_alloc_coherent(&dev->pci_dev->dev, ndesc * sizeof(*range),
							&dma_addr, GFP_KERNEL);
	if (!range) {
		res = -ENOMEM;
		goto out;
	}
2883
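	/* Convert each big-endian UNMAP block descriptor to an NVMe DSM range */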
2884 for (i = 0; i < ndesc; i++) {
2885 range[i].nlb = cpu_to_le32(be32_to_cpu(plist->desc[i].nlb));
2886 range[i].slba = cpu_to_le64(be64_to_cpu(plist->desc[i].slba));
2887 range[i].cattr = 0;
2888 }
2889
2890 memset(&c, 0, sizeof(c));
2891 c.dsm.opcode = nvme_cmd_dsm;
2892 c.dsm.nsid = cpu_to_le32(ns->ns_id);
2893 c.dsm.prp1 = cpu_to_le64(dma_addr);
2894 c.dsm.nr = cpu_to_le32(ndesc - 1);
2895 c.dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);
2896
2897 nvme_sc = nvme_submit_io_cmd(dev, ns, &c, NULL);
2898 res = nvme_trans_status_code(hdr, nvme_sc);
2899
2900 dma_free_coherent(&dev->pci_dev->dev, ndesc * sizeof(*range),
2901 range, dma_addr);
2902 out:
2903 kfree(plist);
2904 return res;
2905}
2906
2907static int nvme_scsi_translate(struct nvme_ns *ns, struct sg_io_hdr *hdr)
2908{
2909 u8 cmd[BLK_MAX_CDB];
2910 int retcode;
2911 unsigned int opcode;
2912
2913 if (hdr->cmdp == NULL)
2914 return -EMSGSIZE;
2915 if (copy_from_user(cmd, hdr->cmdp, hdr->cmd_len))
2916 return -EFAULT;
2917
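	/*
	 * Prime the sg_io_hdr with good status so commands translated without
	 * issuing an NVMe command still return success to the caller.
	 */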
2922 retcode = nvme_trans_status_code(hdr, NVME_SC_SUCCESS);
2923 if (retcode)
2924 return retcode;
2925
2926 opcode = cmd[0];
2927
2928 switch (opcode) {
2929 case READ_6:
2930 case READ_10:
2931 case READ_12:
2932 case READ_16:
2933 retcode = nvme_trans_io(ns, hdr, 0, cmd);
2934 break;
2935 case WRITE_6:
2936 case WRITE_10:
2937 case WRITE_12:
2938 case WRITE_16:
2939 retcode = nvme_trans_io(ns, hdr, 1, cmd);
2940 break;
2941 case INQUIRY:
2942 retcode = nvme_trans_inquiry(ns, hdr, cmd);
2943 break;
2944 case LOG_SENSE:
2945 retcode = nvme_trans_log_sense(ns, hdr, cmd);
2946 break;
2947 case MODE_SELECT:
2948 case MODE_SELECT_10:
2949 retcode = nvme_trans_mode_select(ns, hdr, cmd);
2950 break;
2951 case MODE_SENSE:
2952 case MODE_SENSE_10:
2953 retcode = nvme_trans_mode_sense(ns, hdr, cmd);
2954 break;
2955 case READ_CAPACITY:
2956 retcode = nvme_trans_read_capacity(ns, hdr, cmd);
2957 break;
2958 case SERVICE_ACTION_IN_16:
2959 if (IS_READ_CAP_16(cmd))
2960 retcode = nvme_trans_read_capacity(ns, hdr, cmd);
2961 else
2962 goto out;
2963 break;
2964 case REPORT_LUNS:
2965 retcode = nvme_trans_report_luns(ns, hdr, cmd);
2966 break;
2967 case REQUEST_SENSE:
2968 retcode = nvme_trans_request_sense(ns, hdr, cmd);
2969 break;
2970 case SECURITY_PROTOCOL_IN:
2971 case SECURITY_PROTOCOL_OUT:
2972 retcode = nvme_trans_security_protocol(ns, hdr, cmd);
2973 break;
2974 case START_STOP:
2975 retcode = nvme_trans_start_stop(ns, hdr, cmd);
2976 break;
2977 case SYNCHRONIZE_CACHE:
2978 retcode = nvme_trans_synchronize_cache(ns, hdr, cmd);
2979 break;
2980 case FORMAT_UNIT:
2981 retcode = nvme_trans_format_unit(ns, hdr, cmd);
2982 break;
2983 case TEST_UNIT_READY:
2984 retcode = nvme_trans_test_unit_ready(ns, hdr, cmd);
2985 break;
2986 case WRITE_BUFFER:
2987 retcode = nvme_trans_write_buffer(ns, hdr, cmd);
2988 break;
2989 case UNMAP:
2990 retcode = nvme_trans_unmap(ns, hdr, cmd);
2991 break;
2992 default:
2993 out:
2994 retcode = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
2995 ILLEGAL_REQUEST, SCSI_ASC_ILLEGAL_COMMAND,
2996 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
2997 break;
2998 }
2999 return retcode;
3000}
3001
3002int nvme_sg_io(struct nvme_ns *ns, struct sg_io_hdr __user *u_hdr)
3003{
3004 struct sg_io_hdr hdr;
3005 int retcode;
3006
3007 if (!capable(CAP_SYS_ADMIN))
3008 return -EACCES;
3009 if (copy_from_user(&hdr, u_hdr, sizeof(hdr)))
3010 return -EFAULT;
3011 if (hdr.interface_id != 'S')
3012 return -EINVAL;
3013 if (hdr.cmd_len > BLK_MAX_CDB)
3014 return -EINVAL;
3015
3016 retcode = nvme_scsi_translate(ns, &hdr);
3017 if (retcode < 0)
3018 return retcode;
3019 if (retcode > 0)
3020 retcode = SNTI_TRANSLATION_SUCCESS;
3021 if (copy_to_user(u_hdr, &hdr, sizeof(sg_io_hdr_t)) > 0)
3022 return -EFAULT;
3023
3024 return retcode;
3025}
3026
3027int nvme_sg_get_version_num(int __user *ip)
3028{
3029 return put_user(sg_version_num, ip);
3030}
3031