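/*
 * target_core_rd.c
 *
 * TCM "rd_mcp" ramdisk subsystem backend: emulates a SCSI device whose
 * storage is plain kernel pages tracked through scatterlist tables.
 */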
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>

#include "target_core_rd.h"

static inline struct rd_dev *RD_DEV(struct se_device *dev)
{
	return container_of(dev, struct rd_dev, dev);
}

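/*
 * rd_attach_hba(): allocate the per-HBA rd_host that hands out ramdisk
 * device IDs; rd_detach_hba() below releases it.
 */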
static int rd_attach_hba(struct se_hba *hba, u32 host_id)
{
	struct rd_host *rd_host;

	rd_host = kzalloc(sizeof(struct rd_host), GFP_KERNEL);
	if (!rd_host) {
		pr_err("Unable to allocate memory for struct rd_host\n");
		return -ENOMEM;
	}

	rd_host->rd_host_id = host_id;

	hba->hba_ptr = rd_host;

	pr_debug("CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on"
		" Generic Target Core Stack %s\n", hba->hba_id,
		RD_HBA_VERSION, TARGET_CORE_MOD_VERSION);

	return 0;
}

static void rd_detach_hba(struct se_hba *hba)
{
	struct rd_host *rd_host = hba->hba_ptr;

	pr_debug("CORE_HBA[%d] - Detached Ramdisk HBA: %u from"
		" Generic Target Core\n", hba->hba_id, rd_host->rd_host_id);

	kfree(rd_host);
	hba->hba_ptr = NULL;
}

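/*
 * rd_release_device_space(): free every backing page referenced by the
 * device's scatterlist tables, then free the tables themselves.
 */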
static void rd_release_device_space(struct rd_dev *rd_dev)
{
	u32 i, j, page_count = 0, sg_per_table;
	struct rd_dev_sg_table *sg_table;
	struct page *pg;
	struct scatterlist *sg;

	if (!rd_dev->sg_table_array || !rd_dev->sg_table_count)
		return;

	sg_table = rd_dev->sg_table_array;

	for (i = 0; i < rd_dev->sg_table_count; i++) {
		sg = sg_table[i].sg_table;
		sg_per_table = sg_table[i].rd_sg_count;

		for (j = 0; j < sg_per_table; j++) {
			pg = sg_page(&sg[j]);
			if (pg) {
				__free_page(pg);
				page_count++;
			}
		}

		kfree(sg);
	}

	pr_debug("CORE_RD[%u] - Released device space for Ramdisk"
		" Device ID: %u, pages %u in %u tables total bytes %lu\n",
		rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
		rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE);

	kfree(sg_table);
	rd_dev->sg_table_array = NULL;
	rd_dev->sg_table_count = 0;
}

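/*
 * rd_build_device_space(): allocate rd_page_count backing pages and chain
 * them into an array of scatterlist tables, each limited to
 * RD_MAX_ALLOCATION_SIZE worth of sg entries.
 */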
static int rd_build_device_space(struct rd_dev *rd_dev)
{
	u32 i = 0, j, page_offset = 0, sg_per_table, sg_tables, total_sg_needed;
	u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));
	struct rd_dev_sg_table *sg_table;
	struct page *pg;
	struct scatterlist *sg;

	if (rd_dev->rd_page_count <= 0) {
		pr_err("Illegal page count: %u for Ramdisk device\n",
			rd_dev->rd_page_count);
		return -EINVAL;
	}

	/* NULLIO devices keep no backing pages; nothing to build. */
	if (rd_dev->rd_flags & RDF_NULLIO)
		return 0;

	total_sg_needed = rd_dev->rd_page_count;

	sg_tables = (total_sg_needed / max_sg_per_table) + 1;

	sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL);
	if (!sg_table) {
		pr_err("Unable to allocate memory for Ramdisk"
			" scatterlist tables\n");
		return -ENOMEM;
	}

	rd_dev->sg_table_array = sg_table;
	rd_dev->sg_table_count = sg_tables;

	/*
	 * Allocation failures below leave a partially built table array;
	 * the caller's error path tears it down via rd_release_device_space().
	 */
	while (total_sg_needed) {
		sg_per_table = (total_sg_needed > max_sg_per_table) ?
			max_sg_per_table : total_sg_needed;

		sg = kzalloc(sg_per_table * sizeof(struct scatterlist),
				GFP_KERNEL);
		if (!sg) {
			pr_err("Unable to allocate scatterlist array"
				" for struct rd_dev\n");
			return -ENOMEM;
		}

		sg_init_table(sg, sg_per_table);

		sg_table[i].sg_table = sg;
		sg_table[i].rd_sg_count = sg_per_table;
		sg_table[i].page_start_offset = page_offset;
		sg_table[i++].page_end_offset = (page_offset + sg_per_table)
						- 1;

		for (j = 0; j < sg_per_table; j++) {
			pg = alloc_pages(GFP_KERNEL, 0);
			if (!pg) {
				pr_err("Unable to allocate scatterlist"
					" pages for struct rd_dev_sg_table\n");
				return -ENOMEM;
			}
			sg_assign_page(&sg[j], pg);
			sg[j].length = PAGE_SIZE;
		}

		page_offset += sg_per_table;
		total_sg_needed -= sg_per_table;
	}

	pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u space of"
		" %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
		rd_dev->rd_dev_id, rd_dev->rd_page_count,
		rd_dev->sg_table_count);

	return 0;
}

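/*
 * rd_alloc_device(): allocate the rd_dev wrapper and hand the embedded
 * se_device back to the target core; backing pages are allocated later
 * in rd_configure_device().
 */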
static struct se_device *rd_alloc_device(struct se_hba *hba, const char *name)
{
	struct rd_dev *rd_dev;
	struct rd_host *rd_host = hba->hba_ptr;

	rd_dev = kzalloc(sizeof(struct rd_dev), GFP_KERNEL);
	if (!rd_dev) {
		pr_err("Unable to allocate memory for struct rd_dev\n");
		return NULL;
	}

	rd_dev->rd_host = rd_host;

	return &rd_dev->dev;
}

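/*
 * rd_configure_device(): verify that rd_pages= was supplied, build the
 * backing page tables, and publish the device's hardware attributes.
 */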
static int rd_configure_device(struct se_device *dev)
{
	struct rd_dev *rd_dev = RD_DEV(dev);
	struct rd_host *rd_host = dev->se_hba->hba_ptr;
	int ret;

	if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) {
		pr_debug("Missing rd_pages= parameter\n");
		return -EINVAL;
	}

	ret = rd_build_device_space(rd_dev);
	if (ret < 0)
		goto fail;

	dev->dev_attrib.hw_block_size = RD_BLOCKSIZE;
	dev->dev_attrib.hw_max_sectors = UINT_MAX;
	dev->dev_attrib.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH;

	rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++;

	pr_debug("CORE_RD[%u] - Added TCM MEMCPY Ramdisk Device ID: %u of"
		" %u pages in %u tables, %lu total bytes\n",
		rd_host->rd_host_id, rd_dev->rd_dev_id, rd_dev->rd_page_count,
		rd_dev->sg_table_count,
		(unsigned long)(rd_dev->rd_page_count * PAGE_SIZE));

	return 0;

fail:
	rd_release_device_space(rd_dev);
	return ret;
}

static void rd_free_device(struct se_device *dev)
{
	struct rd_dev *rd_dev = RD_DEV(dev);

	rd_release_device_space(rd_dev);
	kfree(rd_dev);
}

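/*
 * rd_get_sg_table(): map a zero-based backing page index to the sg table
 * that covers it, using the fixed number of sg entries per table.
 */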
static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
{
	struct rd_dev_sg_table *sg_table;
	u32 i, sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));

	i = page / sg_per_table;
	if (i < rd_dev->sg_table_count) {
		sg_table = &rd_dev->sg_table_array[i];
		if ((sg_table->page_start_offset <= page) &&
		    (sg_table->page_end_offset >= page))
			return sg_table;
	}

	pr_err("Unable to locate struct rd_dev_sg_table for page: %u\n",
			page);

	return NULL;
}

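/*
 * rd_execute_rw(): memcpy() the command's data between the caller's
 * scatterlist and the ramdisk backing pages, walking the caller's list
 * with an sg_mapping_iter and crossing sg tables as needed.
 */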
static sense_reason_t
rd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
	      enum dma_data_direction data_direction)
{
	struct se_device *se_dev = cmd->se_dev;
	struct rd_dev *dev = RD_DEV(se_dev);
	struct rd_dev_sg_table *table;
	struct scatterlist *rd_sg;
	struct sg_mapping_iter m;
	u32 rd_offset;
	u32 rd_size;
	u32 rd_page;
	u32 src_len;
	u64 tmp;

	if (dev->rd_flags & RDF_NULLIO) {
		target_complete_cmd(cmd, SAM_STAT_GOOD);
		return 0;
	}

	tmp = cmd->t_task_lba * se_dev->dev_attrib.block_size;
	rd_offset = do_div(tmp, PAGE_SIZE);
	rd_page = tmp;
	rd_size = cmd->data_length;

	table = rd_get_sg_table(dev, rd_page);
	if (!table)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	rd_sg = &table->sg_table[rd_page - table->page_start_offset];

	pr_debug("RD[%u]: %s LBA: %llu, Size: %u Page: %u, Offset: %u\n",
			dev->rd_dev_id,
			data_direction == DMA_FROM_DEVICE ? "Read" : "Write",
			cmd->t_task_lba, rd_size, rd_page, rd_offset);

	src_len = PAGE_SIZE - rd_offset;
	sg_miter_start(&m, sgl, sgl_nents,
			data_direction == DMA_FROM_DEVICE ?
				SG_MITER_TO_SG : SG_MITER_FROM_SG);
	while (rd_size) {
		u32 len;
		void *rd_addr;

		sg_miter_next(&m);
		if (!(u32)m.length) {
			pr_debug("RD[%u]: invalid sgl %p len %zu\n",
				dev->rd_dev_id, m.addr, m.length);
			sg_miter_stop(&m);
			return TCM_INCORRECT_AMOUNT_OF_DATA;
		}
		len = min((u32)m.length, src_len);
		if (len > rd_size) {
			pr_debug("RD[%u]: size underrun page %d offset %d "
				"size %d\n", dev->rd_dev_id,
				rd_page, rd_offset, rd_size);
			len = rd_size;
		}
		m.consumed = len;

		rd_addr = sg_virt(rd_sg) + rd_offset;

		if (data_direction == DMA_FROM_DEVICE)
			memcpy(m.addr, rd_addr, len);
		else
			memcpy(rd_addr, m.addr, len);

		rd_size -= len;
		if (!rd_size)
			continue;

		src_len -= len;
		if (src_len) {
			rd_offset += len;
			continue;
		}

		/* This backing page is exhausted; advance to the next one. */
		rd_page++;
		rd_offset = 0;
		src_len = PAGE_SIZE;
		if (rd_page <= table->page_end_offset) {
			rd_sg++;
			continue;
		}

		table = rd_get_sg_table(dev, rd_page);
		if (!table) {
			sg_miter_stop(&m);
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		}

		/* A new table starts at its first sg entry. */
		rd_sg = table->sg_table;
	}
	sg_miter_stop(&m);

	target_complete_cmd(cmd, SAM_STAT_GOOD);
	return 0;
}

enum {
	Opt_rd_pages, Opt_rd_nullio, Opt_err
};

static match_table_t tokens = {
	{Opt_rd_pages, "rd_pages=%d"},
	{Opt_rd_nullio, "rd_nullio=%d"},
	{Opt_err, NULL}
};

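/*
 * rd_set_configfs_dev_params(): parse the comma-separated configfs control
 * string for "rd_pages=" (backing page count, required) and "rd_nullio="
 * (complete all I/O without touching memory).
 */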
static ssize_t rd_set_configfs_dev_params(struct se_device *dev,
		const char *page, ssize_t count)
{
	struct rd_dev *rd_dev = RD_DEV(dev);
	char *orig, *ptr, *opts;
	substring_t args[MAX_OPT_ARGS];
	int ret = 0, arg, token;

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;

	while ((ptr = strsep(&opts, ",\n")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_rd_pages:
			match_int(args, &arg);
			rd_dev->rd_page_count = arg;
			pr_debug("RAMDISK: Referencing Page"
				" Count: %u\n", rd_dev->rd_page_count);
			rd_dev->rd_flags |= RDF_HAS_PAGE_COUNT;
			break;
		case Opt_rd_nullio:
			match_int(args, &arg);
			if (arg != 1)
				break;

			pr_debug("RAMDISK: Setting NULLIO flag: %d\n", arg);
			rd_dev->rd_flags |= RDF_NULLIO;
			break;
		default:
			break;
		}
	}

	kfree(orig);
	return (!ret) ? count : ret;
}

static ssize_t rd_show_configfs_dev_params(struct se_device *dev, char *b)
{
	struct rd_dev *rd_dev = RD_DEV(dev);

	ssize_t bl = sprintf(b, "TCM RamDisk ID: %u RamDisk Makeup: rd_mcp\n",
			rd_dev->rd_dev_id);
	bl += sprintf(b + bl, " PAGES/PAGE_SIZE: %u*%lu"
		" SG_table_count: %u nullio: %d\n", rd_dev->rd_page_count,
		PAGE_SIZE, rd_dev->sg_table_count,
		!!(rd_dev->rd_flags & RDF_NULLIO));
	return bl;
}

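/*
 * rd_get_blocks(): report capacity as the last addressable LBA, derived
 * from the configured page count and the device block size.
 */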
static sector_t rd_get_blocks(struct se_device *dev)
{
	struct rd_dev *rd_dev = RD_DEV(dev);

	unsigned long long blocks_long = ((rd_dev->rd_page_count * PAGE_SIZE) /
			dev->dev_attrib.block_size) - 1;

	return blocks_long;
}

static struct sbc_ops rd_sbc_ops = {
	.execute_rw		= rd_execute_rw,
};

static sense_reason_t
rd_parse_cdb(struct se_cmd *cmd)
{
	return sbc_parse_cdb(cmd, &rd_sbc_ops);
}

static struct se_subsystem_api rd_mcp_template = {
	.name			= "rd_mcp",
	.inquiry_prod		= "RAMDISK-MCP",
	.inquiry_rev		= RD_MCP_VERSION,
	.transport_type		= TRANSPORT_PLUGIN_VHBA_VDEV,
	.attach_hba		= rd_attach_hba,
	.detach_hba		= rd_detach_hba,
	.alloc_device		= rd_alloc_device,
	.configure_device	= rd_configure_device,
	.free_device		= rd_free_device,
	.parse_cdb		= rd_parse_cdb,
	.set_configfs_dev_params = rd_set_configfs_dev_params,
	.show_configfs_dev_params = rd_show_configfs_dev_params,
	.get_device_type	= sbc_get_device_type,
	.get_blocks		= rd_get_blocks,
};

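/*
 * Entry points used at load/unload time to register and unregister the
 * "rd_mcp" backend with the target core subsystem API.
 */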
int __init rd_module_init(void)
{
	int ret;

	ret = transport_subsystem_register(&rd_mcp_template);
	if (ret < 0)
		return ret;

	return 0;
}

void rd_module_exit(void)
{
	transport_subsystem_release(&rd_mcp_template);
}