#include "efct_driver.h"
#include "efct_hw.h"
#include "efct_io.h"

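/*
 * Pool of pre-allocated SCSI IO contexts. Every entry in ios[] is
 * allocated up front by efct_io_pool_create(); unused IOs sit on
 * freelist, which is protected by lock (taken with
 * spin_lock_irqsave(), so alloc/free may be called from any context).
 *
 * Minimal usage sketch (call sites assumed, e.g. the transport attach
 * path; n_sgl is whatever SGL depth the hardware was configured with):
 *
 *	io_pool = efct_io_pool_create(efct, n_sgl);
 *	io = efct_io_pool_io_alloc(io_pool);
 *	...
 *	efct_io_pool_io_free(io_pool, io);
 *	efct_io_pool_free(io_pool);
 */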
struct efct_io_pool {
	struct efct *efct;
	spinlock_t lock;
	u32 io_num_ios;
	struct efct_io *ios[EFCT_NUM_SCSI_IOS];
	struct list_head freelist;
};

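/*
 * efct_io_pool_create() - Allocate the IO pool and pre-allocate all
 * EFCT_NUM_SCSI_IOS IOs, each with a DMA-coherent response buffer and
 * an SGL of @num_sgl entries.
 *
 * If an IO allocation fails part-way, the loop stops and a smaller
 * pool is returned; if a buffer allocation fails, the pool is torn
 * down and NULL is returned.
 */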
struct efct_io_pool *
efct_io_pool_create(struct efct *efct, u32 num_sgl)
{
	u32 i = 0;
	struct efct_io_pool *io_pool;
	struct efct_io *io;

	/* Allocate the IO pool itself */
	io_pool = kzalloc(sizeof(*io_pool), GFP_KERNEL);
	if (!io_pool)
		return NULL;

	io_pool->efct = efct;
	INIT_LIST_HEAD(&io_pool->freelist);
	spin_lock_init(&io_pool->lock);

	for (i = 0; i < EFCT_NUM_SCSI_IOS; i++) {
		io = kzalloc(sizeof(*io), GFP_KERNEL);
		if (!io)
			break;

		io_pool->io_num_ios++;
		io_pool->ios[i] = io;
		io->tag = i;
		io->instance_index = i;

		/*
		 * Allocate a response buffer. dma_alloc_coherent()
		 * honors the device's DMA mask, so plain GFP_KERNEL
		 * suffices here.
		 */
		io->rspbuf.size = SCSI_RSP_BUF_LENGTH;
		io->rspbuf.virt = dma_alloc_coherent(&efct->pci->dev,
						     io->rspbuf.size,
						     &io->rspbuf.phys,
						     GFP_KERNEL);
		if (!io->rspbuf.virt) {
			efc_log_err(efct, "dma_alloc rspbuf failed\n");
			efct_io_pool_free(io_pool);
			return NULL;
		}

		/* Allocate the SGL; kcalloc() returns zeroed memory */
		io->sgl = kcalloc(num_sgl, sizeof(*io->sgl), GFP_KERNEL);
		if (!io->sgl) {
			efct_io_pool_free(io_pool);
			return NULL;
		}

		io->sgl_allocated = num_sgl;
		io->sgl_count = 0;

		INIT_LIST_HEAD(&io->list_entry);
		list_add_tail(&io->list_entry, &io_pool->freelist);
	}

	return io_pool;
}

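/*
 * efct_io_pool_free() - Tear down an IO pool, releasing each IO's SGL,
 * response buffer and the IO itself. Safe to call on the partially
 * built pool left behind by a failed efct_io_pool_create().
 */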
int
efct_io_pool_free(struct efct_io_pool *io_pool)
{
	struct efct *efct;
	u32 i;
	struct efct_io *io;

	if (io_pool) {
		efct = io_pool->efct;

		for (i = 0; i < io_pool->io_num_ios; i++) {
			io = io_pool->ios[i];
			if (!io)
				continue;

			kfree(io->sgl);
			dma_free_coherent(&efct->pci->dev,
					  io->rspbuf.size, io->rspbuf.virt,
					  io->rspbuf.phys);
			/*
			 * The IO itself was kzalloc()ed in
			 * efct_io_pool_create() and would otherwise
			 * leak here.
			 */
			kfree(io);
		}

		kfree(io_pool);
		efct->xport->io_pool = NULL;
	}

	return 0;
}

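/*
 * efct_io_pool_io_alloc() - Take an IO off the freelist and reset its
 * per-command fields. Returns NULL when the pool is exhausted.
 */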
struct efct_io *
efct_io_pool_io_alloc(struct efct_io_pool *io_pool)
{
	struct efct_io *io = NULL;
	struct efct *efct;
	unsigned long flags = 0;

	efct = io_pool->efct;

	spin_lock_irqsave(&io_pool->lock, flags);

	if (!list_empty(&io_pool->freelist)) {
		io = list_first_entry(&io_pool->freelist, struct efct_io,
				      list_entry);
		list_del_init(&io->list_entry);
	}

	spin_unlock_irqrestore(&io_pool->lock, flags);

	if (!io)
		return NULL;

	/* initialize the per-command IO fields */
	io->io_type = EFCT_IO_TYPE_MAX;
	io->hio_type = EFCT_HW_IO_MAX;
	io->hio = NULL;
	io->transferred = 0;
	io->efct = efct;
	io->timeout = 0;
	io->sgl_count = 0;
	io->tgt_task_tag = 0;
	io->init_task_tag = 0;
	io->hw_tag = 0;
	io->display_name = "pending";
	io->seq_init = 0;
	io->io_free = 0;
	io->release = NULL;

	atomic_add_return(1, &efct->xport->io_active_count);
	atomic_add_return(1, &efct->xport->io_total_alloc);

	return io;
}

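/*
 * efct_io_pool_io_free() - Return an IO to the freelist. Any attached
 * HW IO is detached under the pool lock and released once the lock is
 * dropped.
 */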
void
efct_io_pool_io_free(struct efct_io_pool *io_pool, struct efct_io *io)
{
	struct efct *efct;
	struct efct_hw_io *hio = NULL;
	unsigned long flags = 0;

	efct = io_pool->efct;

	spin_lock_irqsave(&io_pool->lock, flags);
	hio = io->hio;
	io->hio = NULL;
	io->io_free = 1;
	INIT_LIST_HEAD(&io->list_entry);
	list_add(&io->list_entry, &io_pool->freelist);
	spin_unlock_irqrestore(&io_pool->lock, flags);

	if (hio)
		efct_hw_io_free(&efct->hw, hio);

	atomic_sub_return(1, &efct->xport->io_active_count);
	atomic_add_return(1, &efct->xport->io_total_free);
}

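/*
 * efct_io_find_tgt_io() - Find an active target IO on @node matching
 * OX_ID (and RX_ID, unless rx_id is the 0xffff wildcard), taking a
 * reference on the match. Returns NULL if there is no match or the
 * IO's refcount has already dropped to zero.
 */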
struct efct_io *
efct_io_find_tgt_io(struct efct *efct, struct efct_node *node,
		    u16 ox_id, u16 rx_id)
{
	struct efct_io *io = NULL;
	unsigned long flags = 0;
	bool found = false;

	spin_lock_irqsave(&node->active_ios_lock, flags);
	list_for_each_entry(io, &node->active_ios, list_entry) {
		if ((io->cmd_tgt && io->init_task_tag == ox_id) &&
		    (rx_id == 0xffff || io->tgt_task_tag == rx_id)) {
			if (kref_get_unless_zero(&io->ref))
				found = true;
			break;
		}
	}
	spin_unlock_irqrestore(&node->active_ios_lock, flags);

	return found ? io : NULL;
}