#include <asm/vio.h>
#include <asm/prom.h>
#include <asm/iommu.h>
#include <asm/hvcall.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/interrupt.h>
#include "ibmvscsi.h"

static char partition_name[97] = "UNKNOWN";
static unsigned int partition_number = -1;
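
/* ------------------------------------------------------------
 * Routines for managing the command/response queue
 */
/**
 * rpavscsi_handle_event: - Interrupt handler for crq events
 * @irq:	number of irq to handle, not used
 * @dev_instance:	ibmvscsi_host_data of host that received interrupt
 *
 * Disables interrupts and schedules srp_task
 * Always returns IRQ_HANDLED
 */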
static irqreturn_t rpavscsi_handle_event(int irq, void *dev_instance)
{
	struct ibmvscsi_host_data *hostdata =
	    (struct ibmvscsi_host_data *)dev_instance;
	vio_disable_interrupts(to_vio_dev(hostdata->dev));
	tasklet_schedule(&hostdata->srp_task);
	return IRQ_HANDLED;
}
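
/**
 * rpavscsi_release_crq_queue: - Deallocates data and unregisters CRQ
 * @queue:	crq_queue to release
 * @hostdata:	ibmvscsi_host_data of host
 * @max_requests:	maximum requests (unused)
 *
 * Frees the irq, kills the srp_task tasklet, unregisters the CRQ with
 * the hypervisor, unmaps the DMA buffer and frees the message page.
 */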
static void rpavscsi_release_crq_queue(struct crq_queue *queue,
				       struct ibmvscsi_host_data *hostdata,
				       int max_requests)
{
	long rc;
	struct vio_dev *vdev = to_vio_dev(hostdata->dev);
	free_irq(vdev->irq, (void *)hostdata);
	tasklet_kill(&hostdata->srp_task);
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
	dma_unmap_single(hostdata->dev,
			 queue->msg_token,
			 queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
	free_page((unsigned long)queue->msgs);
}
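
/**
 * crq_queue_next_crq: - Returns the next entry in message queue
 * @queue:	crq_queue to use
 *
 * Returns pointer to next entry in queue, or NULL if there are no new
 * entries in the queue.
 */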
static struct viosrp_crq *crq_queue_next_crq(struct crq_queue *queue)
{
	struct viosrp_crq *crq;
	unsigned long flags;

	spin_lock_irqsave(&queue->lock, flags);
	crq = &queue->msgs[queue->cur];
	if (crq->valid & 0x80) {
		if (++queue->cur == queue->size)
			queue->cur = 0;
	} else
		crq = NULL;
	spin_unlock_irqrestore(&queue->lock, flags);

	return crq;
}
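
/**
 * rpavscsi_send_crq: - Send a CRQ
 * @hostdata:	the adapter
 * @word1:	the first 64 bits of the data
 * @word2:	the second 64 bits of the data
 */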
static int rpavscsi_send_crq(struct ibmvscsi_host_data *hostdata,
			     u64 word1, u64 word2)
{
	struct vio_dev *vdev = to_vio_dev(hostdata->dev);

	return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2);
}
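
/**
 * rpavscsi_task: - Process srps asynchronously
 * @data:	ibmvscsi_host_data of host
 *
 * Drains the CRQ in a tasklet, re-enabling the VIO interrupt once the
 * queue is empty.
 */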
static void rpavscsi_task(void *data)
{
	struct ibmvscsi_host_data *hostdata = (struct ibmvscsi_host_data *)data;
	struct vio_dev *vdev = to_vio_dev(hostdata->dev);
	struct viosrp_crq *crq;
	int done = 0;

	while (!done) {
		/* Pull all the valid messages off the CRQ */
		while ((crq = crq_queue_next_crq(&hostdata->queue)) != NULL) {
			ibmvscsi_handle_crq(crq, hostdata);
			crq->valid = 0x00;
		}

		vio_enable_interrupts(vdev);
		/* Check once more to close the race with an entry arriving
		 * between draining the queue and re-enabling interrupts.
		 */
		if ((crq = crq_queue_next_crq(&hostdata->queue)) != NULL) {
			vio_disable_interrupts(vdev);
			ibmvscsi_handle_crq(crq, hostdata);
			crq->valid = 0x00;
		} else {
			done = 1;
		}
	}
}

static void gather_partition_info(void)
{
	struct device_node *rootdn;

	const char *ppartition_name;
	const unsigned int *p_number_ptr;

	/* Retrieve information about this partition from the device tree */
	rootdn = of_find_node_by_path("/");
	if (!rootdn) {
		return;
	}

	ppartition_name = of_get_property(rootdn, "ibm,partition-name", NULL);
	if (ppartition_name)
		strncpy(partition_name, ppartition_name,
			sizeof(partition_name));
	p_number_ptr = of_get_property(rootdn, "ibm,partition-no", NULL);
	if (p_number_ptr)
		partition_number = *p_number_ptr;
	of_node_put(rootdn);
}

static void set_adapter_info(struct ibmvscsi_host_data *hostdata)
{
	memset(&hostdata->madapter_info, 0x00,
	       sizeof(hostdata->madapter_info));

	dev_info(hostdata->dev, "SRP_VERSION: %s\n", SRP_VERSION);
	strcpy(hostdata->madapter_info.srp_version, SRP_VERSION);

	strncpy(hostdata->madapter_info.partition_name, partition_name,
		sizeof(hostdata->madapter_info.partition_name));

	hostdata->madapter_info.partition_number = partition_number;

	hostdata->madapter_info.mad_version = 1;
	hostdata->madapter_info.os_type = 2;	/* os_type 2 identifies a Linux client */
}
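
/**
 * rpavscsi_reset_crq_queue: - resets a crq after a failure
 * @queue:	crq_queue to reset
 * @hostdata:	ibmvscsi_host_data of host
 *
 * Closes the CRQ with the hypervisor, clears the message page and
 * registers the queue again.
 */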
static int rpavscsi_reset_crq_queue(struct crq_queue *queue,
				    struct ibmvscsi_host_data *hostdata)
{
	int rc;
	struct vio_dev *vdev = to_vio_dev(hostdata->dev);

	/* Close the CRQ */
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));

	/* Clean out the queue */
	memset(queue->msgs, 0x00, PAGE_SIZE);
	queue->cur = 0;

	set_adapter_info(hostdata);

	/* And re-open it again */
	rc = plpar_hcall_norets(H_REG_CRQ,
				vdev->unit_address,
				queue->msg_token, PAGE_SIZE);
	if (rc == 2) {
		/* Adapter is good, but other end is not ready */
		dev_warn(hostdata->dev, "Partner adapter not ready\n");
	} else if (rc != 0) {
		dev_warn(hostdata->dev, "couldn't register crq--rc 0x%x\n", rc);
	}
	return rc;
}
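
/**
 * rpavscsi_init_crq_queue: - Initializes and registers CRQ with hypervisor
 * @queue:	crq_queue to initialize and register
 * @hostdata:	ibmvscsi_host_data of host to initialize
 * @max_requests:	maximum requests (unused)
 *
 * Allocates a page for messages, maps it for DMA, registers the CRQ with
 * the hypervisor and requests the irq.
 * Returns zero on success, -1 on failure.
 */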
static int rpavscsi_init_crq_queue(struct crq_queue *queue,
				   struct ibmvscsi_host_data *hostdata,
				   int max_requests)
{
	int rc;
	int retrc;
	struct vio_dev *vdev = to_vio_dev(hostdata->dev);

	queue->msgs = (struct viosrp_crq *)get_zeroed_page(GFP_KERNEL);

	if (!queue->msgs)
		goto malloc_failed;
	queue->size = PAGE_SIZE / sizeof(*queue->msgs);

	queue->msg_token = dma_map_single(hostdata->dev, queue->msgs,
					  queue->size * sizeof(*queue->msgs),
					  DMA_BIDIRECTIONAL);

	if (dma_mapping_error(queue->msg_token))
		goto map_failed;

	gather_partition_info();
	set_adapter_info(hostdata);

	retrc = rc = plpar_hcall_norets(H_REG_CRQ,
					vdev->unit_address,
					queue->msg_token, PAGE_SIZE);
	if (rc == H_RESOURCE)
		/* maybe kexecing and resource is busy. try a reset */
		rc = rpavscsi_reset_crq_queue(queue,
					      hostdata);

	if (rc == 2) {
		/* Adapter is good, but other end is not ready */
		dev_warn(hostdata->dev, "Partner adapter not ready\n");
		retrc = 0;
	} else if (rc != 0) {
		dev_warn(hostdata->dev, "Error %d opening adapter\n", rc);
		goto reg_crq_failed;
	}

	if (request_irq(vdev->irq,
			rpavscsi_handle_event,
			0, "ibmvscsi", (void *)hostdata) != 0) {
		dev_err(hostdata->dev, "couldn't register irq 0x%x\n",
			vdev->irq);
		goto req_irq_failed;
	}

	rc = vio_enable_interrupts(vdev);
	if (rc != 0) {
		dev_err(hostdata->dev, "Error %d enabling interrupts!!!\n", rc);
		goto req_irq_failed;
	}

	queue->cur = 0;
	spin_lock_init(&queue->lock);

	tasklet_init(&hostdata->srp_task, (void *)rpavscsi_task,
		     (unsigned long)hostdata);

	return retrc;

req_irq_failed:
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
reg_crq_failed:
	dma_unmap_single(hostdata->dev,
			 queue->msg_token,
			 queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
map_failed:
	free_page((unsigned long)queue->msgs);
malloc_failed:
	return -1;
}
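
/**
 * rpavscsi_reenable_crq_queue: - re-enables the CRQ after a pause
 * @queue:	crq_queue to re-enable
 * @hostdata:	ibmvscsi_host_data of host
 *
 * Asks the hypervisor to re-enable the existing CRQ registration.
 */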
static int rpavscsi_reenable_crq_queue(struct crq_queue *queue,
				       struct ibmvscsi_host_data *hostdata)
{
	int rc;
	struct vio_dev *vdev = to_vio_dev(hostdata->dev);

	/* Re-enable the CRQ */
	do {
		rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
	} while ((rc == H_IN_PROGRESS) || (rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));

	if (rc)
		dev_err(hostdata->dev, "Error %d enabling adapter\n", rc);
	return rc;
}

struct ibmvscsi_ops rpavscsi_ops = {
	.init_crq_queue = rpavscsi_init_crq_queue,
	.release_crq_queue = rpavscsi_release_crq_queue,
	.reset_crq_queue = rpavscsi_reset_crq_queue,
	.reenable_crq_queue = rpavscsi_reenable_crq_queue,
	.send_crq = rpavscsi_send_crq,
};