1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18#include <linux/types.h>
19#include <linux/errno.h>
20#include <linux/fs.h>
21#include "wmi.h"
22#include "wil6210.h"
23#include "txrx.h"
24#include "pmc.h"
25
/* Bookkeeping for one PMC payload buffer: the DMA (bus) address handed
 * to the device and the kernel virtual address used by the driver.
 * One entry exists per ring descriptor (see wil_pmc_alloc()).
 */
struct desc_alloc_info {
	dma_addr_t pa;	/* bus address returned by dma_alloc_coherent() */
	void *va;	/* CPU virtual address of the same buffer */
};
30
31static int wil_is_pmc_allocated(struct pmc_ctx *pmc)
32{
33 return !!pmc->pring_va;
34}
35
36void wil_pmc_init(struct wil6210_priv *wil)
37{
38 memset(&wil->pmc, 0, sizeof(struct pmc_ctx));
39 mutex_init(&wil->pmc.lock);
40}
41
42
43
44
45
46
47
48
49
50void wil_pmc_alloc(struct wil6210_priv *wil,
51 int num_descriptors,
52 int descriptor_size)
53{
54 u32 i;
55 struct pmc_ctx *pmc = &wil->pmc;
56 struct device *dev = wil_to_dev(wil);
57 struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev);
58 struct wmi_pmc_cmd pmc_cmd = {0};
59 int last_cmd_err = -ENOMEM;
60
61 mutex_lock(&pmc->lock);
62
63 if (wil_is_pmc_allocated(pmc)) {
64
65 wil_err(wil, "ERROR pmc is already allocated\n");
66 goto no_release_err;
67 }
68 if ((num_descriptors <= 0) || (descriptor_size <= 0)) {
69 wil_err(wil,
70 "Invalid params num_descriptors(%d), descriptor_size(%d)\n",
71 num_descriptors, descriptor_size);
72 last_cmd_err = -EINVAL;
73 goto no_release_err;
74 }
75
76 if (num_descriptors > (1 << WIL_RING_SIZE_ORDER_MAX)) {
77 wil_err(wil,
78 "num_descriptors(%d) exceeds max ring size %d\n",
79 num_descriptors, 1 << WIL_RING_SIZE_ORDER_MAX);
80 last_cmd_err = -EINVAL;
81 goto no_release_err;
82 }
83
84 if (num_descriptors > INT_MAX / descriptor_size) {
85 wil_err(wil,
86 "Overflow in num_descriptors(%d)*descriptor_size(%d)\n",
87 num_descriptors, descriptor_size);
88 last_cmd_err = -EINVAL;
89 goto no_release_err;
90 }
91
92 pmc->num_descriptors = num_descriptors;
93 pmc->descriptor_size = descriptor_size;
94
95 wil_dbg_misc(wil, "pmc_alloc: %d descriptors x %d bytes each\n",
96 num_descriptors, descriptor_size);
97
98
99 pmc->descriptors = kcalloc(num_descriptors,
100 sizeof(struct desc_alloc_info),
101 GFP_KERNEL);
102 if (!pmc->descriptors) {
103 wil_err(wil, "ERROR allocating pmc skb list\n");
104 goto no_release_err;
105 }
106
107 wil_dbg_misc(wil, "pmc_alloc: allocated descriptors info list %p\n",
108 pmc->descriptors);
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123 if (wil->dma_addr_size > 32)
124 dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
125
126 pmc->pring_va = dma_alloc_coherent(dev,
127 sizeof(struct vring_tx_desc) * num_descriptors,
128 &pmc->pring_pa,
129 GFP_KERNEL);
130
131 if (wil->dma_addr_size > 32)
132 dma_set_mask_and_coherent(dev,
133 DMA_BIT_MASK(wil->dma_addr_size));
134
135 wil_dbg_misc(wil,
136 "pmc_alloc: allocated pring %p => %pad. %zd x %d = total %zd bytes\n",
137 pmc->pring_va, &pmc->pring_pa,
138 sizeof(struct vring_tx_desc),
139 num_descriptors,
140 sizeof(struct vring_tx_desc) * num_descriptors);
141
142 if (!pmc->pring_va) {
143 wil_err(wil, "ERROR allocating pmc pring\n");
144 goto release_pmc_skb_list;
145 }
146
147
148
149
150
151 for (i = 0; i < num_descriptors; i++) {
152 struct vring_tx_desc *_d = &pmc->pring_va[i];
153 struct vring_tx_desc dd = {}, *d = ⅆ
154 int j = 0;
155
156 pmc->descriptors[i].va = dma_alloc_coherent(dev,
157 descriptor_size,
158 &pmc->descriptors[i].pa,
159 GFP_KERNEL);
160
161 if (unlikely(!pmc->descriptors[i].va)) {
162 wil_err(wil, "ERROR allocating pmc descriptor %d", i);
163 goto release_pmc_skbs;
164 }
165
166 for (j = 0; j < descriptor_size / sizeof(u32); j++) {
167 u32 *p = (u32 *)pmc->descriptors[i].va + j;
168 *p = PCM_DATA_INVALID_DW_VAL | j;
169 }
170
171
172 d->dma.addr.addr_low =
173 cpu_to_le32(lower_32_bits(pmc->descriptors[i].pa));
174 d->dma.addr.addr_high =
175 cpu_to_le16((u16)upper_32_bits(pmc->descriptors[i].pa));
176 d->dma.status = 0;
177 d->dma.length = cpu_to_le16(descriptor_size);
178 d->dma.d0 = BIT(9) | RX_DMA_D0_CMD_DMA_IT;
179 *_d = *d;
180 }
181
182 wil_dbg_misc(wil, "pmc_alloc: allocated successfully\n");
183
184 pmc_cmd.op = WMI_PMC_ALLOCATE;
185 pmc_cmd.ring_size = cpu_to_le16(pmc->num_descriptors);
186 pmc_cmd.mem_base = cpu_to_le64(pmc->pring_pa);
187
188 wil_dbg_misc(wil, "pmc_alloc: send WMI_PMC_CMD with ALLOCATE op\n");
189 pmc->last_cmd_status = wmi_send(wil,
190 WMI_PMC_CMDID,
191 vif->mid,
192 &pmc_cmd,
193 sizeof(pmc_cmd));
194 if (pmc->last_cmd_status) {
195 wil_err(wil,
196 "WMI_PMC_CMD with ALLOCATE op failed with status %d",
197 pmc->last_cmd_status);
198 goto release_pmc_skbs;
199 }
200
201 mutex_unlock(&pmc->lock);
202
203 return;
204
205release_pmc_skbs:
206 wil_err(wil, "exit on error: Releasing skbs...\n");
207 for (i = 0; i < num_descriptors && pmc->descriptors[i].va; i++) {
208 dma_free_coherent(dev,
209 descriptor_size,
210 pmc->descriptors[i].va,
211 pmc->descriptors[i].pa);
212
213 pmc->descriptors[i].va = NULL;
214 }
215 wil_err(wil, "exit on error: Releasing pring...\n");
216
217 dma_free_coherent(dev,
218 sizeof(struct vring_tx_desc) * num_descriptors,
219 pmc->pring_va,
220 pmc->pring_pa);
221
222 pmc->pring_va = NULL;
223
224release_pmc_skb_list:
225 wil_err(wil, "exit on error: Releasing descriptors info list...\n");
226 kfree(pmc->descriptors);
227 pmc->descriptors = NULL;
228
229no_release_err:
230 pmc->last_cmd_status = last_cmd_err;
231 mutex_unlock(&pmc->lock);
232}
233
234
235
236
237
/* wil_pmc_free - release all PMC resources
 * @wil: driver context
 * @send_pmc_cmd: when non-zero, notify firmware with WMI_PMC_CMD/RELEASE
 *	before freeing host memory
 *
 * Tears down in reverse order of wil_pmc_alloc(): WMI release first,
 * then the pring, then the per-descriptor payload buffers and the
 * bookkeeping list. The result is reported via pmc->last_cmd_status.
 */
void wil_pmc_free(struct wil6210_priv *wil, int send_pmc_cmd)
{
	struct pmc_ctx *pmc = &wil->pmc;
	struct device *dev = wil_to_dev(wil);
	struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev);
	struct wmi_pmc_cmd pmc_cmd = {0};

	mutex_lock(&pmc->lock);

	pmc->last_cmd_status = 0;

	if (!wil_is_pmc_allocated(pmc)) {
		wil_dbg_misc(wil,
			     "pmc_free: Error, can't free - not allocated\n");
		pmc->last_cmd_status = -EPERM;
		mutex_unlock(&pmc->lock);
		return;
	}

	if (send_pmc_cmd) {
		wil_dbg_misc(wil, "send WMI_PMC_CMD with RELEASE op\n");
		pmc_cmd.op = WMI_PMC_RELEASE;
		pmc->last_cmd_status =
			wmi_send(wil, WMI_PMC_CMDID, vif->mid,
				 &pmc_cmd, sizeof(pmc_cmd));
		if (pmc->last_cmd_status) {
			wil_err(wil,
				"WMI_PMC_CMD with RELEASE op failed, status %d",
				pmc->last_cmd_status);
			/* There's nothing we can do with this error.
			 * Normally, it should never occur.
			 * Continue to freeing all memory allocated for pmc.
			 */
		}
	}

	if (pmc->pring_va) {
		size_t buf_size = sizeof(struct vring_tx_desc) *
				  pmc->num_descriptors;

		wil_dbg_misc(wil, "pmc_free: free pring va %p\n",
			     pmc->pring_va);
		dma_free_coherent(dev, buf_size, pmc->pring_va, pmc->pring_pa);

		/* NULL also clears the "allocated" flag checked above */
		pmc->pring_va = NULL;
	} else {
		pmc->last_cmd_status = -ENOENT;
	}

	if (pmc->descriptors) {
		int i;

		/* stop at the first NULL va: alloc may have failed midway */
		for (i = 0;
		     i < pmc->num_descriptors && pmc->descriptors[i].va; i++) {
			dma_free_coherent(dev,
					  pmc->descriptor_size,
					  pmc->descriptors[i].va,
					  pmc->descriptors[i].pa);
			pmc->descriptors[i].va = NULL;
		}
		wil_dbg_misc(wil, "pmc_free: free descriptor info %d/%d\n", i,
			     pmc->num_descriptors);
		wil_dbg_misc(wil,
			     "pmc_free: free pmc descriptors info list %p\n",
			     pmc->descriptors);
		kfree(pmc->descriptors);
		pmc->descriptors = NULL;
	} else {
		pmc->last_cmd_status = -ENOENT;
	}

	mutex_unlock(&pmc->lock);
}
311
312
313
314
315
316int wil_pmc_last_cmd_status(struct wil6210_priv *wil)
317{
318 wil_dbg_misc(wil, "pmc_last_cmd_status: status %d\n",
319 wil->pmc.last_cmd_status);
320
321 return wil->pmc.last_cmd_status;
322}
323
324
325
326
327
/* wil_pmc_read - debugfs read of the PMC payload buffers
 *
 * Presents the per-descriptor buffers as one flat file of
 * descriptor_size * num_descriptors bytes. A single call reads at most
 * up to the end of the descriptor containing *f_pos; callers get the
 * rest on subsequent reads. Returns bytes copied, 0 at end of buffer,
 * or -EPERM when the PMC is not allocated.
 */
ssize_t wil_pmc_read(struct file *filp, char __user *buf, size_t count,
		     loff_t *f_pos)
{
	struct wil6210_priv *wil = filp->private_data;
	struct pmc_ctx *pmc = &wil->pmc;
	size_t retval = 0;
	unsigned long long idx;
	loff_t offset;
	size_t pmc_size;

	mutex_lock(&pmc->lock);

	if (!wil_is_pmc_allocated(pmc)) {
		wil_err(wil, "error, pmc is not allocated!\n");
		pmc->last_cmd_status = -EPERM;
		mutex_unlock(&pmc->lock);
		return -EPERM;
	}

	pmc_size = pmc->descriptor_size * pmc->num_descriptors;

	wil_dbg_misc(wil,
		     "pmc_read: size %u, pos %lld\n",
		     (u32)count, *f_pos);

	pmc->last_cmd_status = 0;

	/* 64-bit divide via do_div: idx = descriptor index,
	 * offset = byte position inside that descriptor
	 */
	idx = *f_pos;
	do_div(idx, pmc->descriptor_size);
	offset = *f_pos - (idx * pmc->descriptor_size);

	/* bounds check before descriptors[idx] is dereferenced below */
	if (*f_pos >= pmc_size) {
		wil_dbg_misc(wil,
			     "pmc_read: reached end of pmc buf: %lld >= %u\n",
			     *f_pos, (u32)pmc_size);
		pmc->last_cmd_status = -ERANGE;
		/* retval stays 0 => EOF to userspace */
		goto out;
	}

	wil_dbg_misc(wil,
		     "pmc_read: read from pos %lld (descriptor %llu, offset %llu) %zu bytes\n",
		     *f_pos, idx, offset, count);

	/* copy from the current descriptor only; clips count to the
	 * descriptor end via simple_read_from_buffer's size argument
	 */
	retval = simple_read_from_buffer(buf,
					 count,
					 &offset,
					 pmc->descriptors[idx].va,
					 pmc->descriptor_size);
	*f_pos += retval;
out:
	mutex_unlock(&pmc->lock);

	return retval;
}
383
384loff_t wil_pmc_llseek(struct file *filp, loff_t off, int whence)
385{
386 loff_t newpos;
387 struct wil6210_priv *wil = filp->private_data;
388 struct pmc_ctx *pmc = &wil->pmc;
389 size_t pmc_size;
390
391 mutex_lock(&pmc->lock);
392
393 if (!wil_is_pmc_allocated(pmc)) {
394 wil_err(wil, "error, pmc is not allocated!\n");
395 pmc->last_cmd_status = -EPERM;
396 mutex_unlock(&pmc->lock);
397 return -EPERM;
398 }
399
400 pmc_size = pmc->descriptor_size * pmc->num_descriptors;
401
402 switch (whence) {
403 case 0:
404 newpos = off;
405 break;
406
407 case 1:
408 newpos = filp->f_pos + off;
409 break;
410
411 case 2:
412 newpos = pmc_size;
413 break;
414
415 default:
416 newpos = -EINVAL;
417 goto out;
418 }
419
420 if (newpos < 0) {
421 newpos = -EINVAL;
422 goto out;
423 }
424 if (newpos > pmc_size)
425 newpos = pmc_size;
426
427 filp->f_pos = newpos;
428
429out:
430 mutex_unlock(&pmc->lock);
431
432 return newpos;
433}
434