#include <linux/types.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include "wmi.h"
#include "wil6210.h"
#include "txrx.h"
#include "pmc.h"

struct desc_alloc_info {
	dma_addr_t pa;
	void *va;
};

static int wil_is_pmc_allocated(struct pmc_ctx *pmc)
{
	return !!pmc->pring_va;
}

void wil_pmc_init(struct wil6210_priv *wil)
{
	memset(&wil->pmc, 0, sizeof(struct pmc_ctx));
	mutex_init(&wil->pmc.lock);
}
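
/* Allocate the PMC machinery: a DMA descriptor ring ("pring") plus one
 * coherent data buffer per descriptor, then ask firmware to start using it
 * via WMI_PMC_CMD (WMI_PMC_ALLOCATE op).
 * On any failure the partially allocated resources are released and the
 * error is recorded in pmc->last_cmd_status.
 */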
void wil_pmc_alloc(struct wil6210_priv *wil,
		   int num_descriptors,
		   int descriptor_size)
{
	u32 i;
	struct pmc_ctx *pmc = &wil->pmc;
	struct device *dev = wil_to_dev(wil);
	struct wmi_pmc_cmd pmc_cmd = {0};
	int last_cmd_err = -ENOMEM;

	mutex_lock(&pmc->lock);

	if (wil_is_pmc_allocated(pmc)) {
		/* sanity check */
		wil_err(wil, "ERROR pmc is already allocated\n");
		goto no_release_err;
	}
	if ((num_descriptors <= 0) || (descriptor_size <= 0)) {
		wil_err(wil,
			"Invalid params num_descriptors(%d), descriptor_size(%d)\n",
			num_descriptors, descriptor_size);
		last_cmd_err = -EINVAL;
		goto no_release_err;
	}

	if (num_descriptors > (1 << WIL_RING_SIZE_ORDER_MAX)) {
		wil_err(wil,
			"num_descriptors(%d) exceeds max ring size %d\n",
			num_descriptors, 1 << WIL_RING_SIZE_ORDER_MAX);
		last_cmd_err = -EINVAL;
		goto no_release_err;
	}

	if (num_descriptors > INT_MAX / descriptor_size) {
		wil_err(wil,
			"Overflow in num_descriptors(%d)*descriptor_size(%d)\n",
			num_descriptors, descriptor_size);
		last_cmd_err = -EINVAL;
		goto no_release_err;
	}

	pmc->num_descriptors = num_descriptors;
	pmc->descriptor_size = descriptor_size;

	wil_dbg_misc(wil, "pmc_alloc: %d descriptors x %d bytes each\n",
		     num_descriptors, descriptor_size);

	/* allocate the bookkeeping list of per-descriptor data buffers */
	pmc->descriptors = kcalloc(num_descriptors,
				   sizeof(struct desc_alloc_info),
				   GFP_KERNEL);
	if (!pmc->descriptors) {
		wil_err(wil, "ERROR allocating pmc skb list\n");
		goto no_release_err;
	}

	wil_dbg_misc(wil, "pmc_alloc: allocated descriptors info list %p\n",
		     pmc->descriptors);

	/* Allocate the descriptor ring (pring) as one coherent DMA buffer;
	 * dma_alloc_coherent() provides suitably aligned memory.
	 */
	pmc->pring_va = dma_alloc_coherent(dev,
			sizeof(struct vring_tx_desc) * num_descriptors,
			&pmc->pring_pa,
			GFP_KERNEL);

	wil_dbg_misc(wil,
		     "pmc_alloc: allocated pring %p => %pad. %zd x %d = total %zd bytes\n",
		     pmc->pring_va, &pmc->pring_pa,
		     sizeof(struct vring_tx_desc),
		     num_descriptors,
		     sizeof(struct vring_tx_desc) * num_descriptors);

	if (!pmc->pring_va) {
		wil_err(wil, "ERROR allocating pmc pring\n");
		goto release_pmc_skb_list;
	}

	/* For each ring slot, allocate a data buffer, pre-fill it with a
	 * recognizable "invalid" pattern and point the DMA descriptor at it.
	 */
	for (i = 0; i < num_descriptors; i++) {
		struct vring_tx_desc *_d = &pmc->pring_va[i];
		struct vring_tx_desc dd = {}, *d = &dd;
		int j = 0;

		pmc->descriptors[i].va = dma_alloc_coherent(dev,
			descriptor_size,
			&pmc->descriptors[i].pa,
			GFP_KERNEL);

		if (unlikely(!pmc->descriptors[i].va)) {
			wil_err(wil, "ERROR allocating pmc descriptor %d\n", i);
			goto release_pmc_skbs;
		}

		for (j = 0; j < descriptor_size / sizeof(u32); j++) {
			u32 *p = (u32 *)pmc->descriptors[i].va + j;
			*p = PCM_DATA_INVALID_DW_VAL | j;
		}

		/* configure dma descriptor */
		d->dma.addr.addr_low =
			cpu_to_le32(lower_32_bits(pmc->descriptors[i].pa));
		d->dma.addr.addr_high =
			cpu_to_le16((u16)upper_32_bits(pmc->descriptors[i].pa));
		d->dma.status = 0;
		d->dma.length = cpu_to_le16(descriptor_size);
		d->dma.d0 = BIT(9) | RX_DMA_D0_CMD_DMA_IT;
		*_d = *d;
	}

	wil_dbg_misc(wil, "pmc_alloc: allocated successfully\n");

	pmc_cmd.op = WMI_PMC_ALLOCATE;
	pmc_cmd.ring_size = cpu_to_le16(pmc->num_descriptors);
	pmc_cmd.mem_base = cpu_to_le64(pmc->pring_pa);

	wil_dbg_misc(wil, "pmc_alloc: send WMI_PMC_CMD with ALLOCATE op\n");
	pmc->last_cmd_status = wmi_send(wil,
					WMI_PMC_CMDID,
					&pmc_cmd,
					sizeof(pmc_cmd));
	if (pmc->last_cmd_status) {
		wil_err(wil,
			"WMI_PMC_CMD with ALLOCATE op failed with status %d\n",
			pmc->last_cmd_status);
		goto release_pmc_skbs;
	}

	mutex_unlock(&pmc->lock);

	return;

release_pmc_skbs:
	wil_err(wil, "exit on error: Releasing skbs...\n");
	for (i = 0; i < num_descriptors && pmc->descriptors[i].va; i++) {
		dma_free_coherent(dev,
				  descriptor_size,
				  pmc->descriptors[i].va,
				  pmc->descriptors[i].pa);

		pmc->descriptors[i].va = NULL;
	}
	wil_err(wil, "exit on error: Releasing pring...\n");

	dma_free_coherent(dev,
			  sizeof(struct vring_tx_desc) * num_descriptors,
			  pmc->pring_va,
			  pmc->pring_pa);

	pmc->pring_va = NULL;

release_pmc_skb_list:
	wil_err(wil, "exit on error: Releasing descriptors info list...\n");
	kfree(pmc->descriptors);
	pmc->descriptors = NULL;

no_release_err:
	pmc->last_cmd_status = last_cmd_err;
	mutex_unlock(&pmc->lock);
}
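
/* Release the resources allocated by wil_pmc_alloc(): optionally tell
 * firmware to stop using the ring (WMI_PMC_RELEASE op), then free the
 * pring and every per-descriptor data buffer. The result is recorded in
 * pmc->last_cmd_status.
 */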
void wil_pmc_free(struct wil6210_priv *wil, int send_pmc_cmd)
{
	struct pmc_ctx *pmc = &wil->pmc;
	struct device *dev = wil_to_dev(wil);
	struct wmi_pmc_cmd pmc_cmd = {0};

	mutex_lock(&pmc->lock);

	pmc->last_cmd_status = 0;

	if (!wil_is_pmc_allocated(pmc)) {
		wil_dbg_misc(wil,
			     "pmc_free: Error, can't free - not allocated\n");
		pmc->last_cmd_status = -EPERM;
		mutex_unlock(&pmc->lock);
		return;
	}

	if (send_pmc_cmd) {
		wil_dbg_misc(wil, "send WMI_PMC_CMD with RELEASE op\n");
		pmc_cmd.op = WMI_PMC_RELEASE;
		pmc->last_cmd_status =
			wmi_send(wil, WMI_PMC_CMDID, &pmc_cmd,
				 sizeof(pmc_cmd));
		if (pmc->last_cmd_status) {
			wil_err(wil,
				"WMI_PMC_CMD with RELEASE op failed, status %d\n",
				pmc->last_cmd_status);
			/* There is nothing to do with this error;
			 * continue and release the memory anyway.
			 */
		}
	}

	if (pmc->pring_va) {
		size_t buf_size = sizeof(struct vring_tx_desc) *
				  pmc->num_descriptors;

		wil_dbg_misc(wil, "pmc_free: free pring va %p\n",
			     pmc->pring_va);
		dma_free_coherent(dev, buf_size, pmc->pring_va, pmc->pring_pa);

		pmc->pring_va = NULL;
	} else {
		pmc->last_cmd_status = -ENOENT;
	}

	if (pmc->descriptors) {
		int i;

		for (i = 0;
		     i < pmc->num_descriptors && pmc->descriptors[i].va; i++) {
			dma_free_coherent(dev,
					  pmc->descriptor_size,
					  pmc->descriptors[i].va,
					  pmc->descriptors[i].pa);
			pmc->descriptors[i].va = NULL;
		}
		wil_dbg_misc(wil, "pmc_free: free descriptor info %d/%d\n", i,
			     pmc->num_descriptors);
		wil_dbg_misc(wil,
			     "pmc_free: free pmc descriptors info list %p\n",
			     pmc->descriptors);
		kfree(pmc->descriptors);
		pmc->descriptors = NULL;
	} else {
		pmc->last_cmd_status = -ENOENT;
	}

	mutex_unlock(&pmc->lock);
}
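
/* Report the status of the most recent PMC operation (alloc/free/read):
 * 0 on success, negative errno otherwise.
 */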
int wil_pmc_last_cmd_status(struct wil6210_priv *wil)
{
	wil_dbg_misc(wil, "pmc_last_cmd_status: status %d\n",
		     wil->pmc.last_cmd_status);

	return wil->pmc.last_cmd_status;
}
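
/* Read handler for the PMC data file: copy captured data to user space.
 * Each read is served from a single descriptor buffer, starting at the
 * requested file position and stopping at that descriptor's end.
 */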
ssize_t wil_pmc_read(struct file *filp, char __user *buf, size_t count,
		     loff_t *f_pos)
{
	struct wil6210_priv *wil = filp->private_data;
	struct pmc_ctx *pmc = &wil->pmc;
	size_t retval = 0;
	unsigned long long idx;
	loff_t offset;
	size_t pmc_size;

	mutex_lock(&pmc->lock);

	if (!wil_is_pmc_allocated(pmc)) {
		wil_err(wil, "error, pmc is not allocated!\n");
		pmc->last_cmd_status = -EPERM;
		mutex_unlock(&pmc->lock);
		return -EPERM;
	}

	pmc_size = pmc->descriptor_size * pmc->num_descriptors;

	wil_dbg_misc(wil,
		     "pmc_read: size %u, pos %lld\n",
		     (u32)count, *f_pos);

	pmc->last_cmd_status = 0;

	idx = *f_pos;
	do_div(idx, pmc->descriptor_size);
	offset = *f_pos - (idx * pmc->descriptor_size);

	if (*f_pos >= pmc_size) {
		wil_dbg_misc(wil,
			     "pmc_read: reached end of pmc buf: %lld >= %u\n",
			     *f_pos, (u32)pmc_size);
		pmc->last_cmd_status = -ERANGE;
		goto out;
	}

	wil_dbg_misc(wil,
		     "pmc_read: read from pos %lld (descriptor %llu, offset %llu) %zu bytes\n",
		     *f_pos, idx, offset, count);

	/* copy from within the selected descriptor buffer only */
	retval = simple_read_from_buffer(buf,
					 count,
					 &offset,
					 pmc->descriptors[idx].va,
					 pmc->descriptor_size);
	*f_pos += retval;
out:
	mutex_unlock(&pmc->lock);

	return retval;
}
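
/* llseek handler for the PMC data file: standard SEEK_SET/SEEK_CUR/SEEK_END
 * semantics, with the resulting position clamped to the total PMC buffer
 * size (num_descriptors * descriptor_size).
 */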
loff_t wil_pmc_llseek(struct file *filp, loff_t off, int whence)
{
	loff_t newpos;
	struct wil6210_priv *wil = filp->private_data;
	struct pmc_ctx *pmc = &wil->pmc;
	size_t pmc_size;

	mutex_lock(&pmc->lock);

	if (!wil_is_pmc_allocated(pmc)) {
		wil_err(wil, "error, pmc is not allocated!\n");
		pmc->last_cmd_status = -EPERM;
		mutex_unlock(&pmc->lock);
		return -EPERM;
	}

	pmc_size = pmc->descriptor_size * pmc->num_descriptors;

	switch (whence) {
	case 0: /* SEEK_SET */
		newpos = off;
		break;

	case 1: /* SEEK_CUR */
		newpos = filp->f_pos + off;
		break;

	case 2: /* SEEK_END */
		newpos = pmc_size;
		break;

	default:
		newpos = -EINVAL;
		goto out;
	}

	if (newpos < 0) {
		newpos = -EINVAL;
		goto out;
	}
	if (newpos > pmc_size)
		newpos = pmc_size;

	filp->f_pos = newpos;

out:
	mutex_unlock(&pmc->lock);

	return newpos;
}