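/*
 * Intel SST audio DSP driver - IPC message handling (Merrifield/mrfld)
 */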
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/pm_runtime.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/soc.h>
#include <sound/compress_driver.h>
#include <asm/intel-mid.h>
#include <asm/platform_sst_audio.h>
#include "../sst-mfld-platform.h"
#include "sst.h"
#include "../../common/sst-dsp.h"

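/*
 * sst_create_block - allocate a wait block for a pending IPC message
 *
 * The block records the message id and driver/private id it waits for and
 * is queued on the context's block_list; the sender later sleeps on
 * ctx->wait_queue until the block's condition is set by the reply.
 */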
struct sst_block *sst_create_block(struct intel_sst_drv *ctx,
					u32 msg_id, u32 drv_id)
{
	struct sst_block *msg = NULL;

	dev_dbg(ctx->dev, "Enter\n");
	msg = kzalloc(sizeof(*msg), GFP_KERNEL);
	if (!msg)
		return NULL;
	msg->condition = false;
	msg->on = true;
	msg->msg_id = msg_id;
	msg->drv_id = drv_id;
	spin_lock_bh(&ctx->block_lock);
	list_add_tail(&msg->node, &ctx->block_list);
	spin_unlock_bh(&ctx->block_lock);

	return msg;
}

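/*
 * sst_wake_up_block - complete a wait block when its reply arrives
 *
 * Looks up the block matching the IPC message id and driver id, stores the
 * result code and any reply payload, marks the block's condition true and
 * wakes the sender sleeping on ctx->wait_queue. Returns -EINVAL if no
 * matching block is found, e.g. when the reply belongs to a short message
 * that nobody is waiting on.
 */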
int sst_wake_up_block(struct intel_sst_drv *ctx, int result,
		u32 drv_id, u32 ipc, void *data, u32 size)
{
	struct sst_block *block = NULL;

	dev_dbg(ctx->dev, "Enter\n");

	spin_lock_bh(&ctx->block_lock);
	list_for_each_entry(block, &ctx->block_list, node) {
		dev_dbg(ctx->dev, "Block ipc %d, drv_id %d\n", block->msg_id,
							block->drv_id);
		if (block->msg_id == ipc && block->drv_id == drv_id) {
			dev_dbg(ctx->dev, "free up the block\n");
			block->ret_code = result;
			block->data = data;
			block->size = size;
			block->condition = true;
			spin_unlock_bh(&ctx->block_lock);
			wake_up(&ctx->wait_queue);
			return 0;
		}
	}
	spin_unlock_bh(&ctx->block_lock);
	dev_dbg(ctx->dev,
		"Block not found or a response received for a short msg for ipc %d, drv_id %d\n",
		ipc, drv_id);
	return -EINVAL;
}

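/*
 * sst_free_block - remove a wait block from block_list and free it, along
 * with any reply data still attached to it. Returns -EINVAL if the block
 * is no longer on the list (already freed).
 */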
int sst_free_block(struct intel_sst_drv *ctx, struct sst_block *freed)
{
	struct sst_block *block = NULL, *__block;

	dev_dbg(ctx->dev, "Enter\n");
	spin_lock_bh(&ctx->block_lock);
	list_for_each_entry_safe(block, __block, &ctx->block_list, node) {
		if (block == freed) {
			pr_debug("pvt_id freed --> %d\n", freed->drv_id);
			list_del(&freed->node);
			spin_unlock_bh(&ctx->block_lock);
			kfree(freed->data);
			freed->data = NULL;
			kfree(freed);
			return 0;
		}
	}
	spin_unlock_bh(&ctx->block_lock);
	dev_err(ctx->dev, "block is already freed!!!\n");
	return -EINVAL;
}

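/*
 * sst_post_message_mrfld - post an IPC message to the DSP via the IPCX
 * shim register
 *
 * In the sync case the caller's message is sent directly, busy-waiting
 * (bounded) for the DSP to clear the busy bit. In the async case the next
 * message is taken from ipc_dispatch_list; if the list is empty or the DSP
 * is still busy, nothing is sent and the queued messages are posted later.
 * Large messages copy their payload into the outbound mailbox before the
 * header is written.
 */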
int sst_post_message_mrfld(struct intel_sst_drv *sst_drv_ctx,
		struct ipc_post *ipc_msg, bool sync)
{
	struct ipc_post *msg = ipc_msg;
	union ipc_header_mrfld header;
	unsigned int loop_count = 0;
	int retval = 0;
	unsigned long irq_flags;

	dev_dbg(sst_drv_ctx->dev, "Enter: sync: %d\n", sync);
	spin_lock_irqsave(&sst_drv_ctx->ipc_spin_lock, irq_flags);
	header.full = sst_shim_read64(sst_drv_ctx->shim, SST_IPCX);
	if (sync) {
		while (header.p.header_high.part.busy) {
			if (loop_count > 25) {
				dev_err(sst_drv_ctx->dev,
					"sst: Busy wait failed, can't send this msg\n");
				retval = -EBUSY;
				goto out;
			}
			cpu_relax();
			loop_count++;
			header.full = sst_shim_read64(sst_drv_ctx->shim, SST_IPCX);
		}
	} else {
		if (list_empty(&sst_drv_ctx->ipc_dispatch_list)) {
			/* queue is empty, nothing to send */
			spin_unlock_irqrestore(&sst_drv_ctx->ipc_spin_lock, irq_flags);
			dev_dbg(sst_drv_ctx->dev,
					"Empty msg queue... NO Action\n");
			return 0;
		}

		if (header.p.header_high.part.busy) {
			/* DSP is still processing the previous message */
			spin_unlock_irqrestore(&sst_drv_ctx->ipc_spin_lock, irq_flags);
			dev_dbg(sst_drv_ctx->dev, "Busy not free... post later\n");
			return 0;
		}

		/* take the next message off the dispatch list */
		msg = list_entry(sst_drv_ctx->ipc_dispatch_list.next,
				struct ipc_post, node);
		list_del(&msg->node);
	}
	dev_dbg(sst_drv_ctx->dev, "sst: Post message: header = %x\n",
				msg->mrfld_header.p.header_high.full);
	dev_dbg(sst_drv_ctx->dev, "sst: size = 0x%x\n",
			msg->mrfld_header.p.header_low_payload);

	/* large messages carry their payload in the outbound mailbox */
	if (msg->mrfld_header.p.header_high.part.large)
		memcpy_toio(sst_drv_ctx->mailbox + SST_MAILBOX_SEND,
			msg->mailbox_data,
			msg->mrfld_header.p.header_low_payload);

	sst_shim_write64(sst_drv_ctx->shim, SST_IPCX, msg->mrfld_header.full);

out:
	spin_unlock_irqrestore(&sst_drv_ctx->ipc_spin_lock, irq_flags);
	kfree(msg->mailbox_data);
	kfree(msg);
	return retval;
}

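/*
 * intel_sst_clear_intr_mrfld - acknowledge and clear a busy interrupt
 *
 * Clears the busy bit in ISRX, acks the inbound message in IPCD with a
 * done + IPC_ACK_SUCCESS reply, and unmasks the busy interrupt in IMRX.
 */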
void intel_sst_clear_intr_mrfld(struct intel_sst_drv *sst_drv_ctx)
{
	union interrupt_reg_mrfld isr;
	union interrupt_reg_mrfld imr;
	union ipc_header_mrfld clear_ipc;
	unsigned long irq_flags;

	spin_lock_irqsave(&sst_drv_ctx->ipc_spin_lock, irq_flags);
	imr.full = sst_shim_read64(sst_drv_ctx->shim, SST_IMRX);
	isr.full = sst_shim_read64(sst_drv_ctx->shim, SST_ISRX);

	/* clear the busy interrupt status bit by writing 1 to it */
	isr.part.busy_interrupt = 1;
	sst_shim_write64(sst_drv_ctx->shim, SST_ISRX, isr.full);

	/* ack the inbound message: clear busy, set done with a success code */
	clear_ipc.full = sst_shim_read64(sst_drv_ctx->shim, SST_IPCD);

	clear_ipc.p.header_high.part.busy = 0;
	clear_ipc.p.header_high.part.done = 1;
	clear_ipc.p.header_low_payload = IPC_ACK_SUCCESS;
	sst_shim_write64(sst_drv_ctx->shim, SST_IPCD, clear_ipc.full);

	/* un-mask the busy interrupt again */
	imr.part.busy_interrupt = 0;
	sst_shim_write64(sst_drv_ctx->shim, SST_IMRX, imr.full);
	spin_unlock_irqrestore(&sst_drv_ctx->ipc_spin_lock, irq_flags);
}

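/*
 * process_fw_init - handle the firmware init-complete message
 *
 * Records the firmware version on success, moves the driver back to
 * SST_RESET on failure, and in both cases wakes the block waiting on
 * FW_DWNL_ID so the firmware download path can continue.
 */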
static void process_fw_init(struct intel_sst_drv *sst_drv_ctx,
		void *msg)
{
	struct ipc_header_fw_init *init =
		(struct ipc_header_fw_init *)msg;
	int retval = 0;

	dev_dbg(sst_drv_ctx->dev, "*** FW Init msg came***\n");
	if (init->result) {
		sst_set_fw_state_locked(sst_drv_ctx, SST_RESET);
		dev_err(sst_drv_ctx->dev, "FW Init failed, Error %x\n",
				init->result);
		retval = init->result;
		goto ret;
	}
	if (memcmp(&sst_drv_ctx->fw_version, &init->fw_version,
		   sizeof(init->fw_version)))
		dev_info(sst_drv_ctx->dev, "FW Version %02x.%02x.%02x.%02x\n",
			init->fw_version.type, init->fw_version.major,
			init->fw_version.minor, init->fw_version.build);
	dev_dbg(sst_drv_ctx->dev, "Build date %s Time %s\n",
			init->build_info.date, init->build_info.time);

	/* save the firmware version reported by the DSP */
	sst_drv_ctx->fw_version.type = init->fw_version.type;
	sst_drv_ctx->fw_version.major = init->fw_version.major;
	sst_drv_ctx->fw_version.minor = init->fw_version.minor;
	sst_drv_ctx->fw_version.build = init->fw_version.build;

ret:
	sst_wake_up_block(sst_drv_ctx, retval, FW_DWNL_ID, 0, NULL, 0);
}

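/*
 * process_fw_async_msg - dispatch asynchronous (unsolicited) firmware
 * messages: period-elapsed and drain notifications are routed to the
 * owning stream, async errors are dumped, buffer under-runs are logged,
 * and the init-complete message is handed to process_fw_init().
 */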
static void process_fw_async_msg(struct intel_sst_drv *sst_drv_ctx,
			struct ipc_post *msg)
{
	u32 msg_id;
	int str_id;
	u32 data_size;
	void *data_offset;
	struct stream_info *stream;
	u32 msg_low, pipe_id;

	msg_low = msg->mrfld_header.p.header_low_payload;
	msg_id = ((struct ipc_dsp_hdr *)msg->mailbox_data)->cmd_id;
	data_offset = (msg->mailbox_data + sizeof(struct ipc_dsp_hdr));
	data_size = msg_low - (sizeof(struct ipc_dsp_hdr));

	switch (msg_id) {
	case IPC_SST_PERIOD_ELAPSED_MRFLD:
		pipe_id = ((struct ipc_dsp_hdr *)msg->mailbox_data)->pipe_id;
		str_id = get_stream_id_mrfld(sst_drv_ctx, pipe_id);
		if (str_id > 0) {
			dev_dbg(sst_drv_ctx->dev,
				"Period elapsed rcvd for pipe id 0x%x\n",
				pipe_id);
			stream = &sst_drv_ctx->streams[str_id];
			/* skip the notification if the stream is not active (INIT state) */
			if (stream->status == STREAM_INIT)
				break;
			if (stream->period_elapsed)
				stream->period_elapsed(stream->pcm_substream);
			if (stream->compr_cb)
				stream->compr_cb(stream->compr_cb_param);
		}
		break;

	case IPC_IA_DRAIN_STREAM_MRFLD:
		pipe_id = ((struct ipc_dsp_hdr *)msg->mailbox_data)->pipe_id;
		str_id = get_stream_id_mrfld(sst_drv_ctx, pipe_id);
		if (str_id > 0) {
			stream = &sst_drv_ctx->streams[str_id];
			if (stream->drain_notify)
				stream->drain_notify(stream->drain_cb_param);
		}
		break;

	case IPC_IA_FW_ASYNC_ERR_MRFLD:
		dev_err(sst_drv_ctx->dev, "FW sent async error msg:\n");
		/* a single print_hex_dump() walks the whole payload */
		print_hex_dump(KERN_DEBUG, NULL, DUMP_PREFIX_NONE,
				16, 4, data_offset, data_size, false);
		break;

	case IPC_IA_FW_INIT_CMPLT_MRFLD:
		process_fw_init(sst_drv_ctx, data_offset);
		break;

	case IPC_IA_BUF_UNDER_RUN_MRFLD:
		pipe_id = ((struct ipc_dsp_hdr *)msg->mailbox_data)->pipe_id;
		str_id = get_stream_id_mrfld(sst_drv_ctx, pipe_id);
		if (str_id > 0)
			dev_err(sst_drv_ctx->dev,
				"Buffer under-run for pipe:%#x str_id:%d\n",
				pipe_id, str_id);
		break;

	default:
		dev_err(sst_drv_ctx->dev,
			"Unrecognized async msg from FW msg_id %#x\n", msg_id);
	}
}

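/*
 * sst_process_reply_mrfld - process a reply received from the DSP
 *
 * Async firmware messages (SST_ASYNC_DRV_ID) are handed to
 * process_fw_async_msg(). Short error replies just wake the waiting block
 * with the error code. Large replies have their mailbox payload copied and
 * passed to the waiter, which then owns the copy; if no waiter is found
 * the copy is freed here.
 */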
void sst_process_reply_mrfld(struct intel_sst_drv *sst_drv_ctx,
		struct ipc_post *msg)
{
	unsigned int drv_id;
	void *data;
	union ipc_header_high msg_high;
	u32 msg_low;
	struct ipc_dsp_hdr *dsp_hdr;

	msg_high = msg->mrfld_header.p.header_high;
	msg_low = msg->mrfld_header.p.header_low_payload;

	dev_dbg(sst_drv_ctx->dev, "IPC process message header %x payload %x\n",
		msg->mrfld_header.p.header_high.full,
		msg->mrfld_header.p.header_low_payload);

	drv_id = msg_high.part.drv_id;

	/* unsolicited firmware messages are handled separately */
	if (drv_id == SST_ASYNC_DRV_ID) {
		process_fw_async_msg(sst_drv_ctx, msg);
		return;
	}

	/* short error reply: just pass the result code to the waiter */
	if (msg_high.part.result && drv_id && !msg_high.part.large) {
		dev_err(sst_drv_ctx->dev, "FW sent error response 0x%x\n", msg_low);
		sst_wake_up_block(sst_drv_ctx, msg_high.part.result,
			msg_high.part.drv_id,
			msg_high.part.msg_id, NULL, 0);
		return;
	}

	/*
	 * For a large reply, copy the mailbox payload and hand it to the
	 * waiting block; ownership of the copy moves to the waiter, so it
	 * is only freed here if no matching block was found.
	 */
	if (msg_high.part.large) {
		data = kmemdup((void *)msg->mailbox_data, msg_low, GFP_KERNEL);
		if (!data)
			return;

		dsp_hdr = (struct ipc_dsp_hdr *)data;
		dev_dbg(sst_drv_ctx->dev, "cmd_id %d\n", dsp_hdr->cmd_id);
		if (sst_wake_up_block(sst_drv_ctx, msg_high.part.result,
				msg_high.part.drv_id,
				msg_high.part.msg_id, data, msg_low))
			kfree(data);
	} else {
		sst_wake_up_block(sst_drv_ctx, msg_high.part.result,
				msg_high.part.drv_id,
				msg_high.part.msg_id, NULL, 0);
	}
}