1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23#include <linux/interrupt.h>
24#include <linux/mutex.h>
25#include <linux/pci.h>
26#include <linux/io.h>
27
28#include <sound/core.h>
29#include "mixart.h"
30#include "mixart_hwdep.h"
31#include "mixart_core.h"
32
33
/*
 * Timeout waiting for a firmware answer/notification: 400 ms in jiffies.
 * Fully parenthesized so the macro expands safely inside larger expressions
 * (the previous form "(400 * HZ) / 1000" broke under operators with higher
 * precedence, e.g. "x % MSG_TIMEOUT_JIFFIES").
 */
#define MSG_TIMEOUT_JIFFIES ((400 * HZ) / 1000)

#define MSG_DESCRIPTOR_SIZE 0x24	/* size of the message header written to the board */
#define MSG_HEADER_SIZE (MSG_DESCRIPTOR_SIZE + 4)

#define MSG_DEFAULT_SIZE 512		/* default maximum answer size in bytes */

/* the low two bits of a message frame address encode the message type */
#define MSG_TYPE_MASK 0x00000003
#define MSG_TYPE_NOTIFY 0		/* notification coming from the board */
#define MSG_TYPE_COMMAND 1		/* command posted by the board */
#define MSG_TYPE_REQUEST 2		/* request posted by the driver */
#define MSG_TYPE_ANSWER 3		/* answer to a driver request */
#define MSG_CANCEL_NOTIFY_MASK 0x80000000	/* set when a notification was canceled */
47
48
/*
 * retrieve_msg_frame - pop one message frame address from the outbound post FIFO
 * @mgr: mixart manager
 * @msg_frame: filled with the frame address read from the on-board stack
 *
 * Returns 1 when a frame was retrieved, 0 when the FIFO is empty or the
 * tail pointer read from the board is outside the stack bounds.
 */
static int retrieve_msg_frame(struct mixart_mgr *mgr, u32 *msg_frame)
{
	/* pointers into the board's outbound post circular stack (big-endian MMIO) */
	u32 headptr, tailptr;

	tailptr = readl_be(MIXART_MEM(mgr, MSG_OUTBOUND_POST_TAIL));
	headptr = readl_be(MIXART_MEM(mgr, MSG_OUTBOUND_POST_HEAD));

	if (tailptr == headptr)
		return 0; /* FIFO empty: nothing posted by the board */

	/* sanity-check the board-provided tail against the stack boundaries */
	if (tailptr < MSG_OUTBOUND_POST_STACK)
		return 0;
	if (tailptr >= MSG_OUTBOUND_POST_STACK + MSG_BOUND_STACK_SIZE)
		return 0;

	/* read the frame address stored at the tail slot */
	*msg_frame = readl_be(MIXART_MEM(mgr, tailptr));

	/* advance the tail by one 32-bit slot, wrapping at the stack end */
	tailptr += 4;
	if( tailptr >= (MSG_OUTBOUND_POST_STACK+MSG_BOUND_STACK_SIZE) )
		tailptr = MSG_OUTBOUND_POST_STACK;
	writel_be(tailptr, MIXART_MEM(mgr, MSG_OUTBOUND_POST_TAIL));

	return 1;
}
75
/*
 * get_msg - read one message from board memory and recycle its frame
 * @mgr: mixart manager
 * @resp: on entry resp->size is the caller's buffer capacity and resp->data
 *	the destination buffer; on success resp->size is the payload size and
 *	message_id/uid are filled from the frame header
 * @msg_frame_address: address of the message frame in board memory
 *
 * Takes mgr->msg_lock itself — callers must NOT hold it.
 * Returns 0 on success, -EINVAL on a bad size or corrupted free-stack head.
 */
static int get_msg(struct mixart_mgr *mgr, struct mixart_msg *resp,
		   u32 msg_frame_address )
{
	u32 headptr;
	u32 size;
	int err;
#ifndef __BIG_ENDIAN
	unsigned int i;
#endif

	mutex_lock(&mgr->msg_lock);
	err = 0;

	/* copy the message descriptor from the board (big-endian MMIO) */
	size = readl_be(MIXART_MEM(mgr, msg_frame_address)); /* descriptor + data size */
	resp->message_id = readl_be(MIXART_MEM(mgr, msg_frame_address + 4));
	resp->uid.object_id = readl_be(MIXART_MEM(mgr, msg_frame_address + 8));
	resp->uid.desc = readl_be(MIXART_MEM(mgr, msg_frame_address + 12));

	/* reject undersized frames and payloads larger than the caller's buffer */
	if( (size < MSG_DESCRIPTOR_SIZE) || (resp->size < (size - MSG_DESCRIPTOR_SIZE))) {
		err = -EINVAL;
		dev_err(&mgr->pci->dev,
			"problem with response size = %d\n", size);
		goto _clean_exit;
	}
	size -= MSG_DESCRIPTOR_SIZE; /* payload size only */

	memcpy_fromio(resp->data, MIXART_MEM(mgr, msg_frame_address + MSG_HEADER_SIZE ), size);
	resp->size = size;

	/* board data is big-endian: swap the payload in place on LE hosts */
#ifndef __BIG_ENDIAN
	size /= 4; /* number of u32 words */
	for(i=0; i < size; i++) {
		((u32*)resp->data)[i] = be32_to_cpu(((u32*)resp->data)[i]);
	}
#endif

	/* return the frame to the outbound free stack so the board can reuse it */
	headptr = readl_be(MIXART_MEM(mgr, MSG_OUTBOUND_FREE_HEAD));

	/* sanity-check the board-provided head pointer */
	if( (headptr < MSG_OUTBOUND_FREE_STACK) || ( headptr >= (MSG_OUTBOUND_FREE_STACK+MSG_BOUND_STACK_SIZE))) {
		err = -EINVAL;
		goto _clean_exit;
	}

	/* push the frame address at the head slot */
	writel_be(msg_frame_address, MIXART_MEM(mgr, headptr));

	/* advance the head by one slot, wrapping at the stack end */
	headptr += 4;
	if( headptr >= (MSG_OUTBOUND_FREE_STACK+MSG_BOUND_STACK_SIZE) )
		headptr = MSG_OUTBOUND_FREE_STACK;

	writel_be(headptr, MIXART_MEM(mgr, MSG_OUTBOUND_FREE_HEAD));

 _clean_exit:
	mutex_unlock(&mgr->msg_lock);

	return err;
}
139
140
141
142
143
144
/*
 * send_msg - post one request message frame to the board
 * @mgr: mixart manager
 * @msg: request to send; msg->size must be a multiple of 4
 * @max_answersize: maximum size the board may use for the answer
 * @mark_pending: when non-zero, record the awaited event in mgr->pending_event
 *	so the irq handler can wake the sleeping sender
 * @msg_event: in: notification event to wait for (0 means "wait for the
 *	answer frame"); out: set to the posted frame address when it was 0
 *
 * Callers hold mgr->msg_lock (all three call sites in this file do).
 * Returns 0 on success, -EINVAL on bad size or corrupted FIFO pointers,
 * -EBUSY when no inbound frame is free.
 */
static int send_msg( struct mixart_mgr *mgr,
		     struct mixart_msg *msg,
		     int max_answersize,
		     int mark_pending,
		     u32 *msg_event)
{
	u32 headptr, tailptr;
	u32 msg_frame_address;
	int i;

	if (snd_BUG_ON(msg->size % 4))
		return -EINVAL;

	/* take a free message frame from the inbound free FIFO */
	tailptr = readl_be(MIXART_MEM(mgr, MSG_INBOUND_FREE_TAIL));
	headptr = readl_be(MIXART_MEM(mgr, MSG_INBOUND_FREE_HEAD));

	if (tailptr == headptr) {
		dev_err(&mgr->pci->dev, "error: no message frame available\n");
		return -EBUSY;
	}

	/* sanity-check the board-provided tail pointer */
	if( (tailptr < MSG_INBOUND_FREE_STACK) || (tailptr >= (MSG_INBOUND_FREE_STACK+MSG_BOUND_STACK_SIZE))) {
		return -EINVAL;
	}

	msg_frame_address = readl_be(MIXART_MEM(mgr, tailptr));
	writel(0, MIXART_MEM(mgr, tailptr)); /* clear the consumed slot */

	/* advance the free-FIFO tail, wrapping at the stack end */
	tailptr += 4;
	if( tailptr >= (MSG_INBOUND_FREE_STACK+MSG_BOUND_STACK_SIZE) )
		tailptr = MSG_INBOUND_FREE_STACK;

	writel_be(tailptr, MIXART_MEM(mgr, MSG_INBOUND_FREE_TAIL));

	/* write the message descriptor into the frame (big-endian MMIO).
	 * Field layout follows the embedded firmware protocol; the repeated
	 * MSG_DESCRIPTOR_SIZE words are offsets/sizes expected by the board. */
	writel_be( msg->size + MSG_DESCRIPTOR_SIZE, MIXART_MEM(mgr, msg_frame_address) );
	writel_be( msg->message_id , MIXART_MEM(mgr, msg_frame_address + 4) );
	writel_be( msg->uid.object_id, MIXART_MEM(mgr, msg_frame_address + 8) );
	writel_be( msg->uid.desc, MIXART_MEM(mgr, msg_frame_address + 12) );
	writel_be( MSG_DESCRIPTOR_SIZE, MIXART_MEM(mgr, msg_frame_address + 16) );
	writel_be( MSG_DESCRIPTOR_SIZE, MIXART_MEM(mgr, msg_frame_address + 20) );
	writel_be( msg->size, MIXART_MEM(mgr, msg_frame_address + 24) );
	writel_be( MSG_DESCRIPTOR_SIZE, MIXART_MEM(mgr, msg_frame_address + 28) );
	writel_be( 0, MIXART_MEM(mgr, msg_frame_address + 32) );
	writel_be( MSG_DESCRIPTOR_SIZE + max_answersize, MIXART_MEM(mgr, msg_frame_address + 36) );

	/* copy the request payload word by word after the header */
	for( i=0; i < msg->size; i+=4 ) {
		writel_be( *(u32*)(msg->data + i), MIXART_MEM(mgr, MSG_HEADER_SIZE + msg_frame_address + i) );
	}

	if( mark_pending ) {
		if( *msg_event ) {
			/* caller waits for a specific notification event */
			mgr->pending_event = *msg_event;
		}
		else {
			/* caller waits for the answer to this very frame */
			mgr->pending_event = msg_frame_address;

			/* let the caller know which frame to read the answer from */
			*msg_event = msg_frame_address;
		}
	}

	/* tag the frame as a driver request (low bits of the address) */
	msg_frame_address |= MSG_TYPE_REQUEST;

	/* post the frame on the inbound post FIFO */
	headptr = readl_be(MIXART_MEM(mgr, MSG_INBOUND_POST_HEAD));

	if( (headptr < MSG_INBOUND_POST_STACK) || (headptr >= (MSG_INBOUND_POST_STACK+MSG_BOUND_STACK_SIZE))) {
		return -EINVAL;
	}

	writel_be(msg_frame_address, MIXART_MEM(mgr, headptr));

	/* advance the post-FIFO head, wrapping at the stack end */
	headptr += 4;
	if( headptr >= (MSG_INBOUND_POST_STACK+MSG_BOUND_STACK_SIZE) )
		headptr = MSG_INBOUND_POST_STACK;

	writel_be(headptr, MIXART_MEM(mgr, MSG_INBOUND_POST_HEAD));

	return 0;
}
235
236
/*
 * snd_mixart_send_msg - send a request and wait for its answer
 * @mgr: mixart manager
 * @request: the message to send
 * @max_resp_size: capacity of @resp_data in bytes
 * @resp_data: buffer receiving the answer payload
 *
 * Sleeps uninterruptibly until the irq thread signals the answer or the
 * MSG_TIMEOUT_JIFFIES timeout elapses.
 *
 * Returns 0 on success, a send_msg()/get_msg() error code, or -EIO on timeout.
 */
int snd_mixart_send_msg(struct mixart_mgr *mgr, struct mixart_msg *request, int max_resp_size, void *resp_data)
{
	struct mixart_msg resp;
	u32 msg_frame = 0; /* 0 = wait for the answer frame, not a notification */
	int err;
	wait_queue_t wait;
	long timeout;

	init_waitqueue_entry(&wait, current);

	mutex_lock(&mgr->msg_lock);
	/* send the request and mark the answer as pending */
	err = send_msg(mgr, request, max_resp_size, 1, &msg_frame);
	if (err) {
		mutex_unlock(&mgr->msg_lock);
		return err;
	}

	/* register on the wait queue BEFORE dropping msg_lock: the irq thread
	 * takes msg_lock before wake_up(), so the wakeup cannot be lost */
	set_current_state(TASK_UNINTERRUPTIBLE);
	add_wait_queue(&mgr->msg_sleep, &wait);
	mutex_unlock(&mgr->msg_lock);
	timeout = schedule_timeout(MSG_TIMEOUT_JIFFIES);
	remove_wait_queue(&mgr->msg_sleep, &wait);

	if (! timeout) {
		/* no answer within the timeout */
		dev_err(&mgr->pci->dev,
			"error: no response on msg %x\n", msg_frame);
		return -EIO;
	}

	/* read the answer from the frame address send_msg() gave back */
	resp.message_id = 0;
	resp.uid = (struct mixart_uid){0,0};
	resp.data = resp_data;
	resp.size = max_resp_size;

	err = get_msg(mgr, &resp, msg_frame);

	/* the answer's id must echo the request's id */
	if( request->message_id != resp.message_id )
		dev_err(&mgr->pci->dev, "RESPONSE ERROR!\n");

	return err;
}
281
282
283int snd_mixart_send_msg_wait_notif(struct mixart_mgr *mgr,
284 struct mixart_msg *request, u32 notif_event)
285{
286 int err;
287 wait_queue_t wait;
288 long timeout;
289
290 if (snd_BUG_ON(!notif_event))
291 return -EINVAL;
292 if (snd_BUG_ON((notif_event & MSG_TYPE_MASK) != MSG_TYPE_NOTIFY))
293 return -EINVAL;
294 if (snd_BUG_ON(notif_event & MSG_CANCEL_NOTIFY_MASK))
295 return -EINVAL;
296
297 init_waitqueue_entry(&wait, current);
298
299 mutex_lock(&mgr->msg_lock);
300
301 err = send_msg(mgr, request, MSG_DEFAULT_SIZE, 1, ¬if_event);
302 if(err) {
303 mutex_unlock(&mgr->msg_lock);
304 return err;
305 }
306
307 set_current_state(TASK_UNINTERRUPTIBLE);
308 add_wait_queue(&mgr->msg_sleep, &wait);
309 mutex_unlock(&mgr->msg_lock);
310 timeout = schedule_timeout(MSG_TIMEOUT_JIFFIES);
311 remove_wait_queue(&mgr->msg_sleep, &wait);
312
313 if (! timeout) {
314
315 dev_err(&mgr->pci->dev,
316 "error: notification %x not received\n", notif_event);
317 return -EIO;
318 }
319
320 return 0;
321}
322
323
324int snd_mixart_send_msg_nonblock(struct mixart_mgr *mgr, struct mixart_msg *request)
325{
326 u32 message_frame;
327 int err;
328
329
330 mutex_lock(&mgr->msg_lock);
331 err = send_msg(mgr, request, MSG_DEFAULT_SIZE, 0, &message_frame);
332 mutex_unlock(&mgr->msg_lock);
333
334
335 atomic_inc(&mgr->msg_processed);
336
337 return err;
338}
339
340
341
/* scratch buffer for reading message bodies in the irq paths.
 * NOTE(review): assumed serialized by the callers' locking (mgr->lock in the
 * threaded irq, msg_lock in the message paths) — confirm before reuse. */
static u32 mixart_msg_data[MSG_DEFAULT_SIZE / 4];
343
344
/*
 * snd_mixart_process_msg - drain the driver-side message FIFO
 * @mgr: mixart manager
 *
 * Pops every frame queued in mgr->msg_fifo by the irq thread, reads each
 * message via get_msg() and reports errors for the known stream start/stop
 * answers.  get_msg() takes mgr->msg_lock itself, so callers must NOT hold
 * msg_lock when invoking this function.
 */
static void snd_mixart_process_msg(struct mixart_mgr *mgr)
{
	struct mixart_msg resp;
	u32 msg, addr, type;
	int err;

	while (mgr->msg_fifo_readptr != mgr->msg_fifo_writeptr) {
		/* pop one entry from the circular software FIFO */
		msg = mgr->msg_fifo[mgr->msg_fifo_readptr];
		mgr->msg_fifo_readptr++;
		mgr->msg_fifo_readptr %= MSG_FIFO_SIZE;

		/* low bits carry the message type, the rest is the frame address */
		addr = msg & ~MSG_TYPE_MASK;
		type = msg & MSG_TYPE_MASK;

		switch (type) {
		case MSG_TYPE_ANSWER:
			/* read the answer into the shared scratch buffer */
			resp.message_id = 0;
			resp.data = mixart_msg_data;
			resp.size = sizeof(mixart_msg_data);
			err = get_msg(mgr, &resp, addr);
			if( err < 0 ) {
				dev_err(&mgr->pci->dev,
					"error(%d) reading mf %x\n",
					err, msg);
				break;
			}

			switch(resp.message_id) {
			case MSG_STREAM_START_INPUT_STAGE_PACKET:
			case MSG_STREAM_START_OUTPUT_STAGE_PACKET:
			case MSG_STREAM_STOP_INPUT_STAGE_PACKET:
			case MSG_STREAM_STOP_OUTPUT_STAGE_PACKET:
				/* first payload word is the status; non-zero = failure */
				if(mixart_msg_data[0])
					dev_err(&mgr->pci->dev,
						"error MSG_STREAM_ST***_***PUT_STAGE_PACKET status=%x\n",
						mixart_msg_data[0]);
				break;
			default:
				dev_dbg(&mgr->pci->dev,
					"received mf(%x) : msg_id(%x) uid(%x, %x) size(%zd)\n",
					msg, resp.message_id, resp.uid.object_id, resp.uid.desc, resp.size);
				break;
			}
			break;
		case MSG_TYPE_NOTIFY:
			/* fall through */
		case MSG_TYPE_COMMAND:
			/* fall through */
		default:
			dev_err(&mgr->pci->dev,
				"doesn't know what to do with message %x\n",
				msg);
		}

		/* one expected answer has now been consumed */
		atomic_dec(&mgr->msg_processed);

	}
}
406
407
/*
 * snd_mixart_interrupt - primary (hard) PCI interrupt handler
 *
 * Checks whether the board's outbound doorbell (MIXART_OIDI) caused the
 * interrupt; if not, returns IRQ_NONE so other handlers can claim it.
 * Otherwise masks further board interrupts, acknowledges the doorbell and
 * defers the message processing to snd_mixart_threaded_irq().
 */
irqreturn_t snd_mixart_interrupt(int irq, void *dev_id)
{
	struct mixart_mgr *mgr = dev_id;
	u32 it_reg;

	it_reg = readl_le(MIXART_REG(mgr, MIXART_PCI_OMISR_OFFSET));
	if( !(it_reg & MIXART_OIDI) ) {
		/* not our doorbell: the interrupt belongs to another device */
		return IRQ_NONE;
	}

	/* mask board interrupts until the threaded handler re-enables them */
	writel_le(MIXART_HOST_ALL_INTERRUPT_MASKED, MIXART_REG(mgr, MIXART_PCI_OMIMR_OFFSET));

	/* read the outbound doorbell and write it back to acknowledge */
	it_reg = readl(MIXART_REG(mgr, MIXART_PCI_ODBR_OFFSET));
	writel(it_reg, MIXART_REG(mgr, MIXART_PCI_ODBR_OFFSET));

	/* clear the doorbell interrupt status bit */
	writel_le( MIXART_OIDI, MIXART_REG(mgr, MIXART_PCI_OMISR_OFFSET) );

	return IRQ_WAKE_THREAD;
}
431
432irqreturn_t snd_mixart_threaded_irq(int irq, void *dev_id)
433{
434 struct mixart_mgr *mgr = dev_id;
435 int err;
436 struct mixart_msg resp;
437 u32 msg;
438
439 mutex_lock(&mgr->lock);
440
441 while (retrieve_msg_frame(mgr, &msg)) {
442
443 switch (msg & MSG_TYPE_MASK) {
444 case MSG_TYPE_COMMAND:
445 resp.message_id = 0;
446 resp.data = mixart_msg_data;
447 resp.size = sizeof(mixart_msg_data);
448 err = get_msg(mgr, &resp, msg & ~MSG_TYPE_MASK);
449 if( err < 0 ) {
450 dev_err(&mgr->pci->dev,
451 "interrupt: error(%d) reading mf %x\n",
452 err, msg);
453 break;
454 }
455
456 if(resp.message_id == MSG_SERVICES_TIMER_NOTIFY) {
457 int i;
458 struct mixart_timer_notify *notify;
459 notify = (struct mixart_timer_notify *)mixart_msg_data;
460
461 for(i=0; i<notify->stream_count; i++) {
462
463 u32 buffer_id = notify->streams[i].buffer_id;
464 unsigned int chip_number = (buffer_id & MIXART_NOTIFY_CARD_MASK) >> MIXART_NOTIFY_CARD_OFFSET;
465 unsigned int pcm_number = (buffer_id & MIXART_NOTIFY_PCM_MASK ) >> MIXART_NOTIFY_PCM_OFFSET;
466 unsigned int sub_number = buffer_id & MIXART_NOTIFY_SUBS_MASK;
467 unsigned int is_capture = ((buffer_id & MIXART_NOTIFY_CAPT_MASK) != 0);
468
469 struct snd_mixart *chip = mgr->chip[chip_number];
470 struct mixart_stream *stream;
471
472 if ((chip_number >= mgr->num_cards) || (pcm_number >= MIXART_PCM_TOTAL) || (sub_number >= MIXART_PLAYBACK_STREAMS)) {
473 dev_err(&mgr->pci->dev,
474 "error MSG_SERVICES_TIMER_NOTIFY buffer_id (%x) pos(%d)\n",
475 buffer_id, notify->streams[i].sample_pos_low_part);
476 break;
477 }
478
479 if (is_capture)
480 stream = &chip->capture_stream[pcm_number];
481 else
482 stream = &chip->playback_stream[pcm_number][sub_number];
483
484 if (stream->substream && (stream->status == MIXART_STREAM_STATUS_RUNNING)) {
485 struct snd_pcm_runtime *runtime = stream->substream->runtime;
486 int elapsed = 0;
487 u64 sample_count = ((u64)notify->streams[i].sample_pos_high_part) << 32;
488 sample_count |= notify->streams[i].sample_pos_low_part;
489
490 while (1) {
491 u64 new_elapse_pos = stream->abs_period_elapsed + runtime->period_size;
492
493 if (new_elapse_pos > sample_count) {
494 break;
495 }
496 else {
497 elapsed = 1;
498 stream->buf_periods++;
499 if (stream->buf_periods >= runtime->periods)
500 stream->buf_periods = 0;
501
502 stream->abs_period_elapsed = new_elapse_pos;
503 }
504 }
505 stream->buf_period_frag = (u32)( sample_count - stream->abs_period_elapsed );
506
507 if(elapsed) {
508 mutex_unlock(&mgr->lock);
509 snd_pcm_period_elapsed(stream->substream);
510 mutex_lock(&mgr->lock);
511 }
512 }
513 }
514 break;
515 }
516 if(resp.message_id == MSG_SERVICES_REPORT_TRACES) {
517 if(resp.size > 1) {
518#ifndef __BIG_ENDIAN
519
520 int i;
521 for(i=0; i<(resp.size/4); i++) {
522 (mixart_msg_data)[i] = cpu_to_be32((mixart_msg_data)[i]);
523 }
524#endif
525 ((char*)mixart_msg_data)[resp.size - 1] = 0;
526 dev_dbg(&mgr->pci->dev,
527 "MIXART TRACE : %s\n",
528 (char *)mixart_msg_data);
529 }
530 break;
531 }
532
533 dev_dbg(&mgr->pci->dev, "command %x not handled\n",
534 resp.message_id);
535 break;
536
537 case MSG_TYPE_NOTIFY:
538 if(msg & MSG_CANCEL_NOTIFY_MASK) {
539 msg &= ~MSG_CANCEL_NOTIFY_MASK;
540 dev_err(&mgr->pci->dev,
541 "canceled notification %x !\n", msg);
542 }
543
544 case MSG_TYPE_ANSWER:
545
546 mutex_lock(&mgr->msg_lock);
547 if( (msg & ~MSG_TYPE_MASK) == mgr->pending_event ) {
548 wake_up(&mgr->msg_sleep);
549 mgr->pending_event = 0;
550 }
551
552 else {
553 mgr->msg_fifo[mgr->msg_fifo_writeptr] = msg;
554 mgr->msg_fifo_writeptr++;
555 mgr->msg_fifo_writeptr %= MSG_FIFO_SIZE;
556 snd_mixart_process_msg(mgr);
557 }
558 mutex_unlock(&mgr->msg_lock);
559 break;
560 case MSG_TYPE_REQUEST:
561 default:
562 dev_dbg(&mgr->pci->dev,
563 "interrupt received request %x\n", msg);
564
565 break;
566 }
567 }
568
569
570 writel_le( MIXART_ALLOW_OUTBOUND_DOORBELL, MIXART_REG( mgr, MIXART_PCI_OMIMR_OFFSET));
571
572 mutex_unlock(&mgr->lock);
573
574 return IRQ_HANDLED;
575}
576
577
578void snd_mixart_init_mailbox(struct mixart_mgr *mgr)
579{
580 writel( 0, MIXART_MEM( mgr, MSG_HOST_RSC_PROTECTION ) );
581 writel( 0, MIXART_MEM( mgr, MSG_AGENT_RSC_PROTECTION ) );
582
583
584 if(mgr->irq >= 0) {
585 writel_le( MIXART_ALLOW_OUTBOUND_DOORBELL, MIXART_REG( mgr, MIXART_PCI_OMIMR_OFFSET));
586 }
587 return;
588}
589
590void snd_mixart_exit_mailbox(struct mixart_mgr *mgr)
591{
592
593 writel_le( MIXART_HOST_ALL_INTERRUPT_MASKED, MIXART_REG( mgr, MIXART_PCI_OMIMR_OFFSET));
594 return;
595}
596
597void snd_mixart_reset_board(struct mixart_mgr *mgr)
598{
599
600 writel_be( 1, MIXART_REG(mgr, MIXART_BA1_BRUTAL_RESET_OFFSET) );
601 return;
602}
603