#define SOURCEFILE_NAME "hpimsgx.c"
#include "hpi_internal.h"
#include "hpi_version.h"
#include "hpimsginit.h"
#include "hpicmn.h"
#include "hpimsgx.h"
#include "hpidebug.h"

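/* PCI device table, filled in from hpipcida.h; the driver_data field of
 * each entry holds the HPI handler function for that adapter family. */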
static struct pci_device_id asihpi_pci_tbl[] = {
#include "hpipcida.h"
};

static struct hpios_spinlock msgx_lock;

static hpi_handler_func *hpi_entry_points[HPI_MAX_ADAPTERS];
static int logging_enabled = 1;

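/* Look up the HPI entry point function for an adapter by matching its PCI
 * vendor/device/subsystem IDs against asihpi_pci_tbl. Returns NULL if the
 * device is not in the table. */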
static hpi_handler_func *hpi_lookup_entry_point_function(const struct hpi_pci
	*pci_info)
{
	int i;

	for (i = 0; asihpi_pci_tbl[i].vendor != 0; i++) {
		if (asihpi_pci_tbl[i].vendor != PCI_ANY_ID
			&& asihpi_pci_tbl[i].vendor !=
			pci_info->pci_dev->vendor)
			continue;
		if (asihpi_pci_tbl[i].device != PCI_ANY_ID
			&& asihpi_pci_tbl[i].device !=
			pci_info->pci_dev->device)
			continue;
		if (asihpi_pci_tbl[i].subvendor != PCI_ANY_ID
			&& asihpi_pci_tbl[i].subvendor !=
			pci_info->pci_dev->subsystem_vendor)
			continue;
		if (asihpi_pci_tbl[i].subdevice != PCI_ANY_ID
			&& asihpi_pci_tbl[i].subdevice !=
			pci_info->pci_dev->subsystem_device)
			continue;

		return (hpi_handler_func *) asihpi_pci_tbl[i].driver_data;
	}

	return NULL;
}

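/* Dispatch a message to the entry point registered for the target adapter,
 * or fail with HPI_ERROR_PROCESSING_MESSAGE if no entry point is registered
 * at that adapter index. */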
static inline void hw_entry_point(struct hpi_message *phm,
	struct hpi_response *phr)
{
	if ((phm->adapter_index < HPI_MAX_ADAPTERS)
		&& hpi_entry_points[phm->adapter_index])
		hpi_entry_points[phm->adapter_index] (phm, phr);
	else
		hpi_init_response(phr, phm->object, phm->function,
			HPI_ERROR_PROCESSING_MESSAGE);
}

static void adapter_open(struct hpi_message *phm, struct hpi_response *phr);
static void adapter_close(struct hpi_message *phm, struct hpi_response *phr);

static void mixer_open(struct hpi_message *phm, struct hpi_response *phr);
static void mixer_close(struct hpi_message *phm, struct hpi_response *phr);

static void outstream_open(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner);
static void outstream_close(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner);
static void instream_open(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner);
static void instream_close(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner);

static void HPIMSGX__reset(u16 adapter_index);

static u16 HPIMSGX__init(struct hpi_message *phm, struct hpi_response *phr);
static void HPIMSGX__cleanup(u16 adapter_index, void *h_owner);

#ifndef DISABLE_PRAGMA_PACK1
#pragma pack(push, 1)
#endif

struct hpi_subsys_response {
	struct hpi_response_header h;
	struct hpi_subsys_res s;
};

struct hpi_adapter_response {
	struct hpi_response_header h;
	struct hpi_adapter_res a;
};

struct hpi_mixer_response {
	struct hpi_response_header h;
	struct hpi_mixer_res m;
};

struct hpi_stream_response {
	struct hpi_response_header h;
	struct hpi_stream_res d;
};

struct adapter_info {
	u16 type;
	u16 num_instreams;
	u16 num_outstreams;
};

struct asi_open_state {
	int open_flag;
	void *h_owner;
};

#ifndef DISABLE_PRAGMA_PACK1
#pragma pack(pop)
#endif

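/* Cached "open" responses, captured when an adapter is created and returned
 * on subsequent open requests, plus per-stream ownership bookkeeping. */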
static struct hpi_adapter_response rESP_HPI_ADAPTER_OPEN[HPI_MAX_ADAPTERS];

static struct hpi_stream_response
	rESP_HPI_OSTREAM_OPEN[HPI_MAX_ADAPTERS][HPI_MAX_STREAMS];

static struct hpi_stream_response
	rESP_HPI_ISTREAM_OPEN[HPI_MAX_ADAPTERS][HPI_MAX_STREAMS];

static struct hpi_mixer_response rESP_HPI_MIXER_OPEN[HPI_MAX_ADAPTERS];

static struct adapter_info aDAPTER_INFO[HPI_MAX_ADAPTERS];

static struct asi_open_state
	outstream_user_open[HPI_MAX_ADAPTERS][HPI_MAX_STREAMS];

static struct asi_open_state
	instream_user_open[HPI_MAX_ADAPTERS][HPI_MAX_STREAMS];

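/* Handle subsystem-level messages: version queries, driver load/unload,
 * subsystem open/close and adapter creation. Most of these are answered
 * here rather than being forwarded to an adapter entry point. */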
static void subsys_message(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner)
{
	if (phm->adapter_index != HPI_ADAPTER_INDEX_INVALID)
		HPI_DEBUG_LOG(WARNING,
			"suspicious adapter index %d in subsys message 0x%x.\n",
			phm->adapter_index, phm->function);

	switch (phm->function) {
	case HPI_SUBSYS_GET_VERSION:
		hpi_init_response(phr, HPI_OBJ_SUBSYSTEM,
			HPI_SUBSYS_GET_VERSION, 0);
		phr->u.s.version = HPI_VER >> 8;
		phr->u.s.data = HPI_VER;
		break;
	case HPI_SUBSYS_OPEN:
		hpi_init_response(phr, HPI_OBJ_SUBSYSTEM, HPI_SUBSYS_OPEN, 0);
		break;
	case HPI_SUBSYS_CLOSE:
		hpi_init_response(phr, HPI_OBJ_SUBSYSTEM, HPI_SUBSYS_CLOSE,
			0);
		HPIMSGX__cleanup(HPIMSGX_ALLADAPTERS, h_owner);
		break;
	case HPI_SUBSYS_DRIVER_LOAD:
		hpios_msgxlock_init(&msgx_lock);
		memset(&hpi_entry_points, 0, sizeof(hpi_entry_points));
		HPIMSGX__reset(HPIMSGX_ALLADAPTERS);
		hpi_init_response(phr, HPI_OBJ_SUBSYSTEM,
			HPI_SUBSYS_DRIVER_LOAD, 0);
		HPI_COMMON(phm, phr);
		break;
	case HPI_SUBSYS_DRIVER_UNLOAD:
		HPI_COMMON(phm, phr);
		HPIMSGX__cleanup(HPIMSGX_ALLADAPTERS, h_owner);
		hpi_init_response(phr, HPI_OBJ_SUBSYSTEM,
			HPI_SUBSYS_DRIVER_UNLOAD, 0);
		return;

	case HPI_SUBSYS_GET_NUM_ADAPTERS:
	case HPI_SUBSYS_GET_ADAPTER:
		HPI_COMMON(phm, phr);
		break;

	case HPI_SUBSYS_CREATE_ADAPTER:
		HPIMSGX__init(phm, phr);
		break;

	default:
		hpi_init_response(phr, HPI_OBJ_SUBSYSTEM, phm->function,
			HPI_ERROR_INVALID_FUNC);
		break;
	}
}

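/* Handle adapter-object messages. Open and close are answered locally;
 * delete first cleans up this owner's streams and closes the adapter at
 * the HW level, then forwards the delete message itself. */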
static void adapter_message(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner)
{
	switch (phm->function) {
	case HPI_ADAPTER_OPEN:
		adapter_open(phm, phr);
		break;
	case HPI_ADAPTER_CLOSE:
		adapter_close(phm, phr);
		break;
	case HPI_ADAPTER_DELETE:
		HPIMSGX__cleanup(phm->adapter_index, h_owner);
		{
			struct hpi_message hm;
			struct hpi_response hr;
			hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
				HPI_ADAPTER_CLOSE);
			hm.adapter_index = phm->adapter_index;
			hw_entry_point(&hm, &hr);
		}
		hw_entry_point(phm, phr);
		break;

	default:
		hw_entry_point(phm, phr);
		break;
	}
}

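/* Handle mixer-object messages; open/close are answered from the cache,
 * everything else is passed to the adapter's entry point. */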
static void mixer_message(struct hpi_message *phm, struct hpi_response *phr)
{
	switch (phm->function) {
	case HPI_MIXER_OPEN:
		mixer_open(phm, phr);
		break;
	case HPI_MIXER_CLOSE:
		mixer_close(phm, phr);
		break;
	default:
		hw_entry_point(phm, phr);
		break;
	}
}

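/* Handle outstream messages after validating the stream index against the
 * adapter's reported outstream count. */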
static void outstream_message(struct hpi_message *phm,
	struct hpi_response *phr, void *h_owner)
{
	if (phm->obj_index >= aDAPTER_INFO[phm->adapter_index].num_outstreams) {
		hpi_init_response(phr, HPI_OBJ_OSTREAM, phm->function,
			HPI_ERROR_INVALID_OBJ_INDEX);
		return;
	}

	switch (phm->function) {
	case HPI_OSTREAM_OPEN:
		outstream_open(phm, phr, h_owner);
		break;
	case HPI_OSTREAM_CLOSE:
		outstream_close(phm, phr, h_owner);
		break;
	default:
		hw_entry_point(phm, phr);
		break;
	}
}

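/* Handle instream messages after validating the stream index against the
 * adapter's reported instream count. */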
static void instream_message(struct hpi_message *phm,
	struct hpi_response *phr, void *h_owner)
{
	if (phm->obj_index >= aDAPTER_INFO[phm->adapter_index].num_instreams) {
		hpi_init_response(phr, HPI_OBJ_ISTREAM, phm->function,
			HPI_ERROR_INVALID_OBJ_INDEX);
		return;
	}

	switch (phm->function) {
	case HPI_ISTREAM_OPEN:
		instream_open(phm, phr, h_owner);
		break;
	case HPI_ISTREAM_CLOSE:
		instream_close(phm, phr, h_owner);
		break;
	default:
		hw_entry_point(phm, phr);
		break;
	}
}

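/* Top-level entry point for HPI messages. Validates the message type and
 * adapter index, routes the message by object type, and turns message
 * logging off once an error of HPI_ERROR_DSP_COMMUNICATION or above is
 * returned. */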
void hpi_send_recv_ex(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner)
{
	if (logging_enabled)
		HPI_DEBUG_MESSAGE(DEBUG, phm);

	if (phm->type != HPI_TYPE_REQUEST) {
		hpi_init_response(phr, phm->object, phm->function,
			HPI_ERROR_INVALID_TYPE);
		return;
	}

	if (phm->adapter_index >= HPI_MAX_ADAPTERS
		&& phm->adapter_index != HPIMSGX_ALLADAPTERS) {
		hpi_init_response(phr, phm->object, phm->function,
			HPI_ERROR_BAD_ADAPTER_NUMBER);
		return;
	}

	switch (phm->object) {
	case HPI_OBJ_SUBSYSTEM:
		subsys_message(phm, phr, h_owner);
		break;

	case HPI_OBJ_ADAPTER:
		adapter_message(phm, phr, h_owner);
		break;

	case HPI_OBJ_MIXER:
		mixer_message(phm, phr);
		break;

	case HPI_OBJ_OSTREAM:
		outstream_message(phm, phr, h_owner);
		break;

	case HPI_OBJ_ISTREAM:
		instream_message(phm, phr, h_owner);
		break;

	default:
		hw_entry_point(phm, phr);
		break;
	}

	if (logging_enabled)
		HPI_DEBUG_RESPONSE(phr);

	if (phr->error >= HPI_ERROR_DSP_COMMUNICATION) {
		hpi_debug_level_set(HPI_DEBUG_LEVEL_ERROR);
		logging_enabled = 0;
	}
}

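/* Adapter and mixer open/close: opens are answered from the responses cached
 * by adapter_prepare(); closes simply return success. */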
static void adapter_open(struct hpi_message *phm, struct hpi_response *phr)
{
	HPI_DEBUG_LOG(VERBOSE, "adapter_open\n");
	memcpy(phr, &rESP_HPI_ADAPTER_OPEN[phm->adapter_index],
		sizeof(rESP_HPI_ADAPTER_OPEN[0]));
}

static void adapter_close(struct hpi_message *phm, struct hpi_response *phr)
{
	HPI_DEBUG_LOG(VERBOSE, "adapter_close\n");
	hpi_init_response(phr, HPI_OBJ_ADAPTER, HPI_ADAPTER_CLOSE, 0);
}

static void mixer_open(struct hpi_message *phm, struct hpi_response *phr)
{
	memcpy(phr, &rESP_HPI_MIXER_OPEN[phm->adapter_index],
		sizeof(rESP_HPI_MIXER_OPEN[0]));
}

static void mixer_close(struct hpi_message *phm, struct hpi_response *phr)
{
	hpi_init_response(phr, HPI_OBJ_MIXER, HPI_MIXER_CLOSE, 0);
}

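/* Open an instream for h_owner: fail if it is already open or its cached
 * open response carries an error; otherwise reset the stream at the HW
 * level, record ownership and return the cached open response. */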
static void instream_open(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner)
{
	struct hpi_message hm;
	struct hpi_response hr;

	hpi_init_response(phr, HPI_OBJ_ISTREAM, HPI_ISTREAM_OPEN, 0);

	hpios_msgxlock_lock(&msgx_lock);

	if (instream_user_open[phm->adapter_index][phm->obj_index].open_flag)
		phr->error = HPI_ERROR_OBJ_ALREADY_OPEN;
	else if (rESP_HPI_ISTREAM_OPEN[phm->adapter_index]
		[phm->obj_index].h.error)
		memcpy(phr,
			&rESP_HPI_ISTREAM_OPEN[phm->adapter_index]
			[phm->obj_index],
			sizeof(rESP_HPI_ISTREAM_OPEN[0][0]));
	else {
		instream_user_open[phm->adapter_index]
			[phm->obj_index].open_flag = 1;
		hpios_msgxlock_unlock(&msgx_lock);

		hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM,
			HPI_ISTREAM_RESET);
		hm.adapter_index = phm->adapter_index;
		hm.obj_index = phm->obj_index;
		hw_entry_point(&hm, &hr);

		hpios_msgxlock_lock(&msgx_lock);
		if (hr.error) {
			instream_user_open[phm->adapter_index]
				[phm->obj_index].open_flag = 0;
			phr->error = hr.error;
		} else {
			instream_user_open[phm->adapter_index]
				[phm->obj_index].open_flag = 1;
			instream_user_open[phm->adapter_index]
				[phm->obj_index].h_owner = h_owner;
			memcpy(phr,
				&rESP_HPI_ISTREAM_OPEN[phm->adapter_index]
				[phm->obj_index],
				sizeof(rESP_HPI_ISTREAM_OPEN[0][0]));
		}
	}
	hpios_msgxlock_unlock(&msgx_lock);
}

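/* Close an instream. Only the owner that opened the stream may close it;
 * the stream is reset at the HW level and ownership is cleared. */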
static void instream_close(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner)
{
	struct hpi_message hm;
	struct hpi_response hr;

	hpi_init_response(phr, HPI_OBJ_ISTREAM, HPI_ISTREAM_CLOSE, 0);

	hpios_msgxlock_lock(&msgx_lock);
	if (h_owner ==
		instream_user_open[phm->adapter_index]
		[phm->obj_index].h_owner) {

		instream_user_open[phm->adapter_index]
			[phm->obj_index].h_owner = NULL;
		hpios_msgxlock_unlock(&msgx_lock);

		hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM,
			HPI_ISTREAM_RESET);
		hm.adapter_index = phm->adapter_index;
		hm.obj_index = phm->obj_index;
		hw_entry_point(&hm, &hr);
		hpios_msgxlock_lock(&msgx_lock);
		if (hr.error) {
			instream_user_open[phm->adapter_index]
				[phm->obj_index].h_owner = h_owner;
			phr->error = hr.error;
		} else {
			instream_user_open[phm->adapter_index]
				[phm->obj_index].open_flag = 0;
			instream_user_open[phm->adapter_index]
				[phm->obj_index].h_owner = NULL;
		}
	} else {
		HPI_DEBUG_LOG(WARNING,
			"%p trying to close %d instream %d owned by %p\n",
			h_owner, phm->adapter_index, phm->obj_index,
			instream_user_open[phm->adapter_index]
			[phm->obj_index].h_owner);
		phr->error = HPI_ERROR_OBJ_NOT_OPEN;
	}
	hpios_msgxlock_unlock(&msgx_lock);
}

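/* Open an outstream for h_owner; mirrors instream_open(). */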
static void outstream_open(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner)
{
	struct hpi_message hm;
	struct hpi_response hr;

	hpi_init_response(phr, HPI_OBJ_OSTREAM, HPI_OSTREAM_OPEN, 0);

	hpios_msgxlock_lock(&msgx_lock);

	if (outstream_user_open[phm->adapter_index][phm->obj_index].open_flag)
		phr->error = HPI_ERROR_OBJ_ALREADY_OPEN;
	else if (rESP_HPI_OSTREAM_OPEN[phm->adapter_index]
		[phm->obj_index].h.error)
		memcpy(phr,
			&rESP_HPI_OSTREAM_OPEN[phm->adapter_index]
			[phm->obj_index],
			sizeof(rESP_HPI_OSTREAM_OPEN[0][0]));
	else {
		outstream_user_open[phm->adapter_index]
			[phm->obj_index].open_flag = 1;
		hpios_msgxlock_unlock(&msgx_lock);

		hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
			HPI_OSTREAM_RESET);
		hm.adapter_index = phm->adapter_index;
		hm.obj_index = phm->obj_index;
		hw_entry_point(&hm, &hr);

		hpios_msgxlock_lock(&msgx_lock);
		if (hr.error) {
			outstream_user_open[phm->adapter_index]
				[phm->obj_index].open_flag = 0;
			phr->error = hr.error;
		} else {
			outstream_user_open[phm->adapter_index]
				[phm->obj_index].open_flag = 1;
			outstream_user_open[phm->adapter_index]
				[phm->obj_index].h_owner = h_owner;
			memcpy(phr,
				&rESP_HPI_OSTREAM_OPEN[phm->adapter_index]
				[phm->obj_index],
				sizeof(rESP_HPI_OSTREAM_OPEN[0][0]));
		}
	}
	hpios_msgxlock_unlock(&msgx_lock);
}

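/* Close an outstream; mirrors instream_close(). */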
static void outstream_close(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner)
{
	struct hpi_message hm;
	struct hpi_response hr;

	hpi_init_response(phr, HPI_OBJ_OSTREAM, HPI_OSTREAM_CLOSE, 0);

	hpios_msgxlock_lock(&msgx_lock);

	if (h_owner ==
		outstream_user_open[phm->adapter_index]
		[phm->obj_index].h_owner) {

		outstream_user_open[phm->adapter_index]
			[phm->obj_index].h_owner = NULL;
		hpios_msgxlock_unlock(&msgx_lock);

		hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
			HPI_OSTREAM_RESET);
		hm.adapter_index = phm->adapter_index;
		hm.obj_index = phm->obj_index;
		hw_entry_point(&hm, &hr);
		hpios_msgxlock_lock(&msgx_lock);
		if (hr.error) {
			outstream_user_open[phm->adapter_index]
				[phm->obj_index].h_owner = h_owner;
			phr->error = hr.error;
		} else {
			outstream_user_open[phm->adapter_index]
				[phm->obj_index].open_flag = 0;
			outstream_user_open[phm->adapter_index]
				[phm->obj_index].h_owner = NULL;
		}
	} else {
		HPI_DEBUG_LOG(WARNING,
			"%p trying to close %d outstream %d owned by %p\n",
			h_owner, phm->adapter_index, phm->obj_index,
			outstream_user_open[phm->adapter_index]
			[phm->obj_index].h_owner);
		phr->error = HPI_ERROR_OBJ_NOT_OPEN;
	}
	hpios_msgxlock_unlock(&msgx_lock);
}

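/* After an adapter is created: open it, query its stream counts, then cache
 * the open responses for the adapter, every stream and the mixer so that
 * later open requests can be answered from the cache. */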
static u16 adapter_prepare(u16 adapter)
{
	struct hpi_message hm;
	struct hpi_response hr;
	u16 i;

	hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
		HPI_ADAPTER_OPEN);
	hm.adapter_index = adapter;
	hw_entry_point(&hm, &hr);
	memcpy(&rESP_HPI_ADAPTER_OPEN[adapter], &hr,
		sizeof(rESP_HPI_ADAPTER_OPEN[0]));
	if (hr.error)
		return hr.error;

	hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
		HPI_ADAPTER_GET_INFO);
	hm.adapter_index = adapter;
	hw_entry_point(&hm, &hr);
	if (hr.error)
		return hr.error;

	aDAPTER_INFO[adapter].num_outstreams = hr.u.ax.info.num_outstreams;
	aDAPTER_INFO[adapter].num_instreams = hr.u.ax.info.num_instreams;
	aDAPTER_INFO[adapter].type = hr.u.ax.info.adapter_type;

	for (i = 0; i < aDAPTER_INFO[adapter].num_outstreams; i++) {
		hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
			HPI_OSTREAM_OPEN);
		hm.adapter_index = adapter;
		hm.obj_index = i;
		hw_entry_point(&hm, &hr);
		memcpy(&rESP_HPI_OSTREAM_OPEN[adapter][i], &hr,
			sizeof(rESP_HPI_OSTREAM_OPEN[0][0]));
		outstream_user_open[adapter][i].open_flag = 0;
		outstream_user_open[adapter][i].h_owner = NULL;
	}

	for (i = 0; i < aDAPTER_INFO[adapter].num_instreams; i++) {
		hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM,
			HPI_ISTREAM_OPEN);
		hm.adapter_index = adapter;
		hm.obj_index = i;
		hw_entry_point(&hm, &hr);
		memcpy(&rESP_HPI_ISTREAM_OPEN[adapter][i], &hr,
			sizeof(rESP_HPI_ISTREAM_OPEN[0][0]));
		instream_user_open[adapter][i].open_flag = 0;
		instream_user_open[adapter][i].h_owner = NULL;
	}

	hpi_init_message_response(&hm, &hr, HPI_OBJ_MIXER, HPI_MIXER_OPEN);
	hm.adapter_index = adapter;
	hw_entry_point(&hm, &hr);
	memcpy(&rESP_HPI_MIXER_OPEN[adapter], &hr,
		sizeof(rESP_HPI_MIXER_OPEN[0]));

	return 0;
}

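/* Reset the cached open responses to "bad adapter / invalid object" errors
 * for one adapter, or for every adapter when HPIMSGX_ALLADAPTERS is given. */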
static void HPIMSGX__reset(u16 adapter_index)
{
	int i;
	u16 adapter;
	struct hpi_response hr;

	if (adapter_index == HPIMSGX_ALLADAPTERS) {
		for (adapter = 0; adapter < HPI_MAX_ADAPTERS; adapter++) {
			hpi_init_response(&hr, HPI_OBJ_ADAPTER,
				HPI_ADAPTER_OPEN, HPI_ERROR_BAD_ADAPTER);
			memcpy(&rESP_HPI_ADAPTER_OPEN[adapter], &hr,
				sizeof(rESP_HPI_ADAPTER_OPEN[adapter]));

			hpi_init_response(&hr, HPI_OBJ_MIXER, HPI_MIXER_OPEN,
				HPI_ERROR_INVALID_OBJ);
			memcpy(&rESP_HPI_MIXER_OPEN[adapter], &hr,
				sizeof(rESP_HPI_MIXER_OPEN[adapter]));

			for (i = 0; i < HPI_MAX_STREAMS; i++) {
				hpi_init_response(&hr, HPI_OBJ_OSTREAM,
					HPI_OSTREAM_OPEN,
					HPI_ERROR_INVALID_OBJ);
				memcpy(&rESP_HPI_OSTREAM_OPEN[adapter][i],
					&hr,
					sizeof(rESP_HPI_OSTREAM_OPEN[adapter]
						[i]));
				hpi_init_response(&hr, HPI_OBJ_ISTREAM,
					HPI_ISTREAM_OPEN,
					HPI_ERROR_INVALID_OBJ);
				memcpy(&rESP_HPI_ISTREAM_OPEN[adapter][i],
					&hr,
					sizeof(rESP_HPI_ISTREAM_OPEN[adapter]
						[i]));
			}
		}
	} else if (adapter_index < HPI_MAX_ADAPTERS) {
		rESP_HPI_ADAPTER_OPEN[adapter_index].h.error =
			HPI_ERROR_BAD_ADAPTER;
		rESP_HPI_MIXER_OPEN[adapter_index].h.error =
			HPI_ERROR_INVALID_OBJ;
		for (i = 0; i < HPI_MAX_STREAMS; i++) {
			rESP_HPI_OSTREAM_OPEN[adapter_index][i].h.error =
				HPI_ERROR_INVALID_OBJ;
			rESP_HPI_ISTREAM_OPEN[adapter_index][i].h.error =
				HPI_ERROR_INVALID_OBJ;
		}
	}
}

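/* Handle HPI_SUBSYS_CREATE_ADAPTER: look up the entry point for the new
 * adapter's PCI IDs, call it to create the adapter, register the entry
 * point under the returned adapter index and prepare the cached responses. */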
static u16 HPIMSGX__init(struct hpi_message *phm, struct hpi_response *phr)
{
	hpi_handler_func *entry_point_func;
	struct hpi_response hr;

	hpi_init_response(&hr, phm->object, phm->function,
		HPI_ERROR_INVALID_OBJ);

	entry_point_func =
		hpi_lookup_entry_point_function(phm->u.s.resource.r.pci);

	if (entry_point_func) {
		HPI_DEBUG_MESSAGE(DEBUG, phm);
		entry_point_func(phm, &hr);
	} else {
		phr->error = HPI_ERROR_PROCESSING_MESSAGE;
		return phr->error;
	}
	if (hr.error == 0) {
		hpi_entry_points[hr.u.s.adapter_index] = entry_point_func;

		HPI_DEBUG_LOG(DEBUG,
			"HPI_SUBSYS_CREATE_ADAPTER successful,"
			" preparing adapter\n");
		adapter_prepare(hr.u.s.adapter_index);
	}
	memcpy(phr, &hr, hr.size);
	return phr->error;
}

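/* Release everything still held by h_owner on one adapter (or all adapters):
 * reset each owned stream, free its host buffer, reset its group, and clear
 * the ownership records. */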
static void HPIMSGX__cleanup(u16 adapter_index, void *h_owner)
{
	int i, adapter, adapter_limit;

	if (!h_owner)
		return;

	if (adapter_index == HPIMSGX_ALLADAPTERS) {
		adapter = 0;
		adapter_limit = HPI_MAX_ADAPTERS;
	} else {
		adapter = adapter_index;
		adapter_limit = adapter + 1;
	}

	for (; adapter < adapter_limit; adapter++) {
		for (i = 0; i < HPI_MAX_STREAMS; i++) {
			if (h_owner ==
				outstream_user_open[adapter][i].h_owner) {
				struct hpi_message hm;
				struct hpi_response hr;

				HPI_DEBUG_LOG(DEBUG,
					"Close adapter %d ostream %d\n",
					adapter, i);

				hpi_init_message_response(&hm, &hr,
					HPI_OBJ_OSTREAM, HPI_OSTREAM_RESET);
				hm.adapter_index = (u16)adapter;
				hm.obj_index = (u16)i;
				hw_entry_point(&hm, &hr);

				hm.function = HPI_OSTREAM_HOSTBUFFER_FREE;
				hw_entry_point(&hm, &hr);

				hm.function = HPI_OSTREAM_GROUP_RESET;
				hw_entry_point(&hm, &hr);

				outstream_user_open[adapter][i].open_flag = 0;
				outstream_user_open[adapter][i].h_owner =
					NULL;
			}
			if (h_owner == instream_user_open[adapter][i].h_owner) {
				struct hpi_message hm;
				struct hpi_response hr;

				HPI_DEBUG_LOG(DEBUG,
					"Close adapter %d istream %d\n",
					adapter, i);

				hpi_init_message_response(&hm, &hr,
					HPI_OBJ_ISTREAM, HPI_ISTREAM_RESET);
				hm.adapter_index = (u16)adapter;
				hm.obj_index = (u16)i;
				hw_entry_point(&hm, &hr);

				hm.function = HPI_ISTREAM_HOSTBUFFER_FREE;
				hw_entry_point(&hm, &hr);

				hm.function = HPI_ISTREAM_GROUP_RESET;
				hw_entry_point(&hm, &hr);

				instream_user_open[adapter][i].open_flag = 0;
				instream_user_open[adapter][i].h_owner = NULL;
			}
		}
	}
}