1
2
3
4
5
6
7
8#define KMSG_COMPONENT "sclp_cmd"
9#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
10
11#include <linux/completion.h>
12#include <linux/init.h>
13#include <linux/errno.h>
14#include <linux/err.h>
15#include <linux/export.h>
16#include <linux/slab.h>
17#include <linux/string.h>
18#include <linux/mm.h>
19#include <linux/mmzone.h>
20#include <linux/memory.h>
21#include <linux/module.h>
22#include <linux/platform_device.h>
23#include <asm/ctl_reg.h>
24#include <asm/chpid.h>
25#include <asm/setup.h>
26#include <asm/page.h>
27#include <asm/sclp.h>
28
29#include "sclp.h"
30
/* Request callback: wake the synchronous waiter whose completion was
 * stashed in callback_data by sclp_sync_request(). */
static void sclp_sync_callback(struct sclp_req *req, void *data)
{
	complete((struct completion *) data);
}
37
38int sclp_sync_request(sclp_cmdw_t cmd, void *sccb)
39{
40 struct completion completion;
41 struct sclp_req *request;
42 int rc;
43
44 request = kzalloc(sizeof(*request), GFP_KERNEL);
45 if (!request)
46 return -ENOMEM;
47 request->command = cmd;
48 request->sccb = sccb;
49 request->status = SCLP_REQ_FILLED;
50 request->callback = sclp_sync_callback;
51 request->callback_data = &completion;
52 init_completion(&completion);
53
54
55 rc = sclp_add_request(request);
56 if (rc)
57 goto out;
58 wait_for_completion(&completion);
59
60
61 if (request->status != SCLP_REQ_DONE) {
62 pr_warning("sync request failed (cmd=0x%08x, "
63 "status=0x%02x)\n", cmd, request->status);
64 rc = -EIO;
65 }
66out:
67 kfree(request);
68 return rc;
69}
70
71
72
73
74
75#define SCLP_CMDW_READ_CPU_INFO 0x00010001
76#define SCLP_CMDW_CONFIGURE_CPU 0x00110001
77#define SCLP_CMDW_DECONFIGURE_CPU 0x00100001
78
/* SCCB layout for SCLP_CMDW_READ_CPU_INFO; padded to one full page and
 * page-aligned, since the command is issued with a page-sized buffer. */
struct read_cpu_info_sccb {
	struct sccb_header header;
	u16 nr_configured;	/* number of configured CPU entries */
	u16 offset_configured;	/* byte offset of first entry from SCCB start */
	u16 nr_standby;		/* number of standby CPU entries */
	u16 offset_standby;	/* byte offset of first standby entry */
	u8 reserved[4096 - 16];
} __attribute__((packed, aligned(PAGE_SIZE)));
87
88static void sclp_fill_cpu_info(struct sclp_cpu_info *info,
89 struct read_cpu_info_sccb *sccb)
90{
91 char *page = (char *) sccb;
92
93 memset(info, 0, sizeof(*info));
94 info->configured = sccb->nr_configured;
95 info->standby = sccb->nr_standby;
96 info->combined = sccb->nr_configured + sccb->nr_standby;
97 info->has_cpu_type = sclp_fac84 & 0x1;
98 memcpy(&info->cpu, page + sccb->offset_configured,
99 info->combined * sizeof(struct sclp_cpu_entry));
100}
101
102int sclp_get_cpu_info(struct sclp_cpu_info *info)
103{
104 int rc;
105 struct read_cpu_info_sccb *sccb;
106
107 if (!SCLP_HAS_CPU_INFO)
108 return -EOPNOTSUPP;
109 sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
110 if (!sccb)
111 return -ENOMEM;
112 sccb->header.length = sizeof(*sccb);
113 rc = sclp_sync_request(SCLP_CMDW_READ_CPU_INFO, sccb);
114 if (rc)
115 goto out;
116 if (sccb->header.response_code != 0x0010) {
117 pr_warning("readcpuinfo failed (response=0x%04x)\n",
118 sccb->header.response_code);
119 rc = -EIO;
120 goto out;
121 }
122 sclp_fill_cpu_info(info, sccb);
123out:
124 free_page((unsigned long) sccb);
125 return rc;
126}
127
/* Minimal SCCB (header only) used for CPU (de)configure commands;
 * 8-byte aligned so the small kzalloc'ed buffer is a valid SCCB. */
struct cpu_configure_sccb {
	struct sccb_header header;
} __attribute__((packed, aligned(8)));
131
132static int do_cpu_configure(sclp_cmdw_t cmd)
133{
134 struct cpu_configure_sccb *sccb;
135 int rc;
136
137 if (!SCLP_HAS_CPU_RECONFIG)
138 return -EOPNOTSUPP;
139
140
141
142
143 sccb = kzalloc(sizeof(*sccb), GFP_KERNEL | GFP_DMA);
144 if (!sccb)
145 return -ENOMEM;
146 sccb->header.length = sizeof(*sccb);
147 rc = sclp_sync_request(cmd, sccb);
148 if (rc)
149 goto out;
150 switch (sccb->header.response_code) {
151 case 0x0020:
152 case 0x0120:
153 break;
154 default:
155 pr_warning("configure cpu failed (cmd=0x%08x, "
156 "response=0x%04x)\n", cmd,
157 sccb->header.response_code);
158 rc = -EIO;
159 break;
160 }
161out:
162 kfree(sccb);
163 return rc;
164}
165
166int sclp_cpu_configure(u8 cpu)
167{
168 return do_cpu_configure(SCLP_CMDW_CONFIGURE_CPU | cpu << 8);
169}
170
171int sclp_cpu_deconfigure(u8 cpu)
172{
173 return do_cpu_configure(SCLP_CMDW_DECONFIGURE_CPU | cpu << 8);
174}
175
176#ifdef CONFIG_MEMORY_HOTPLUG
177
/* Serializes memory hotplug state changes and storage-id attachment. */
static DEFINE_MUTEX(sclp_mem_mutex);
/* Sorted list of struct memory_increment, built at init time. */
static LIST_HEAD(sclp_mem_list);
/* Highest storage element id reported by the SCLP so far. */
static u8 sclp_max_storage_id;
/* Bit per storage element id (0..255): set once attached. */
static unsigned long sclp_storage_ids[256 / BITS_PER_LONG];
/* Non-zero once hotplug changed state; used to refuse suspend. */
static int sclp_mem_state_changed;
183
/* One storage increment (rn = increment number, 1-based) and whether it
 * is currently standby (offline) memory. */
struct memory_increment {
	struct list_head list;
	u16 rn;		/* increment number, 1-based */
	int standby;	/* non-zero: standby (not assigned) */
};
189
/* SCCB for the assign/unassign storage commands: header plus the
 * increment number to operate on. */
struct assign_storage_sccb {
	struct sccb_header header;
	u16 rn;		/* increment number to (un)assign */
} __packed;
194
195int arch_get_memory_phys_device(unsigned long start_pfn)
196{
197 if (!sclp_rzm)
198 return 0;
199 return PFN_PHYS(start_pfn) >> ilog2(sclp_rzm);
200}
201
202static unsigned long long rn2addr(u16 rn)
203{
204 return (unsigned long long) (rn - 1) * sclp_rzm;
205}
206
207static int do_assign_storage(sclp_cmdw_t cmd, u16 rn)
208{
209 struct assign_storage_sccb *sccb;
210 int rc;
211
212 sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
213 if (!sccb)
214 return -ENOMEM;
215 sccb->header.length = PAGE_SIZE;
216 sccb->rn = rn;
217 rc = sclp_sync_request(cmd, sccb);
218 if (rc)
219 goto out;
220 switch (sccb->header.response_code) {
221 case 0x0020:
222 case 0x0120:
223 break;
224 default:
225 pr_warning("assign storage failed (cmd=0x%08x, "
226 "response=0x%04x, rn=0x%04x)\n", cmd,
227 sccb->header.response_code, rn);
228 rc = -EIO;
229 break;
230 }
231out:
232 free_page((unsigned long) sccb);
233 return rc;
234}
235
236static int sclp_assign_storage(u16 rn)
237{
238 unsigned long long start;
239 int rc;
240
241 rc = do_assign_storage(0x000d0001, rn);
242 if (rc)
243 return rc;
244 start = rn2addr(rn);
245 storage_key_init_range(start, start + sclp_rzm);
246 return 0;
247}
248
249static int sclp_unassign_storage(u16 rn)
250{
251 return do_assign_storage(0x000c0001, rn);
252}
253
254struct attach_storage_sccb {
255 struct sccb_header header;
256 u16 :16;
257 u16 assigned;
258 u32 :32;
259 u32 entries[0];
260} __packed;
261
262static int sclp_attach_storage(u8 id)
263{
264 struct attach_storage_sccb *sccb;
265 int rc;
266 int i;
267
268 sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
269 if (!sccb)
270 return -ENOMEM;
271 sccb->header.length = PAGE_SIZE;
272 sccb->header.function_code = 0x40;
273 rc = sclp_sync_request(0x00080001 | id << 8, sccb);
274 if (rc)
275 goto out;
276 switch (sccb->header.response_code) {
277 case 0x0020:
278 set_bit(id, sclp_storage_ids);
279 for (i = 0; i < sccb->assigned; i++) {
280 if (sccb->entries[i])
281 sclp_unassign_storage(sccb->entries[i] >> 16);
282 }
283 break;
284 default:
285 rc = -EIO;
286 break;
287 }
288out:
289 free_page((unsigned long) sccb);
290 return rc;
291}
292
/*
 * Assign (online != 0) or unassign (online == 0) every increment that
 * overlaps the physical range [start, start + size).
 *
 * Returns 0 on success, -EIO if any assign request failed. Failures of
 * unassign requests are deliberately ignored. Once rc is non-zero,
 * subsequent increments are no longer marked, since the rc == 0 guard
 * below stays false for the rest of the walk.
 */
static int sclp_mem_change_state(unsigned long start, unsigned long size,
				 int online)
{
	struct memory_increment *incr;
	unsigned long long istart;
	int rc = 0;

	list_for_each_entry(incr, &sclp_mem_list, list) {
		istart = rn2addr(incr->rn);
		/* List is kept sorted by insert_increment(); once an
		 * increment starts past the range, none overlap. */
		if (start + size - 1 < istart)
			break;
		/* Increment ends before the range starts: skip. */
		if (start > istart + sclp_rzm - 1)
			continue;
		if (online)
			rc |= sclp_assign_storage(incr->rn);
		else
			sclp_unassign_storage(incr->rn);
		if (rc == 0)
			incr->standby = online ? 0 : 1;
	}
	return rc ? -EIO : 0;
}
315
316static bool contains_standby_increment(unsigned long start, unsigned long end)
317{
318 struct memory_increment *incr;
319 unsigned long istart;
320
321 list_for_each_entry(incr, &sclp_mem_list, list) {
322 istart = rn2addr(incr->rn);
323 if (end - 1 < istart)
324 continue;
325 if (start > istart + sclp_rzm - 1)
326 continue;
327 if (incr->standby)
328 return true;
329 }
330 return false;
331}
332
/*
 * Memory hotplug notifier: translate memory online/offline events into
 * SCLP assign/unassign storage requests for the affected range.
 *
 * Returns NOTIFY_OK on success, NOTIFY_BAD if the state change failed
 * or is not permitted.
 */
static int sclp_mem_notifier(struct notifier_block *nb,
			     unsigned long action, void *data)
{
	unsigned long start, size;
	struct memory_notify *arg;
	unsigned char id;
	int rc = 0;

	arg = data;
	start = arg->start_pfn << PAGE_SHIFT;
	size = arg->nr_pages << PAGE_SHIFT;
	mutex_lock(&sclp_mem_mutex);
	/* Attach any storage elements not yet attached. */
	for_each_clear_bit(id, sclp_storage_ids, sclp_max_storage_id + 1)
		sclp_attach_storage(id);
	switch (action) {
	case MEM_GOING_OFFLINE:
		/*
		 * Do not allow blocks that contain standby increments
		 * to go offline.
		 */
		if (contains_standby_increment(start, start + size))
			rc = -EPERM;
		break;
	case MEM_ONLINE:
	case MEM_CANCEL_OFFLINE:
		/* Nothing to do: state was changed at GOING_* time. */
		break;
	case MEM_GOING_ONLINE:
		rc = sclp_mem_change_state(start, size, 1);
		break;
	case MEM_CANCEL_ONLINE:
		/* Roll back the assign done at MEM_GOING_ONLINE. */
		sclp_mem_change_state(start, size, 0);
		break;
	case MEM_OFFLINE:
		sclp_mem_change_state(start, size, 0);
		break;
	default:
		rc = -EINVAL;
		break;
	}
	if (!rc)
		sclp_mem_state_changed = 1;
	mutex_unlock(&sclp_mem_mutex);
	return rc ? NOTIFY_BAD : NOTIFY_OK;
}
378
/* Registered with register_memory_notifier() at init time. */
static struct notifier_block sclp_mem_nb = {
	.notifier_call = sclp_mem_notifier,
};
382
383static void __init align_to_block_size(unsigned long long *start,
384 unsigned long long *size)
385{
386 unsigned long long start_align, size_align, alignment;
387
388 alignment = memory_block_size_bytes();
389 start_align = roundup(*start, alignment);
390 size_align = rounddown(*start + *size, alignment) - start_align;
391
392 pr_info("Standby memory at 0x%llx (%lluM of %lluM usable)\n",
393 *start, size_align >> 20, *size >> 20);
394 *start = start_align;
395 *size = size_align;
396}
397
/*
 * Coalesce consecutive standby increment numbers and add each merged
 * run as one memory region.
 *
 * Uses static state (first_rn/num) across calls: consecutive rns are
 * accumulated; a non-consecutive rn (or rn == 0, the flush sentinel)
 * first adds the pending run, then starts a new one.
 */
static void __init add_memory_merged(u16 rn)
{
	static u16 first_rn, num;
	unsigned long long start, size;

	/* Extend the current run if rn is the next consecutive number. */
	if (rn && first_rn && (first_rn + num == rn)) {
		num++;
		return;
	}
	/* No pending run to add (first call or after a flush). */
	if (!first_rn)
		goto skip_add;
	start = rn2addr(first_rn);
	size = (unsigned long long) num * sclp_rzm;
	/* Clamp to the addressable limit of the kernel mapping. */
	if (start >= VMEM_MAX_PHYS)
		goto skip_add;
	if (start + size > VMEM_MAX_PHYS)
		size = VMEM_MAX_PHYS - start;
	/* Honor a mem= style limit if one was set. */
	if (memory_end_set && (start >= memory_end))
		goto skip_add;
	if (memory_end_set && (start + size > memory_end))
		size = memory_end - start;
	align_to_block_size(&start, &size);
	if (size)
		add_memory(0, start, size);
skip_add:
	first_rn = rn;
	num = 1;
}
426
427static void __init sclp_add_standby_memory(void)
428{
429 struct memory_increment *incr;
430
431 list_for_each_entry(incr, &sclp_mem_list, list)
432 if (incr->standby)
433 add_memory_merged(incr->rn);
434 add_memory_merged(0);
435}
436
/*
 * Insert a memory increment into the sorted sclp_mem_list.
 *
 * @rn:       increment number (ignored when !assigned, see below)
 * @standby:  mark the new increment as standby memory
 * @assigned: non-zero inserts @rn in rn order; zero inserts into the
 *            first gap in the existing numbering (rn is then derived as
 *            last contiguous rn + 1).
 *
 * Allocation failures are silently ignored (init-time best effort), as
 * are increment numbers beyond sclp_rnmax.
 */
static void __init insert_increment(u16 rn, int standby, int assigned)
{
	struct memory_increment *incr, *new_incr;
	struct list_head *prev;
	u16 last_rn;

	new_incr = kzalloc(sizeof(*new_incr), GFP_KERNEL);
	if (!new_incr)
		return;
	new_incr->rn = rn;
	new_incr->standby = standby;
	last_rn = 0;
	prev = &sclp_mem_list;
	list_for_each_entry(incr, &sclp_mem_list, list) {
		if (assigned && incr->rn > rn)
			break;
		/* Unassigned: stop at the first gap in the numbering. */
		if (!assigned && incr->rn - last_rn > 1)
			break;
		last_rn = incr->rn;
		prev = &incr->list;
	}
	if (!assigned)
		new_incr->rn = last_rn + 1;
	if (new_incr->rn > sclp_rnmax) {
		kfree(new_incr);
		return;
	}
	list_add(&new_incr->list, prev);
}
466
467static int sclp_mem_freeze(struct device *dev)
468{
469 if (!sclp_mem_state_changed)
470 return 0;
471 pr_err("Memory hotplug state changed, suspend refused.\n");
472 return -EPERM;
473}
474
475struct read_storage_sccb {
476 struct sccb_header header;
477 u16 max_id;
478 u16 assigned;
479 u16 standby;
480 u16 :16;
481 u32 entries[0];
482} __packed;
483
/* Only .freeze is needed: veto hibernation after hotplug changes. */
static const struct dev_pm_ops sclp_mem_pm_ops = {
	.freeze		= sclp_mem_freeze,
};
487
/* Dummy platform driver; exists solely to hook sclp_mem_pm_ops into
 * the suspend/resume path. */
static struct platform_driver sclp_mem_pdrv = {
	.driver = {
		.name	= "sclp_mem",
		.pm	= &sclp_mem_pm_ops,
	},
};
494
/*
 * Init-time detection of standby memory: read the storage element
 * information for every element id, build the increment list, fill
 * numbering gaps with standby increments, then register the hotplug
 * notifier, the PM platform driver and the standby memory itself.
 *
 * Returns 0 (also when the facility is absent or running kdump),
 * or a negative error code.
 */
static int __init sclp_detect_standby_memory(void)
{
	struct platform_device *sclp_pdev;
	struct read_storage_sccb *sccb;
	int i, id, assigned, rc;

	/* No standby memory in a kdump kernel. */
	if (OLDMEM_BASE)
		return 0;
	/* Require the needed SCLP facility bits. */
	if ((sclp_facilities & 0xe00000000000ULL) != 0xe00000000000ULL)
		return 0;
	rc = -ENOMEM;
	sccb = (void *) __get_free_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		goto out;
	assigned = 0;
	/* sclp_max_storage_id grows as responses report higher max_id. */
	for (id = 0; id <= sclp_max_storage_id; id++) {
		memset(sccb, 0, PAGE_SIZE);
		sccb->header.length = PAGE_SIZE;
		rc = sclp_sync_request(0x00040001 | id << 8, sccb);
		if (rc)
			goto out;
		switch (sccb->header.response_code) {
		case 0x0010:
			/* Element attached: increments are assigned. */
			set_bit(id, sclp_storage_ids);
			for (i = 0; i < sccb->assigned; i++) {
				if (!sccb->entries[i])
					continue;
				assigned++;
				insert_increment(sccb->entries[i] >> 16, 0, 1);
			}
			break;
		case 0x0310:
			/* Element has no increments: nothing to record. */
			break;
		case 0x0410:
			/* Element not attached: increments are standby. */
			for (i = 0; i < sccb->assigned; i++) {
				if (!sccb->entries[i])
					continue;
				assigned++;
				insert_increment(sccb->entries[i] >> 16, 1, 1);
			}
			break;
		default:
			rc = -EIO;
			break;
		}
		if (!rc)
			sclp_max_storage_id = sccb->max_id;
	}
	if (rc || list_empty(&sclp_mem_list))
		goto out;
	/* Fill the remaining increment numbers as standby memory. */
	for (i = 1; i <= sclp_rnmax - assigned; i++)
		insert_increment(0, 1, 0);
	rc = register_memory_notifier(&sclp_mem_nb);
	if (rc)
		goto out;
	rc = platform_driver_register(&sclp_mem_pdrv);
	if (rc)
		goto out;
	sclp_pdev = platform_device_register_simple("sclp_mem", -1, NULL, 0);
	rc = IS_ERR(sclp_pdev) ? PTR_ERR(sclp_pdev) : 0;
	if (rc)
		goto out_driver;
	sclp_add_standby_memory();
	goto out;
out_driver:
	platform_driver_unregister(&sclp_mem_pdrv);
out:
	free_page((unsigned long) sccb);
	return rc;
}
__initcall(sclp_detect_standby_memory);
566
567#endif
568
569
570
571
572#define SCLP_CMDW_CONFIGURE_PCI 0x001a0001
573#define SCLP_CMDW_DECONFIGURE_PCI 0x001b0001
574
575#define SCLP_RECONFIG_PCI_ATPYE 2
576
/* SCCB for PCI I/O adapter (de)configure commands. */
struct pci_cfg_sccb {
	struct sccb_header header;
	u8 atype;	/* adapter type (SCLP_RECONFIG_PCI_ATPYE) */
	u8 reserved1;
	u16 reserved2;
	u32 aid;	/* adapter identifier (PCI function id) */
} __packed;
584
585static int do_pci_configure(sclp_cmdw_t cmd, u32 fid)
586{
587 struct pci_cfg_sccb *sccb;
588 int rc;
589
590 if (!SCLP_HAS_PCI_RECONFIG)
591 return -EOPNOTSUPP;
592
593 sccb = (struct pci_cfg_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
594 if (!sccb)
595 return -ENOMEM;
596
597 sccb->header.length = PAGE_SIZE;
598 sccb->atype = SCLP_RECONFIG_PCI_ATPYE;
599 sccb->aid = fid;
600 rc = sclp_sync_request(cmd, sccb);
601 if (rc)
602 goto out;
603 switch (sccb->header.response_code) {
604 case 0x0020:
605 case 0x0120:
606 break;
607 default:
608 pr_warn("configure PCI I/O adapter failed: cmd=0x%08x response=0x%04x\n",
609 cmd, sccb->header.response_code);
610 rc = -EIO;
611 break;
612 }
613out:
614 free_page((unsigned long) sccb);
615 return rc;
616}
617
618int sclp_pci_configure(u32 fid)
619{
620 return do_pci_configure(SCLP_CMDW_CONFIGURE_PCI, fid);
621}
622EXPORT_SYMBOL(sclp_pci_configure);
623
624int sclp_pci_deconfigure(u32 fid)
625{
626 return do_pci_configure(SCLP_CMDW_DECONFIGURE_PCI, fid);
627}
628EXPORT_SYMBOL(sclp_pci_deconfigure);
629
630
631
632
633
634#define SCLP_CMDW_CONFIGURE_CHPATH 0x000f0001
635#define SCLP_CMDW_DECONFIGURE_CHPATH 0x000e0001
636#define SCLP_CMDW_READ_CHPATH_INFORMATION 0x00030001
637
/* SCCB for channel-path (de)configure commands. */
struct chp_cfg_sccb {
	struct sccb_header header;
	u8 ccm;		/* NOTE(review): meaning defined by SCLP spec */
	u8 reserved[6];
	u8 cssid;
} __attribute__((packed));
644
645static int do_chp_configure(sclp_cmdw_t cmd)
646{
647 struct chp_cfg_sccb *sccb;
648 int rc;
649
650 if (!SCLP_HAS_CHP_RECONFIG)
651 return -EOPNOTSUPP;
652
653 sccb = (struct chp_cfg_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
654 if (!sccb)
655 return -ENOMEM;
656 sccb->header.length = sizeof(*sccb);
657 rc = sclp_sync_request(cmd, sccb);
658 if (rc)
659 goto out;
660 switch (sccb->header.response_code) {
661 case 0x0020:
662 case 0x0120:
663 case 0x0440:
664 case 0x0450:
665 break;
666 default:
667 pr_warning("configure channel-path failed "
668 "(cmd=0x%08x, response=0x%04x)\n", cmd,
669 sccb->header.response_code);
670 rc = -EIO;
671 break;
672 }
673out:
674 free_page((unsigned long) sccb);
675 return rc;
676}
677
678
679
680
681
682
683
684
685int sclp_chp_configure(struct chp_id chpid)
686{
687 return do_chp_configure(SCLP_CMDW_CONFIGURE_CHPATH | chpid.id << 8);
688}
689
690
691
692
693
694
695
696
697int sclp_chp_deconfigure(struct chp_id chpid)
698{
699 return do_chp_configure(SCLP_CMDW_DECONFIGURE_CHPATH | chpid.id << 8);
700}
701
/* SCCB for SCLP_CMDW_READ_CHPATH_INFORMATION: per-chpid bitmasks of
 * recognized, standby and configured channel paths. */
struct chp_info_sccb {
	struct sccb_header header;
	u8 recognized[SCLP_CHP_INFO_MASK_SIZE];
	u8 standby[SCLP_CHP_INFO_MASK_SIZE];
	u8 configured[SCLP_CHP_INFO_MASK_SIZE];
	u8 ccm;
	u8 reserved[6];
	u8 cssid;
} __attribute__((packed));
711
712
713
714
715
716
717
718
719
720int sclp_chp_read_info(struct sclp_chp_info *info)
721{
722 struct chp_info_sccb *sccb;
723 int rc;
724
725 if (!SCLP_HAS_CHP_INFO)
726 return -EOPNOTSUPP;
727
728 sccb = (struct chp_info_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
729 if (!sccb)
730 return -ENOMEM;
731 sccb->header.length = sizeof(*sccb);
732 rc = sclp_sync_request(SCLP_CMDW_READ_CHPATH_INFORMATION, sccb);
733 if (rc)
734 goto out;
735 if (sccb->header.response_code != 0x0010) {
736 pr_warning("read channel-path info failed "
737 "(response=0x%04x)\n", sccb->header.response_code);
738 rc = -EIO;
739 goto out;
740 }
741 memcpy(info->recognized, sccb->recognized, SCLP_CHP_INFO_MASK_SIZE);
742 memcpy(info->standby, sccb->standby, SCLP_CHP_INFO_MASK_SIZE);
743 memcpy(info->configured, sccb->configured, SCLP_CHP_INFO_MASK_SIZE);
744out:
745 free_page((unsigned long) sccb);
746 return rc;
747}
748