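/*
 * Routines to identify caches on Intel CPUs, with CPUID(4) emulation
 * for AMD, and a sysfs interface exporting the results.
 */
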
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/sched.h>

#include <asm/processor.h>
#include <asm/smp.h>

#define LVL_1_INST	1
#define LVL_1_DATA	2
#define LVL_2		3
#define LVL_3		4
#define LVL_TRACE	5

struct _cache_table {
	unsigned char descriptor;
	char cache_type;
	short size;
};

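/* CPUID leaf 2 cache descriptors we care about (sizes in KB; TLB entries are ignored) */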
static struct _cache_table cache_table[] __cpuinitdata = {
	{ 0x06, LVL_1_INST, 8 },
	{ 0x08, LVL_1_INST, 16 },
	{ 0x0a, LVL_1_DATA, 8 },
	{ 0x0c, LVL_1_DATA, 16 },
	{ 0x22, LVL_3, 512 },
	{ 0x23, LVL_3, 1024 },
	{ 0x25, LVL_3, 2048 },
	{ 0x29, LVL_3, 4096 },
	{ 0x2c, LVL_1_DATA, 32 },
	{ 0x30, LVL_1_INST, 32 },
	{ 0x39, LVL_2, 128 },
	{ 0x3a, LVL_2, 192 },
	{ 0x3b, LVL_2, 128 },
	{ 0x3c, LVL_2, 256 },
	{ 0x3d, LVL_2, 384 },
	{ 0x3e, LVL_2, 512 },
	{ 0x3f, LVL_2, 256 },
	{ 0x41, LVL_2, 128 },
	{ 0x42, LVL_2, 256 },
	{ 0x43, LVL_2, 512 },
	{ 0x44, LVL_2, 1024 },
	{ 0x45, LVL_2, 2048 },
	{ 0x46, LVL_3, 4096 },
	{ 0x47, LVL_3, 8192 },
	{ 0x49, LVL_3, 4096 },
	{ 0x4a, LVL_3, 6144 },
	{ 0x4b, LVL_3, 8192 },
	{ 0x4c, LVL_3, 12288 },
	{ 0x4d, LVL_3, 16384 },
	{ 0x60, LVL_1_DATA, 16 },
	{ 0x66, LVL_1_DATA, 8 },
	{ 0x67, LVL_1_DATA, 16 },
	{ 0x68, LVL_1_DATA, 32 },
	{ 0x70, LVL_TRACE, 12 },
	{ 0x71, LVL_TRACE, 16 },
	{ 0x72, LVL_TRACE, 32 },
	{ 0x73, LVL_TRACE, 64 },
	{ 0x78, LVL_2, 1024 },
	{ 0x79, LVL_2, 128 },
	{ 0x7a, LVL_2, 256 },
	{ 0x7b, LVL_2, 512 },
	{ 0x7c, LVL_2, 1024 },
	{ 0x7d, LVL_2, 2048 },
	{ 0x7f, LVL_2, 512 },
	{ 0x82, LVL_2, 256 },
	{ 0x83, LVL_2, 512 },
	{ 0x84, LVL_2, 1024 },
	{ 0x85, LVL_2, 2048 },
	{ 0x86, LVL_2, 512 },
	{ 0x87, LVL_2, 1024 },
	{ 0x00, 0, 0 }		/* table terminator */
};

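/*
 * Deterministic cache parameters, CPUID leaf 4.  The bitfields below
 * mirror the EAX/EBX/ECX register layout documented for that leaf.
 */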
enum _cache_type {
	CACHE_TYPE_NULL = 0,
	CACHE_TYPE_DATA = 1,
	CACHE_TYPE_INST = 2,
	CACHE_TYPE_UNIFIED = 3
};

union _cpuid4_leaf_eax {
	struct {
		enum _cache_type type:5;
		unsigned int level:3;
		unsigned int is_self_initializing:1;
		unsigned int is_fully_associative:1;
		unsigned int reserved:4;
		unsigned int num_threads_sharing:12;
		unsigned int num_cores_on_die:6;
	} split;
	u32 full;
};

union _cpuid4_leaf_ebx {
	struct {
		unsigned int coherency_line_size:12;
		unsigned int physical_line_partition:10;
		unsigned int ways_of_associativity:10;
	} split;
	u32 full;
};

union _cpuid4_leaf_ecx {
	struct {
		unsigned int number_of_sets:32;
	} split;
	u32 full;
};

struct _cpuid4_info {
	union _cpuid4_leaf_eax eax;
	union _cpuid4_leaf_ebx ebx;
	union _cpuid4_leaf_ecx ecx;
	unsigned long size;
	cpumask_t shared_cpu_map;
};

unsigned short num_cache_leaves;

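/*
 * AMD CPUs do not implement CPUID leaf 4.  Emulate it here from the
 * AMD-specific cache leaves 0x80000005/0x80000006 so that the common
 * code above can be used unchanged.  This assumes an unshared L2 and
 * no SMT, which holds on the AMD parts this code targets.
 */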
union l1_cache {
	struct {
		unsigned line_size:8;
		unsigned lines_per_tag:8;
		unsigned assoc:8;
		unsigned size_in_kb:8;
	};
	unsigned val;
};

union l2_cache {
	struct {
		unsigned line_size:8;
		unsigned lines_per_tag:4;
		unsigned assoc:4;
		unsigned size_in_kb:16;
	};
	unsigned val;
};

union l3_cache {
	struct {
		unsigned line_size:8;
		unsigned lines_per_tag:4;
		unsigned assoc:4;
		unsigned res:2;
		unsigned size_encoded:14;
	};
	unsigned val;
};

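/*
 * Map AMD's 4-bit associativity encoding to an actual way count;
 * 0xf denotes a fully associative cache.  levels[]/types[] translate
 * an emulated leaf index into the CPUID4 level and cache-type fields.
 */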
static unsigned short assocs[] __cpuinitdata = {
	[1] = 1, [2] = 2, [4] = 4, [6] = 8,
	[8] = 16, [0xa] = 32, [0xb] = 48,
	[0xc] = 64,
	[0xf] = 0xffff		/* fully associative */
};

static unsigned char levels[] __cpuinitdata = { 1, 1, 2, 3 };
static unsigned char types[] __cpuinitdata = { 1, 2, 3, 3 };

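/*
 * Fill in CPUID4-style eax/ebx/ecx values for cache @leaf (0 = L1D,
 * 1 = L1I, 2 = L2, 3 = L3) from the AMD cache-descriptor leaves.
 */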
static void __cpuinit amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
		       union _cpuid4_leaf_ebx *ebx,
		       union _cpuid4_leaf_ecx *ecx)
{
	unsigned dummy;
	unsigned line_size, lines_per_tag, assoc, size_in_kb;
	union l1_cache l1i, l1d;
	union l2_cache l2;
	union l3_cache l3;
	union l1_cache *l1 = &l1d;

	eax->full = 0;
	ebx->full = 0;
	ecx->full = 0;

	cpuid(0x80000005, &dummy, &dummy, &l1d.val, &l1i.val);
	cpuid(0x80000006, &dummy, &dummy, &l2.val, &l3.val);

	switch (leaf) {
	case 1:
		l1 = &l1i;
		/* fall through */
	case 0:
		if (!l1->val)
			return;
		assoc = l1->assoc;
		line_size = l1->line_size;
		lines_per_tag = l1->lines_per_tag;
		size_in_kb = l1->size_in_kb;
		break;
	case 2:
		if (!l2.val)
			return;
		assoc = l2.assoc;
		line_size = l2.line_size;
		lines_per_tag = l2.lines_per_tag;
		/* use x86_cache_size, which already has errata corrections applied */
		size_in_kb = current_cpu_data.x86_cache_size;
		break;
	case 3:
		if (!l3.val)
			return;
		assoc = l3.assoc;
		line_size = l3.line_size;
		lines_per_tag = l3.lines_per_tag;
		size_in_kb = l3.size_encoded * 512;
		break;
	default:
		return;
	}

	eax->split.is_self_initializing = 1;
	eax->split.type = types[leaf];
	eax->split.level = levels[leaf];
	if (leaf == 3)
		eax->split.num_threads_sharing = current_cpu_data.x86_max_cores - 1;
	else
		eax->split.num_threads_sharing = 0;
	eax->split.num_cores_on_die = current_cpu_data.x86_max_cores - 1;

	if (assoc == 0xf)
		eax->split.is_fully_associative = 1;
	ebx->split.coherency_line_size = line_size - 1;
	ebx->split.ways_of_associativity = assocs[assoc] - 1;
	ebx->split.physical_line_partition = lines_per_tag - 1;
	ecx->split.number_of_sets = (size_in_kb * 1024) / line_size /
		(ebx->split.ways_of_associativity + 1) - 1;
}

static int __cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
{
	union _cpuid4_leaf_eax eax;
	union _cpuid4_leaf_ebx ebx;
	union _cpuid4_leaf_ecx ecx;
	unsigned edx;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
		amd_cpuid4(index, &eax, &ebx, &ecx);
	else
		cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
	if (eax.split.type == CACHE_TYPE_NULL)
		return -EIO;

	this_leaf->eax = eax;
	this_leaf->ebx = ebx;
	this_leaf->ecx = ecx;
	this_leaf->size = (ecx.split.number_of_sets + 1) *
		(ebx.split.coherency_line_size + 1) *
		(ebx.split.physical_line_partition + 1) *
		(ebx.split.ways_of_associativity + 1);
	return 0;
}

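/* Count CPUID4 sub-leaves by iterating until a CACHE_TYPE_NULL leaf is returned. */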
static int __cpuinit find_num_cache_leaves(void)
{
	unsigned int eax, ebx, ecx, edx;
	union _cpuid4_leaf_eax cache_eax;
	int i = -1;

	do {
		++i;
		cpuid_count(4, i, &eax, &ebx, &ecx, &edx);
		cache_eax.full = eax;
	} while (cache_eax.split.type != CACHE_TYPE_NULL);
	return i;
}

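/*
 * Detect cache sizes for this CPU and report them on the console.
 * Returns the L2 size in KB and records the highest-level cache size
 * in c->x86_cache_size.
 */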
unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
{
	/* Cache sizes */
	unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0;
	unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
	unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
	unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
#ifdef CONFIG_X86_HT
	unsigned int cpu = c->cpu_index;
#endif

	if (c->cpuid_level > 3) {
		static int is_initialized;

		if (is_initialized == 0) {
			/* Init num_cache_leaves from boot CPU */
			num_cache_leaves = find_num_cache_leaves();
			is_initialized++;
		}

		/*
		 * Whenever possible use cpuid(4), deterministic cache
		 * parameters cpuid leaf to find the cache details
		 */
		for (i = 0; i < num_cache_leaves; i++) {
			struct _cpuid4_info this_leaf;
			int retval;

			retval = cpuid4_cache_lookup(i, &this_leaf);
			if (retval >= 0) {
				switch (this_leaf.eax.split.level) {
				case 1:
					if (this_leaf.eax.split.type ==
							CACHE_TYPE_DATA)
						new_l1d = this_leaf.size/1024;
					else if (this_leaf.eax.split.type ==
							CACHE_TYPE_INST)
						new_l1i = this_leaf.size/1024;
					break;
				case 2:
					new_l2 = this_leaf.size/1024;
					num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
					index_msb = get_count_order(num_threads_sharing);
					l2_id = c->apicid >> index_msb;
					break;
				case 3:
					new_l3 = this_leaf.size/1024;
					num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
					index_msb = get_count_order(num_threads_sharing);
					l3_id = c->apicid >> index_msb;
					break;
				default:
					break;
				}
			}
		}
	}

	/*
	 * Don't use cpuid2 if cpuid4 is supported.  For P4, we use cpuid2
	 * for the trace cache only.
	 */
	if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) {
		/* supports eax=2 call */
		int i, j, n;
		int regs[4];
		unsigned char *dp = (unsigned char *)regs;
		int only_trace = 0;

		if (num_cache_leaves != 0 && c->x86 == 15)
			only_trace = 1;

		/* Number of times to iterate */
		n = cpuid_eax(2) & 0xFF;

		for (i = 0; i < n; i++) {
			cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);

			/* If bit 31 is set, this is an unknown format */
			for (j = 0; j < 3; j++)
				if (regs[j] < 0)
					regs[j] = 0;

			/* Byte 0 is level count, not a descriptor */
			for (j = 1; j < 16; j++) {
				unsigned char des = dp[j];
				unsigned char k = 0;

				/* look up this descriptor in the table */
				while (cache_table[k].descriptor != 0) {
					if (cache_table[k].descriptor == des) {
						if (only_trace && cache_table[k].cache_type != LVL_TRACE)
							break;
						switch (cache_table[k].cache_type) {
						case LVL_1_INST:
							l1i += cache_table[k].size;
							break;
						case LVL_1_DATA:
							l1d += cache_table[k].size;
							break;
						case LVL_2:
							l2 += cache_table[k].size;
							break;
						case LVL_3:
							l3 += cache_table[k].size;
							break;
						case LVL_TRACE:
							trace += cache_table[k].size;
							break;
						}

						break;
					}

					k++;
				}
			}
		}
	}

	if (new_l1d)
		l1d = new_l1d;

	if (new_l1i)
		l1i = new_l1i;

	if (new_l2) {
		l2 = new_l2;
#ifdef CONFIG_X86_HT
		per_cpu(cpu_llc_id, cpu) = l2_id;
#endif
	}

	if (new_l3) {
		l3 = new_l3;
#ifdef CONFIG_X86_HT
		per_cpu(cpu_llc_id, cpu) = l3_id;
#endif
	}

	if (trace)
		printk(KERN_INFO "CPU: Trace cache: %dK uops", trace);
	else if (l1i)
		printk(KERN_INFO "CPU: L1 I cache: %dK", l1i);

	if (l1d)
		printk(", L1 D cache: %dK\n", l1d);
	else
		printk("\n");

	if (l2)
		printk(KERN_INFO "CPU: L2 cache: %dK\n", l2);

	if (l3)
		printk(KERN_INFO "CPU: L3 cache: %dK\n", l3);

	c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));

	return l2;
}

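/* pointer to _cpuid4_info array (for each cache leaf) */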
static struct _cpuid4_info *cpuid4_info[NR_CPUS];
#define CPUID4_INFO_IDX(x, y)	(&((cpuid4_info[x])[y]))

#ifdef CONFIG_SMP
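/*
 * Record, for each cache leaf, which online CPUs share it.  Threads
 * whose APIC IDs match above the num_threads_sharing bits share the
 * cache; update both this CPU's map and each sibling's map.
 */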
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
{
	struct _cpuid4_info *this_leaf, *sibling_leaf;
	unsigned long num_threads_sharing;
	int index_msb, i;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	this_leaf = CPUID4_INFO_IDX(cpu, index);
	num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing;

	if (num_threads_sharing == 1)
		cpu_set(cpu, this_leaf->shared_cpu_map);
	else {
		index_msb = get_count_order(num_threads_sharing);

		for_each_online_cpu(i) {
			if (cpu_data(i).apicid >> index_msb ==
			    c->apicid >> index_msb) {
				cpu_set(i, this_leaf->shared_cpu_map);
				if (i != cpu && cpuid4_info[i]) {
					sibling_leaf = CPUID4_INFO_IDX(i, index);
					cpu_set(cpu, sibling_leaf->shared_cpu_map);
				}
			}
		}
	}
}

static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
{
	struct _cpuid4_info *this_leaf, *sibling_leaf;
	int sibling;

	this_leaf = CPUID4_INFO_IDX(cpu, index);
	for_each_cpu_mask(sibling, this_leaf->shared_cpu_map) {
		sibling_leaf = CPUID4_INFO_IDX(sibling, index);
		cpu_clear(cpu, sibling_leaf->shared_cpu_map);
	}
}
#else
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) {}
static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index) {}
#endif

static void __cpuinit free_cache_attributes(unsigned int cpu)
{
	int i;

	for (i = 0; i < num_cache_leaves; i++)
		cache_remove_shared_cpu_map(cpu, i);

	kfree(cpuid4_info[cpu]);
	cpuid4_info[cpu] = NULL;
}

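/*
 * Read the cache information for every leaf on @cpu.  The cpuid
 * instructions must execute on the target CPU itself, so temporarily
 * bind the current task to it and restore the old mask afterwards.
 */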
static int __cpuinit detect_cache_attributes(unsigned int cpu)
{
	struct _cpuid4_info *this_leaf;
	unsigned long j;
	int retval;
	cpumask_t oldmask;

	if (num_cache_leaves == 0)
		return -ENOENT;

	cpuid4_info[cpu] = kzalloc(
		sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
	if (cpuid4_info[cpu] == NULL)
		return -ENOMEM;

	oldmask = current->cpus_allowed;
	retval = set_cpus_allowed(current, cpumask_of_cpu(cpu));
	if (retval)
		goto out;

	/* Do cpuid and store the results */
	for (j = 0; j < num_cache_leaves; j++) {
		this_leaf = CPUID4_INFO_IDX(cpu, j);
		retval = cpuid4_cache_lookup(j, this_leaf);
		if (unlikely(retval < 0)) {
			int i;

			for (i = 0; i < j; i++)
				cache_remove_shared_cpu_map(cpu, i);
			break;
		}
		cache_shared_cpu_map_setup(cpu, j);
	}
	set_cpus_allowed(current, oldmask);

out:
	if (retval) {
		kfree(cpuid4_info[cpu]);
		cpuid4_info[cpu] = NULL;
	}

	return retval;
}

#ifdef CONFIG_SYSFS

#include <linux/kobject.h>
#include <linux/sysfs.h>

extern struct sysdev_class cpu_sysdev_class;

/* pointer to kobject for cpuX/cache */
static struct kobject *cache_kobject[NR_CPUS];

struct _index_kobject {
	struct kobject kobj;
	unsigned int cpu;
	unsigned short index;
};

/* pointer to array of kobjects for cpuX/cache/indexY */
static struct _index_kobject *index_kobject[NR_CPUS];
#define INDEX_KOBJECT_PTR(x, y)	(&((index_kobject[x])[y]))

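/*
 * Generate the sysfs show routines.  CPUID4 reports most of these
 * fields as "value - 1", so @val restores the offset before printing
 * (0 for fields that are reported directly).
 */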
#define show_one_plus(file_name, object, val)				\
static ssize_t show_##file_name						\
			(struct _cpuid4_info *this_leaf, char *buf)	\
{									\
	return sprintf(buf, "%lu\n", (unsigned long)this_leaf->object + val); \
}

show_one_plus(level, eax.split.level, 0);
show_one_plus(coherency_line_size, ebx.split.coherency_line_size, 1);
show_one_plus(physical_line_partition, ebx.split.physical_line_partition, 1);
show_one_plus(ways_of_associativity, ebx.split.ways_of_associativity, 1);
show_one_plus(number_of_sets, ecx.split.number_of_sets, 1);

static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf)
{
	return sprintf(buf, "%luK\n", this_leaf->size / 1024);
}

static ssize_t show_shared_cpu_map(struct _cpuid4_info *this_leaf, char *buf)
{
	char mask_str[NR_CPUS];
	cpumask_scnprintf(mask_str, NR_CPUS, this_leaf->shared_cpu_map);
	return sprintf(buf, "%s\n", mask_str);
}

static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf)
{
	switch (this_leaf->eax.split.type) {
	case CACHE_TYPE_DATA:
		return sprintf(buf, "Data\n");
	case CACHE_TYPE_INST:
		return sprintf(buf, "Instruction\n");
	case CACHE_TYPE_UNIFIED:
		return sprintf(buf, "Unified\n");
	default:
		return sprintf(buf, "Unknown\n");
	}
}

struct _cache_attr {
	struct attribute attr;
	ssize_t (*show)(struct _cpuid4_info *, char *);
	ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count);
};

#define define_one_ro(_name)			\
static struct _cache_attr _name =		\
	__ATTR(_name, 0444, show_##_name, NULL)

define_one_ro(level);
define_one_ro(type);
define_one_ro(coherency_line_size);
define_one_ro(physical_line_partition);
define_one_ro(ways_of_associativity);
define_one_ro(number_of_sets);
define_one_ro(size);
define_one_ro(shared_cpu_map);

static struct attribute *default_attrs[] = {
	&type.attr,
	&level.attr,
	&coherency_line_size.attr,
	&physical_line_partition.attr,
	&ways_of_associativity.attr,
	&number_of_sets.attr,
	&size.attr,
	&shared_cpu_map.attr,
	NULL
};

#define to_object(k)	container_of(k, struct _index_kobject, kobj)
#define to_attr(a)	container_of(a, struct _cache_attr, attr)

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct _cache_attr *fattr = to_attr(attr);
	struct _index_kobject *this_leaf = to_object(kobj);
	ssize_t ret;

	ret = fattr->show ?
		fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
			buf) :
		0;
	return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	/* All attributes are read-only */
	return 0;
}

static struct sysfs_ops sysfs_ops = {
	.show = show,
	.store = store,
};

static struct kobj_type ktype_cache = {
	.sysfs_ops = &sysfs_ops,
	.default_attrs = default_attrs,
};

static struct kobj_type ktype_percpu_entry = {
	.sysfs_ops = &sysfs_ops,
};

static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu)
{
	kfree(cache_kobject[cpu]);
	kfree(index_kobject[cpu]);
	cache_kobject[cpu] = NULL;
	index_kobject[cpu] = NULL;
	free_cache_attributes(cpu);
}

static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
{
	int err;

	if (num_cache_leaves == 0)
		return -ENOENT;

	err = detect_cache_attributes(cpu);
	if (err)
		return err;

	/* Allocate all required memory */
	cache_kobject[cpu] = kzalloc(sizeof(struct kobject), GFP_KERNEL);
	if (unlikely(cache_kobject[cpu] == NULL))
		goto err_out;

	index_kobject[cpu] = kzalloc(
		sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL);
	if (unlikely(index_kobject[cpu] == NULL))
		goto err_out;

	return 0;

err_out:
	cpuid4_cache_sysfs_exit(cpu);
	return -ENOMEM;
}

static cpumask_t cache_dev_map = CPU_MASK_NONE;

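/* Add/Remove cache interface for CPU device */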
static int __cpuinit cache_add_dev(struct sys_device *sys_dev)
{
	unsigned int cpu = sys_dev->id;
	unsigned long i, j;
	struct _index_kobject *this_object;
	int retval;

	retval = cpuid4_cache_sysfs_init(cpu);
	if (unlikely(retval < 0))
		return retval;

	cache_kobject[cpu]->parent = &sys_dev->kobj;
	kobject_set_name(cache_kobject[cpu], "%s", "cache");
	cache_kobject[cpu]->ktype = &ktype_percpu_entry;
	retval = kobject_register(cache_kobject[cpu]);
	if (retval < 0) {
		cpuid4_cache_sysfs_exit(cpu);
		return retval;
	}

	for (i = 0; i < num_cache_leaves; i++) {
		this_object = INDEX_KOBJECT_PTR(cpu, i);
		this_object->cpu = cpu;
		this_object->index = i;
		this_object->kobj.parent = cache_kobject[cpu];
		kobject_set_name(&(this_object->kobj), "index%1lu", i);
		this_object->kobj.ktype = &ktype_cache;
		retval = kobject_register(&(this_object->kobj));
		if (unlikely(retval)) {
			/* Unwind the kobjects registered so far */
			for (j = 0; j < i; j++)
				kobject_unregister(&(INDEX_KOBJECT_PTR(cpu, j)->kobj));
			kobject_unregister(cache_kobject[cpu]);
			cpuid4_cache_sysfs_exit(cpu);
			break;
		}
	}
	if (!retval)
		cpu_set(cpu, cache_dev_map);

	return retval;
}

static void __cpuinit cache_remove_dev(struct sys_device *sys_dev)
{
	unsigned int cpu = sys_dev->id;
	unsigned long i;

	if (cpuid4_info[cpu] == NULL)
		return;
	if (!cpu_isset(cpu, cache_dev_map))
		return;
	cpu_clear(cpu, cache_dev_map);

	for (i = 0; i < num_cache_leaves; i++)
		kobject_unregister(&(INDEX_KOBJECT_PTR(cpu, i)->kobj));
	kobject_unregister(cache_kobject[cpu]);
	cpuid4_cache_sysfs_exit(cpu);
}

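/*
 * CPU hotplug notifier: build or tear down the sysfs cache hierarchy
 * as CPUs come online or go offline.
 */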
static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
					    unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct sys_device *sys_dev;

	sys_dev = get_cpu_sysdev(cpu);
	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		cache_add_dev(sys_dev);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		cache_remove_dev(sys_dev);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier = {
	.notifier_call = cacheinfo_cpu_callback,
};

static int __cpuinit cache_sysfs_init(void)
{
	int i;

	if (num_cache_leaves == 0)
		return 0;

	for_each_online_cpu(i) {
		int err;
		struct sys_device *sys_dev = get_cpu_sysdev(i);

		err = cache_add_dev(sys_dev);
		if (err)
			return err;
	}
	register_hotcpu_notifier(&cacheinfo_cpu_notifier);
	return 0;
}

device_initcall(cache_sysfs_init);

#endif	/* CONFIG_SYSFS */