1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17#include <linux/init.h>
18#include <linux/types.h>
19#include <linux/slab.h>
20#include <linux/stddef.h>
21#include <linux/kernel.h>
22#include <linux/list.h>
23#include <linux/notifier.h>
24#include <linux/netdevice.h>
25#include <linux/rcupdate.h>
26#include <net/net_namespace.h>
27
28#include "security.h"
29#include "objsec.h"
30#include "netif.h"
31
/* Hash table sizing: bucket count (must remain a power of two so
 * sel_netif_hashfn() can mask with SIZE - 1) and a hard cap on the total
 * number of cached entries. */
#define SEL_NETIF_HASH_SIZE 64
#define SEL_NETIF_HASH_MAX 1024

/* One cached interface label: hash-bucket linkage, the security state
 * (ifindex + SID), and an RCU head for deferred freeing. */
struct sel_netif {
	struct list_head list;
	struct netif_security_struct nsec;
	struct rcu_head rcu_head;
};

/* Count of entries currently in the cache; updated under sel_netif_lock. */
static u32 sel_netif_total;
/* NOTE(review): sel_netif_list appears unused in this file — all entries
 * live in the sel_netif_hash buckets; confirm before removing. */
static LIST_HEAD(sel_netif_list);
/* Serializes all writers of the cache (insert/destroy/flush); readers use
 * RCU and take no lock. */
static DEFINE_SPINLOCK(sel_netif_lock);
/* RCU-protected hash buckets, initialized in sel_netif_init(). */
static struct list_head sel_netif_hash[SEL_NETIF_HASH_SIZE];
45
46
47
48
49
50
51
52
53
54
55static inline u32 sel_netif_hashfn(int ifindex)
56{
57 return (ifindex & (SEL_NETIF_HASH_SIZE - 1));
58}
59
60
61
62
63
64
65
66
67
68
/**
 * sel_netif_find - Search for an interface record
 * @ifindex: the network interface
 *
 * Description:
 * Search the interface table and return the matching record, or NULL if no
 * entry exists.  Callers must hold either the RCU read lock or
 * sel_netif_lock for the returned pointer to remain valid.
 *
 */
static inline struct sel_netif *sel_netif_find(int ifindex)
{
	int idx = sel_netif_hashfn(ifindex);
	struct sel_netif *netif;

	/* the _rcu walk is safe both for lockless readers and, trivially,
	 * for writers already holding sel_netif_lock */
	list_for_each_entry_rcu(netif, &sel_netif_hash[idx], list)
		if (likely(netif->nsec.ifindex == ifindex))
			return netif;

	return NULL;
}
82
83
84
85
86
87
88
89
90
91
92static int sel_netif_insert(struct sel_netif *netif)
93{
94 int idx;
95
96 if (sel_netif_total >= SEL_NETIF_HASH_MAX)
97 return -ENOSPC;
98
99 idx = sel_netif_hashfn(netif->nsec.ifindex);
100 list_add_rcu(&netif->list, &sel_netif_hash[idx]);
101 sel_netif_total++;
102
103 return 0;
104}
105
106
107
108
109
110
111
112
113
114
115
116static void sel_netif_free(struct rcu_head *p)
117{
118 struct sel_netif *netif = container_of(p, struct sel_netif, rcu_head);
119 kfree(netif);
120}
121
122
123
124
125
126
127
128
129
/**
 * sel_netif_destroy - Remove an interface record from the table
 * @netif: the existing record
 *
 * Description:
 * Unlink @netif from its hash bucket and schedule it for RCU-deferred
 * freeing; lockless readers may still be traversing the entry until a
 * grace period elapses.  The caller must hold sel_netif_lock.
 *
 */
static void sel_netif_destroy(struct sel_netif *netif)
{
	list_del_rcu(&netif->list);
	sel_netif_total--;
	/* the actual kfree() happens in sel_netif_free() after a grace period */
	call_rcu(&netif->rcu_head, sel_netif_free);
}
136
137
138
139
140
141
142
143
144
145
146
147
148
/**
 * sel_netif_sid_slow - Lookup the SID of a network interface using the policy
 * @ifindex: the network interface
 * @sid: interface SID
 *
 * Description:
 * Determine the SID of a network interface by querying the security policy
 * and add the result to the interface table to speed up future queries.
 * Returns zero on success, negative values on failure.
 *
 */
static int sel_netif_sid_slow(int ifindex, u32 *sid)
{
	int ret;
	struct sel_netif *netif;
	struct sel_netif *new = NULL;
	struct net_device *dev;

	/* only the initial network namespace is consulted here — this file
	 * has no per-namespace state */
	dev = dev_get_by_index(&init_net, ifindex);
	if (unlikely(dev == NULL)) {
		printk(KERN_WARNING
		       "SELinux: failure in sel_netif_sid_slow(),"
		       " invalid network interface (%d)\n", ifindex);
		return -ENOENT;
	}

	spin_lock_bh(&sel_netif_lock);
	/* re-check under the lock: another CPU may have cached this ifindex
	 * between the caller's lockless lookup and here */
	netif = sel_netif_find(ifindex);
	if (netif != NULL) {
		*sid = netif->nsec.sid;
		ret = 0;
		goto out;
	}
	/* GFP_ATOMIC: we are inside a BH-disabled spinlock */
	new = kzalloc(sizeof(*new), GFP_ATOMIC);
	if (new == NULL) {
		ret = -ENOMEM;
		goto out;
	}
	ret = security_netif_sid(dev->name, &new->nsec.sid);
	if (ret != 0)
		goto out;
	new->nsec.ifindex = ifindex;
	ret = sel_netif_insert(new);
	if (ret != 0)
		goto out;
	*sid = new->nsec.sid;

out:
	spin_unlock_bh(&sel_netif_lock);
	dev_put(dev);
	if (unlikely(ret)) {
		printk(KERN_WARNING
		       "SELinux: failure in sel_netif_sid_slow(),"
		       " unable to determine network interface label (%d)\n",
		       ifindex);
		/* on every failure path 'new' was never inserted into the
		 * table, so a direct kfree() (not RCU-deferred) is safe;
		 * kfree(NULL) is a no-op for the early failures */
		kfree(new);
	}
	return ret;
}
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214int sel_netif_sid(int ifindex, u32 *sid)
215{
216 struct sel_netif *netif;
217
218 rcu_read_lock();
219 netif = sel_netif_find(ifindex);
220 if (likely(netif != NULL)) {
221 *sid = netif->nsec.sid;
222 rcu_read_unlock();
223 return 0;
224 }
225 rcu_read_unlock();
226
227 return sel_netif_sid_slow(ifindex, sid);
228}
229
230
231
232
233
234
235
236
237
238
/**
 * sel_netif_kill - Remove an entry from the network interface table
 * @ifindex: the network interface
 *
 * Description:
 * Remove the entry matching @ifindex from the interface table, if one
 * exists; its memory is reclaimed via RCU in sel_netif_destroy().
 *
 */
static void sel_netif_kill(int ifindex)
{
	struct sel_netif *netif;

	rcu_read_lock();
	spin_lock_bh(&sel_netif_lock);
	netif = sel_netif_find(ifindex);
	if (netif)
		sel_netif_destroy(netif);
	spin_unlock_bh(&sel_netif_lock);
	rcu_read_unlock();
}
251
252
253
254
255
256
257
258
/**
 * sel_netif_flush - Flush the entire network interface table
 *
 * Description:
 * Remove all entries from the network interface table.
 *
 */
static void sel_netif_flush(void)
{
	int idx;
	struct sel_netif *netif;

	spin_lock_bh(&sel_netif_lock);
	for (idx = 0; idx < SEL_NETIF_HASH_SIZE; idx++)
		/* plain (non-_safe) iteration is OK here even though the loop
		 * body unlinks the current entry: list_del_rcu() preserves
		 * netif->list.next and the free is RCU-deferred, so the walk
		 * can still advance */
		list_for_each_entry(netif, &sel_netif_hash[idx], list)
			sel_netif_destroy(netif);
	spin_unlock_bh(&sel_netif_lock);
}
270
271static int sel_netif_avc_callback(u32 event, u32 ssid, u32 tsid,
272 u16 class, u32 perms, u32 *retained)
273{
274 if (event == AVC_CALLBACK_RESET) {
275 sel_netif_flush();
276 synchronize_net();
277 }
278 return 0;
279}
280
281static int sel_netif_netdev_notifier_handler(struct notifier_block *this,
282 unsigned long event, void *ptr)
283{
284 struct net_device *dev = ptr;
285
286 if (dev_net(dev) != &init_net)
287 return NOTIFY_DONE;
288
289 if (event == NETDEV_DOWN)
290 sel_netif_kill(dev->ifindex);
291
292 return NOTIFY_DONE;
293}
294
/* Notifier block for netdevice state changes; registered in sel_netif_init(). */
static struct notifier_block sel_netif_netdev_notifier = {
	.notifier_call = sel_netif_netdev_notifier_handler,
};
298
/* One-time boot initialization: set up the hash buckets and register for
 * netdevice and AVC (policy reset) notifications.  A no-op when SELinux is
 * disabled. */
static __init int sel_netif_init(void)
{
	int i, err;

	if (!selinux_enabled)
		return 0;

	for (i = 0; i < SEL_NETIF_HASH_SIZE; i++)
		INIT_LIST_HEAD(&sel_netif_hash[i]);

	/* NOTE(review): the return value is ignored here — registration
	 * failure at boot is treated as non-fatal; confirm intended */
	register_netdevice_notifier(&sel_netif_netdev_notifier);

	err = avc_add_callback(sel_netif_avc_callback, AVC_CALLBACK_RESET,
			       SECSID_NULL, SECSID_NULL, SECCLASS_NULL, 0);
	if (err)
		/* without reset callbacks the cache could serve stale SIDs
		 * across a policy reload, so this is fatal */
		panic("avc_add_callback() failed, error %d\n", err);

	return err;
}
318
319__initcall(sel_netif_init);
320
321