1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17#include <linux/init.h>
18#include <linux/types.h>
19#include <linux/stddef.h>
20#include <linux/kernel.h>
21#include <linux/list.h>
22#include <linux/notifier.h>
23#include <linux/netdevice.h>
24#include <linux/rcupdate.h>
25#include <net/net_namespace.h>
26
27#include "security.h"
28#include "objsec.h"
29#include "netif.h"
30
/* number of hash buckets; must stay a power of two for sel_netif_hashfn() */
#define SEL_NETIF_HASH_SIZE 64
/* cap on the total number of cached interface records across all buckets */
#define SEL_NETIF_HASH_MAX 1024

/* one cached interface label; linked into a sel_netif_hash bucket via list,
 * freed through rcu_head after an RCU grace period */
struct sel_netif {
	struct list_head list;
	struct netif_security_struct nsec;
	struct rcu_head rcu_head;
};

/* number of entries currently in the table (writers hold sel_netif_lock) */
static u32 sel_netif_total;
/* NOTE(review): sel_netif_list appears unused in this file — confirm before removing */
static LIST_HEAD(sel_netif_list);
/* serializes all writers of the hash table and sel_netif_total */
static DEFINE_SPINLOCK(sel_netif_lock);
/* bucket heads; readers traverse them under RCU */
static struct list_head sel_netif_hash[SEL_NETIF_HASH_SIZE];
44
45
46
47
48
49
50
51
52
53
54static inline u32 sel_netif_hashfn(int ifindex)
55{
56 return (ifindex & (SEL_NETIF_HASH_SIZE - 1));
57}
58
59
60
61
62
63
64
65
66
67
68static inline struct sel_netif *sel_netif_find(int ifindex)
69{
70 int idx = sel_netif_hashfn(ifindex);
71 struct sel_netif *netif;
72
73 list_for_each_entry_rcu(netif, &sel_netif_hash[idx], list)
74
75
76 if (likely(netif->nsec.ifindex == ifindex))
77 return netif;
78
79 return NULL;
80}
81
82
83
84
85
86
87
88
89
90
91static int sel_netif_insert(struct sel_netif *netif)
92{
93 int idx;
94
95 if (sel_netif_total >= SEL_NETIF_HASH_MAX)
96 return -ENOSPC;
97
98 idx = sel_netif_hashfn(netif->nsec.ifindex);
99 list_add_rcu(&netif->list, &sel_netif_hash[idx]);
100 sel_netif_total++;
101
102 return 0;
103}
104
105
106
107
108
109
110
111
112
113
114
115static void sel_netif_free(struct rcu_head *p)
116{
117 struct sel_netif *netif = container_of(p, struct sel_netif, rcu_head);
118 kfree(netif);
119}
120
121
122
123
124
125
126
127
128
/*
 * sel_netif_destroy - unlink a record and schedule its release
 * @netif: the record to remove
 *
 * Caller must hold sel_netif_lock.  list_del_rcu() unlinks the entry
 * while leaving it traversable by concurrent RCU readers; the actual
 * kfree() is deferred to sel_netif_free() after a grace period.
 */
static void sel_netif_destroy(struct sel_netif *netif)
{
	list_del_rcu(&netif->list);
	sel_netif_total--;
	call_rcu(&netif->rcu_head, sel_netif_free);
}
135
136
137
138
139
140
141
142
143
144
145
146
147
/*
 * sel_netif_sid_slow - cache-miss path: label an interface and cache it
 * @ifindex: the network interface index
 * @sid: out parameter, receives the interface SID
 *
 * Asks the security server for the interface's SID (by device name) and
 * inserts a new record into the hash table.  Returns 0 on success or a
 * negative errno; on any failure a warning is logged and no record is
 * left behind.
 */
static int sel_netif_sid_slow(int ifindex, u32 *sid)
{
	int ret;
	struct sel_netif *netif;
	struct sel_netif *new = NULL;
	struct net_device *dev;

	/* resolve the index to a device so we can label by name;
	 * dev_get_by_index() takes a reference, dropped via dev_put() below */
	dev = dev_get_by_index(&init_net, ifindex);
	if (unlikely(dev == NULL)) {
		printk(KERN_WARNING
		       "SELinux: failure in sel_netif_sid_slow(),"
		       " invalid network interface (%d)\n", ifindex);
		return -ENOENT;
	}

	spin_lock_bh(&sel_netif_lock);
	/* re-check under the lock: another CPU may have raced us here and
	 * already inserted a record for this ifindex */
	netif = sel_netif_find(ifindex);
	if (netif != NULL) {
		*sid = netif->nsec.sid;
		ret = 0;
		goto out;
	}
	/* GFP_ATOMIC: we hold a spinlock with bottom halves disabled */
	new = kzalloc(sizeof(*new), GFP_ATOMIC);
	if (new == NULL) {
		ret = -ENOMEM;
		goto out;
	}
	ret = security_netif_sid(dev->name, &new->nsec.sid);
	if (ret != 0)
		goto out;
	new->nsec.ifindex = ifindex;
	ret = sel_netif_insert(new);
	if (ret != 0)
		goto out;
	*sid = new->nsec.sid;

out:
	spin_unlock_bh(&sel_netif_lock);
	dev_put(dev);
	if (unlikely(ret)) {
		printk(KERN_WARNING
		       "SELinux: failure in sel_netif_sid_slow(),"
		       " unable to determine network interface label (%d)\n",
		       ifindex);
		/* 'new' was never inserted on any error path, so freeing it
		 * here is safe (kfree(NULL) is a no-op) */
		kfree(new);
	}
	return ret;
}
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213int sel_netif_sid(int ifindex, u32 *sid)
214{
215 struct sel_netif *netif;
216
217 rcu_read_lock();
218 netif = sel_netif_find(ifindex);
219 if (likely(netif != NULL)) {
220 *sid = netif->nsec.sid;
221 rcu_read_unlock();
222 return 0;
223 }
224 rcu_read_unlock();
225
226 return sel_netif_sid_slow(ifindex, sid);
227}
228
229
230
231
232
233
234
235
236
237
/*
 * sel_netif_kill - remove an interface record from the table
 * @ifindex: the network interface index
 *
 * Drops the cached record for @ifindex, if any.  sel_netif_lock
 * serializes against other writers; rcu_read_lock() is also taken
 * because sel_netif_find() walks an RCU-protected list.
 */
static void sel_netif_kill(int ifindex)
{
	rcu_read_lock();
	spin_lock_bh(&sel_netif_lock);
	netif = sel_netif_find(ifindex);
	if (netif)
		sel_netif_destroy(netif);
	spin_unlock_bh(&sel_netif_lock);
	rcu_read_unlock();
}
250
251
252
253
254
255
256
257
/*
 * sel_netif_flush - flush the entire interface table
 *
 * Empties every hash bucket.  Plain list_for_each_entry() (not _safe)
 * is sufficient even though sel_netif_destroy() unlinks each entry:
 * list_del_rcu() leaves the entry's ->next pointer intact and the
 * memory is only freed after an RCU grace period, so the traversal can
 * still advance past destroyed entries.
 */
static void sel_netif_flush(void)
{
	int idx;
	struct sel_netif *netif;

	spin_lock_bh(&sel_netif_lock);
	for (idx = 0; idx < SEL_NETIF_HASH_SIZE; idx++)
		list_for_each_entry(netif, &sel_netif_hash[idx], list)
			sel_netif_destroy(netif);
	spin_unlock_bh(&sel_netif_lock);
}
269
270static int sel_netif_avc_callback(u32 event, u32 ssid, u32 tsid,
271 u16 class, u32 perms, u32 *retained)
272{
273 if (event == AVC_CALLBACK_RESET) {
274 sel_netif_flush();
275 synchronize_net();
276 }
277 return 0;
278}
279
280static int sel_netif_netdev_notifier_handler(struct notifier_block *this,
281 unsigned long event, void *ptr)
282{
283 struct net_device *dev = ptr;
284
285 if (dev_net(dev) != &init_net)
286 return NOTIFY_DONE;
287
288 if (event == NETDEV_DOWN)
289 sel_netif_kill(dev->ifindex);
290
291 return NOTIFY_DONE;
292}
293
/* registered at init so interface-down events invalidate cached labels */
static struct notifier_block sel_netif_netdev_notifier = {
	.notifier_call = sel_netif_netdev_notifier_handler,
};
297
298static __init int sel_netif_init(void)
299{
300 int i, err;
301
302 if (!selinux_enabled)
303 return 0;
304
305 for (i = 0; i < SEL_NETIF_HASH_SIZE; i++)
306 INIT_LIST_HEAD(&sel_netif_hash[i]);
307
308 register_netdevice_notifier(&sel_netif_netdev_notifier);
309
310 err = avc_add_callback(sel_netif_avc_callback, AVC_CALLBACK_RESET,
311 SECSID_NULL, SECSID_NULL, SECCLASS_NULL, 0);
312 if (err)
313 panic("avc_add_callback() failed, error %d\n", err);
314
315 return err;
316}
317
318__initcall(sel_netif_init);
319
320