/*
 * Network interface table.
 *
 * Network interfaces (devices) do not have a security field, so we
 * maintain a table indexed by ifindex of SIDs associated with a device.
 */
#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/rcupdate.h>
#include <net/net_namespace.h>

#include "security.h"
#include "objsec.h"
#include "netif.h"

#define SEL_NETIF_HASH_SIZE	64
#define SEL_NETIF_HASH_MAX	1024

struct sel_netif {
	struct list_head list;
	struct netif_security_struct nsec;
	struct rcu_head rcu_head;
};

static u32 sel_netif_total;
static LIST_HEAD(sel_netif_list);
static DEFINE_SPINLOCK(sel_netif_lock);
static struct list_head sel_netif_hash[SEL_NETIF_HASH_SIZE];

/**
 * sel_netif_hashfn - Hashing function for the interface table
 * @ifindex: the network interface
 *
 * Description:
 * This is the hashing function for the network interface table, it returns
 * the bucket number for the given interface.
 *
 */
static inline u32 sel_netif_hashfn(int ifindex)
{
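	/* SEL_NETIF_HASH_SIZE is a power of two, so masking with
	 * (SEL_NETIF_HASH_SIZE - 1) is a cheap modulo of the ifindex */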
	return (ifindex & (SEL_NETIF_HASH_SIZE - 1));
}

/**
 * sel_netif_find - Search for an interface record
 * @ifindex: the network interface
 *
 * Description:
 * Search the network interface table and return the record matching
 * @ifindex.  If an entry can not be found in the table return NULL.
 *
 */
static inline struct sel_netif *sel_netif_find(int ifindex)
{
	int idx = sel_netif_hashfn(ifindex);
	struct sel_netif *netif;

	list_for_each_entry_rcu(netif, &sel_netif_hash[idx], list)
		/* all of the devices should normally fit in the hash, so we
		 * optimize for that case */
		if (likely(netif->nsec.ifindex == ifindex))
			return netif;

	return NULL;
}

/**
 * sel_netif_insert - Insert a new interface into the table
 * @netif: the new interface record
 *
 * Description:
 * Add a new interface record to the network interface hash table.  Returns
 * zero on success, negative values on failure.
 *
 */
static int sel_netif_insert(struct sel_netif *netif)
{
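	/* callers are expected to hold sel_netif_lock */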
	int idx;

	if (sel_netif_total >= SEL_NETIF_HASH_MAX)
		return -ENOSPC;

	idx = sel_netif_hashfn(netif->nsec.ifindex);
	list_add_rcu(&netif->list, &sel_netif_hash[idx]);
	sel_netif_total++;

	return 0;
}

/**
 * sel_netif_destroy - Remove an interface record from the table
 * @netif: the existing interface record
 *
 * Description:
 * Remove an existing interface record from the network interface table.
 *
 */
static void sel_netif_destroy(struct sel_netif *netif)
{
	list_del_rcu(&netif->list);
	sel_netif_total--;
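	/* the free is deferred until all RCU readers are done with the entry */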
	kfree_rcu(netif, rcu_head);
}

/**
 * sel_netif_sid_slow - Lookup the SID of a network interface using the policy
 * @ifindex: the network interface
 * @sid: interface SID
 *
 * Description:
 * This function determines the SID of a network interface by querying the
 * security policy.  The result is added to the network interface table to
 * speed up future queries.  Returns zero on success, negative values on
 * failure.
 *
 */
static int sel_netif_sid_slow(int ifindex, u32 *sid)
{
	int ret;
	struct sel_netif *netif;
	struct sel_netif *new = NULL;
	struct net_device *dev;

	/* NOTE: we always use init's network namespace since we don't
	 * currently support containers */

	dev = dev_get_by_index(&init_net, ifindex);
	if (unlikely(dev == NULL)) {
		printk(KERN_WARNING
		       "SELinux: failure in sel_netif_sid_slow(),"
		       " invalid network interface (%d)\n", ifindex);
		return -ENOENT;
	}

	spin_lock_bh(&sel_netif_lock);
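	/* check the table again now that we hold the lock; another CPU may
	 * have added an entry for this ifindex in the meantime */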
	netif = sel_netif_find(ifindex);
	if (netif != NULL) {
		*sid = netif->nsec.sid;
		ret = 0;
		goto out;
	}
	new = kzalloc(sizeof(*new), GFP_ATOMIC);
	if (new == NULL) {
		ret = -ENOMEM;
		goto out;
	}
	ret = security_netif_sid(dev->name, &new->nsec.sid);
	if (ret != 0)
		goto out;
	new->nsec.ifindex = ifindex;
	ret = sel_netif_insert(new);
	if (ret != 0)
		goto out;
	*sid = new->nsec.sid;

out:
	spin_unlock_bh(&sel_netif_lock);
	dev_put(dev);
	if (unlikely(ret)) {
		printk(KERN_WARNING
		       "SELinux: failure in sel_netif_sid_slow(),"
		       " unable to determine network interface label (%d)\n",
		       ifindex);
		kfree(new);
	}
	return ret;
}

/**
 * sel_netif_sid - Lookup the SID of a network interface
 * @ifindex: the network interface
 * @sid: interface SID
 *
 * Description:
 * This function determines the SID of a network interface using the fastest
 * method possible.  First the interface table is queried, but if an entry
 * can't be found then the policy is queried and the result is added to the
 * table to speed up future queries.  Returns zero on success, negative
 * values on failure.
 *
 */
int sel_netif_sid(int ifindex, u32 *sid)
{
	struct sel_netif *netif;

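	/* fast path: search the table under RCU protection */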
	rcu_read_lock();
	netif = sel_netif_find(ifindex);
	if (likely(netif != NULL)) {
		*sid = netif->nsec.sid;
		rcu_read_unlock();
		return 0;
	}
	rcu_read_unlock();

	return sel_netif_sid_slow(ifindex, sid);
}

/**
 * sel_netif_kill - Remove an entry from the network interface table
 * @ifindex: the network interface
 *
 * Description:
 * This function removes the entry matching @ifindex from the network
 * interface table if it exists.
 *
 */
static void sel_netif_kill(int ifindex)
{
	struct sel_netif *netif;

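	/* the spin lock serializes writers; holding the RCU read lock as
	 * well keeps sel_netif_find()'s RCU list traversal legitimate */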
	rcu_read_lock();
	spin_lock_bh(&sel_netif_lock);
	netif = sel_netif_find(ifindex);
	if (netif)
		sel_netif_destroy(netif);
	spin_unlock_bh(&sel_netif_lock);
	rcu_read_unlock();
}

/**
 * sel_netif_flush - Flush the entire network interface table
 *
 * Description:
 * Remove all entries from the network interface table.
 *
 */
static void sel_netif_flush(void)
{
	int idx;
	struct sel_netif *netif;

	spin_lock_bh(&sel_netif_lock);
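	/* a plain list_for_each_entry() is safe here: list_del_rcu() leaves
	 * the removed entry's forward pointer intact and kfree_rcu() defers
	 * the actual free, so the walk can continue past destroyed entries */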
	for (idx = 0; idx < SEL_NETIF_HASH_SIZE; idx++)
		list_for_each_entry(netif, &sel_netif_hash[idx], list)
			sel_netif_destroy(netif);
	spin_unlock_bh(&sel_netif_lock);
}

static int sel_netif_avc_callback(u32 event, u32 ssid, u32 tsid,
				  u16 class, u32 perms, u32 *retained)
{
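	/* an AVC reset means the policy changed; drop all cached SIDs since
	 * they may be stale, then wait for in-flight readers to finish */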
	if (event == AVC_CALLBACK_RESET) {
		sel_netif_flush();
		synchronize_net();
	}
	return 0;
}

static int sel_netif_netdev_notifier_handler(struct notifier_block *this,
					     unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

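	/* we only cache interfaces from the init network namespace */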
	if (dev_net(dev) != &init_net)
		return NOTIFY_DONE;

	if (event == NETDEV_DOWN)
		sel_netif_kill(dev->ifindex);

	return NOTIFY_DONE;
}

static struct notifier_block sel_netif_netdev_notifier = {
	.notifier_call = sel_netif_netdev_notifier_handler,
};

static __init int sel_netif_init(void)
{
	int i, err;

	if (!selinux_enabled)
		return 0;

	for (i = 0; i < SEL_NETIF_HASH_SIZE; i++)
		INIT_LIST_HEAD(&sel_netif_hash[i]);

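	/* watch for devices going down so stale entries can be removed */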
	register_netdevice_notifier(&sel_netif_netdev_notifier);

	err = avc_add_callback(sel_netif_avc_callback, AVC_CALLBACK_RESET,
			       SECSID_NULL, SECSID_NULL, SECCLASS_NULL, 0);
	if (err)
		panic("avc_add_callback() failed, error %d\n", err);

	return err;
}

__initcall(sel_netif_init);