1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17#include <linux/init.h>
18#include <linux/types.h>
19#include <linux/slab.h>
20#include <linux/stddef.h>
21#include <linux/kernel.h>
22#include <linux/list.h>
23#include <linux/notifier.h>
24#include <linux/netdevice.h>
25#include <linux/rcupdate.h>
26#include <net/net_namespace.h>
27
28#include "security.h"
29#include "objsec.h"
30#include "netif.h"
31
/* Number of buckets in the interface hash table (must be a power of two,
 * since sel_netif_hashfn() masks with SEL_NETIF_HASH_SIZE - 1). */
#define SEL_NETIF_HASH_SIZE 64
/* Upper bound on cached entries across all buckets; inserts beyond this
 * fail with -ENOSPC. */
#define SEL_NETIF_HASH_MAX 1024
34
/* One cached interface label: bucket linkage, the security data itself,
 * and an RCU head so entries can be freed after a grace period. */
struct sel_netif {
	struct list_head list;			/* hash bucket linkage */
	struct netif_security_struct nsec;	/* ns/ifindex/sid triple */
	struct rcu_head rcu_head;		/* deferred free via kfree_rcu() */
};
40
/* Total entries currently cached; written only under sel_netif_lock. */
static u32 sel_netif_total;
/* NOTE(review): declared but not referenced in the visible code — possibly
 * vestigial; confirm against the rest of the file/history. */
static LIST_HEAD(sel_netif_list);
/* Protects all writers of the hash table and sel_netif_total; readers
 * traverse the buckets under RCU. */
static DEFINE_SPINLOCK(sel_netif_lock);
/* The interface cache: SEL_NETIF_HASH_SIZE buckets of struct sel_netif. */
static struct list_head sel_netif_hash[SEL_NETIF_HASH_SIZE];
45
46
47
48
49
50
51
52
53
54
55
56static inline u32 sel_netif_hashfn(const struct net *ns, int ifindex)
57{
58 return (((uintptr_t)ns + ifindex) & (SEL_NETIF_HASH_SIZE - 1));
59}
60
61
62
63
64
65
66
67
68
69
70
71static inline struct sel_netif *sel_netif_find(const struct net *ns,
72 int ifindex)
73{
74 int idx = sel_netif_hashfn(ns, ifindex);
75 struct sel_netif *netif;
76
77 list_for_each_entry_rcu(netif, &sel_netif_hash[idx], list)
78 if (net_eq(netif->nsec.ns, ns) &&
79 netif->nsec.ifindex == ifindex)
80 return netif;
81
82 return NULL;
83}
84
85
86
87
88
89
90
91
92
93
94static int sel_netif_insert(struct sel_netif *netif)
95{
96 int idx;
97
98 if (sel_netif_total >= SEL_NETIF_HASH_MAX)
99 return -ENOSPC;
100
101 idx = sel_netif_hashfn(netif->nsec.ns, netif->nsec.ifindex);
102 list_add_rcu(&netif->list, &sel_netif_hash[idx]);
103 sel_netif_total++;
104
105 return 0;
106}
107
108
109
110
111
112
113
114
115
/**
 * sel_netif_destroy - Remove an entry from the interface table
 * @netif: the entry to remove
 *
 * Description:
 * Unlink @netif from its bucket and schedule it for freeing after an RCU
 * grace period, so concurrent RCU readers may still be traversing it.
 * The caller must hold sel_netif_lock.
 *
 */
static void sel_netif_destroy(struct sel_netif *netif)
{
	list_del_rcu(&netif->list);	/* unpublish; readers may still see it */
	sel_netif_total--;
	kfree_rcu(netif, rcu_head);	/* free deferred past the grace period */
}
122
123
124
125
126
127
128
129
130
131
132
133
134
135
/**
 * sel_netif_sid_slow - Lookup the SID of a network interface using the policy
 * @ns: the network namespace
 * @ifindex: the network interface index
 * @sid: on success, set to the interface SID
 *
 * Description:
 * Slow path taken when the RCU fast path missed: resolve the device name,
 * query the security policy for its SID, and cache the result so future
 * lookups hit the fast path.  Returns zero on success, a negative value
 * on failure (-ENOENT for an unknown ifindex, -ENOMEM, or an error from
 * the policy / table insert).
 *
 */
static int sel_netif_sid_slow(struct net *ns, int ifindex, u32 *sid)
{
	int ret;
	struct sel_netif *netif;
	struct sel_netif *new = NULL;
	struct net_device *dev;

	/* Resolve the device before taking the spinlock: dev_get_by_index()
	 * takes a reference we must drop on every exit path below. */
	dev = dev_get_by_index(ns, ifindex);
	if (unlikely(dev == NULL)) {
		printk(KERN_WARNING
		       "SELinux: failure in sel_netif_sid_slow(),"
		       " invalid network interface (%d)\n", ifindex);
		return -ENOENT;
	}

	spin_lock_bh(&sel_netif_lock);
	/* Re-check under the lock: another CPU may have inserted the entry
	 * between our fast-path miss and acquiring the lock. */
	netif = sel_netif_find(ns, ifindex);
	if (netif != NULL) {
		*sid = netif->nsec.sid;
		ret = 0;
		goto out;
	}
	/* GFP_ATOMIC: we are inside a BH-disabled spinlock and cannot sleep */
	new = kzalloc(sizeof(*new), GFP_ATOMIC);
	if (new == NULL) {
		ret = -ENOMEM;
		goto out;
	}
	ret = security_netif_sid(&selinux_state, dev->name, &new->nsec.sid);
	if (ret != 0)
		goto out;
	new->nsec.ns = ns;
	new->nsec.ifindex = ifindex;
	ret = sel_netif_insert(new);
	if (ret != 0)
		goto out;
	*sid = new->nsec.sid;

out:
	spin_unlock_bh(&sel_netif_lock);
	dev_put(dev);
	if (unlikely(ret)) {
		printk(KERN_WARNING
		       "SELinux: failure in sel_netif_sid_slow(),"
		       " unable to determine network interface label (%d)\n",
		       ifindex);
		/* On any failure "new" was never published (insert failed or
		 * was never reached), so a plain kfree() is safe; kfree(NULL)
		 * is a no-op for the early-failure cases. */
		kfree(new);
	}
	return ret;
}
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203int sel_netif_sid(struct net *ns, int ifindex, u32 *sid)
204{
205 struct sel_netif *netif;
206
207 rcu_read_lock();
208 netif = sel_netif_find(ns, ifindex);
209 if (likely(netif != NULL)) {
210 *sid = netif->nsec.sid;
211 rcu_read_unlock();
212 return 0;
213 }
214 rcu_read_unlock();
215
216 return sel_netif_sid_slow(ns, ifindex, sid);
217}
218
219
220
221
222
223
224
225
226
227
228
/**
 * sel_netif_kill - Remove an entry from the interface table
 * @ns: the network namespace
 * @ifindex: the network interface index
 *
 * Description:
 * Drop the cached entry for the given namespace/ifindex pair, if one
 * exists.  Called from the netdev notifier when an interface goes down so
 * a reused ifindex cannot serve a stale SID.
 *
 */
static void sel_netif_kill(const struct net *ns, int ifindex)
{
	struct sel_netif *netif;

	/* lock ordering: RCU read section outside the BH spinlock */
	rcu_read_lock();
	spin_lock_bh(&sel_netif_lock);
	netif = sel_netif_find(ns, ifindex);
	if (netif)
		sel_netif_destroy(netif);
	spin_unlock_bh(&sel_netif_lock);
	rcu_read_unlock();
}
241
242
243
244
245
246
247
248
/**
 * sel_netif_flush - Flush the entire interface table
 * @description: none
 *
 * Description:
 * Remove every entry from the interface table, e.g. after a policy change
 * invalidates all cached SIDs.  Plain list_for_each_entry() (not the
 * _safe variant) is sufficient here even though sel_netif_destroy()
 * unlinks the current entry: list_del_rcu() leaves ->next intact and
 * kfree_rcu() defers the free past a grace period, so advancing from the
 * removed entry is well defined.
 *
 */
void sel_netif_flush(void)
{
	int idx;
	struct sel_netif *netif;

	spin_lock_bh(&sel_netif_lock);
	for (idx = 0; idx < SEL_NETIF_HASH_SIZE; idx++)
		list_for_each_entry(netif, &sel_netif_hash[idx], list)
			sel_netif_destroy(netif);
	spin_unlock_bh(&sel_netif_lock);
}
260
261static int sel_netif_netdev_notifier_handler(struct notifier_block *this,
262 unsigned long event, void *ptr)
263{
264 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
265
266 if (event == NETDEV_DOWN)
267 sel_netif_kill(dev_net(dev), dev->ifindex);
268
269 return NOTIFY_DONE;
270}
271
/* Notifier block registered in sel_netif_init() to watch netdev events. */
static struct notifier_block sel_netif_netdev_notifier = {
	.notifier_call = sel_netif_netdev_notifier_handler,
};
275
276static __init int sel_netif_init(void)
277{
278 int i;
279
280 if (!selinux_enabled)
281 return 0;
282
283 for (i = 0; i < SEL_NETIF_HASH_SIZE; i++)
284 INIT_LIST_HEAD(&sel_netif_hash[i]);
285
286 register_netdevice_notifier(&sel_netif_netdev_notifier);
287
288 return 0;
289}
290
291__initcall(sel_netif_init);
292
293