1
2
3
4
5
6
7
8
9
10
11
12
13
14#include <linux/init.h>
15#include <linux/types.h>
16#include <linux/slab.h>
17#include <linux/stddef.h>
18#include <linux/kernel.h>
19#include <linux/list.h>
20#include <linux/notifier.h>
21#include <linux/netdevice.h>
22#include <linux/rcupdate.h>
23#include <net/net_namespace.h>
24
25#include "security.h"
26#include "objsec.h"
27#include "netif.h"
28
/* Number of hash buckets; must remain a power of two since
 * sel_netif_hashfn() masks with (SEL_NETIF_HASH_SIZE - 1). */
#define SEL_NETIF_HASH_SIZE 64
/* Upper bound on cached entries; sel_netif_insert() refuses beyond this. */
#define SEL_NETIF_HASH_MAX 1024

/* One cached network interface label: hash bucket linkage, the SELinux
 * security attributes (namespace, ifindex, SID), and the RCU head used
 * for deferred freeing of entries still visible to lockless readers. */
struct sel_netif {
	struct list_head list;
	struct netif_security_struct nsec;
	struct rcu_head rcu_head;
};

/* Count of entries currently in the table; written under sel_netif_lock. */
static u32 sel_netif_total;
/* NOTE(review): sel_netif_list is never referenced in this file — possibly
 * a leftover; confirm against the rest of the tree before removing. */
static LIST_HEAD(sel_netif_list);
/* Serializes all table modifications; lookups run locklessly under RCU. */
static DEFINE_SPINLOCK(sel_netif_lock);
static struct list_head sel_netif_hash[SEL_NETIF_HASH_SIZE];
42
43
44
45
46
47
48
49
50
51
52
53static inline u32 sel_netif_hashfn(const struct net *ns, int ifindex)
54{
55 return (((uintptr_t)ns + ifindex) & (SEL_NETIF_HASH_SIZE - 1));
56}
57
58
59
60
61
62
63
64
65
66
67
/**
 * sel_netif_find - Search for an interface record
 * @ns: the network namespace
 * @ifindex: the network interface
 *
 * Description:
 * Search the interface table and return the entry matching @ns/@ifindex,
 * or NULL if no such entry exists.  The bucket walk uses
 * list_for_each_entry_rcu(), so callers must hold either the RCU read
 * lock (lockless readers) or sel_netif_lock (writers).
 *
 */
static inline struct sel_netif *sel_netif_find(const struct net *ns,
					       int ifindex)
{
	int idx = sel_netif_hashfn(ns, ifindex);
	struct sel_netif *netif;

	list_for_each_entry_rcu(netif, &sel_netif_hash[idx], list)
		if (net_eq(netif->nsec.ns, ns) &&
		    netif->nsec.ifindex == ifindex)
			return netif;

	return NULL;
}
81
82
83
84
85
86
87
88
89
90
91static int sel_netif_insert(struct sel_netif *netif)
92{
93 int idx;
94
95 if (sel_netif_total >= SEL_NETIF_HASH_MAX)
96 return -ENOSPC;
97
98 idx = sel_netif_hashfn(netif->nsec.ns, netif->nsec.ifindex);
99 list_add_rcu(&netif->list, &sel_netif_hash[idx]);
100 sel_netif_total++;
101
102 return 0;
103}
104
105
106
107
108
109
110
111
112
/**
 * sel_netif_destroy - Remove an interface record from the table
 * @netif: the existing interface record
 *
 * Description:
 * Unlink @netif from its bucket and schedule the actual free for after
 * the current RCU grace period.  The list_del_rcu()/kfree_rcu() pairing
 * keeps the entry valid for concurrent lockless readers until they are
 * done with it.  Caller must hold sel_netif_lock.
 *
 */
static void sel_netif_destroy(struct sel_netif *netif)
{
	list_del_rcu(&netif->list);
	sel_netif_total--;
	kfree_rcu(netif, rcu_head);
}
119
120
121
122
123
124
125
126
127
128
129
130
131
132
/**
 * sel_netif_sid_slow - Lookup the SID of a network interface using the policy
 * @ns: the network namespace
 * @ifindex: the network interface
 * @sid: interface SID
 *
 * Description:
 * This function determines the SID of a network interface by querying the
 * security policy and caches the result in the interface table so that
 * future lookups can take the RCU fast path in sel_netif_sid().  Returns
 * zero on success, negative values on failure.
 *
 */
static int sel_netif_sid_slow(struct net *ns, int ifindex, u32 *sid)
{
	int ret;
	struct sel_netif *netif;
	struct sel_netif *new = NULL;
	struct net_device *dev;

	/* Resolve the ifindex to a device first; we need dev->name for the
	 * policy query.  dev_get_by_index() takes a reference, dropped via
	 * dev_put() below. */
	dev = dev_get_by_index(ns, ifindex);
	if (unlikely(dev == NULL)) {
		pr_warn("SELinux: failure in %s(), invalid network interface (%d)\n",
			__func__, ifindex);
		return -ENOENT;
	}

	spin_lock_bh(&sel_netif_lock);
	/* Re-check under the lock: another CPU may have cached this
	 * interface between our fast-path miss and taking the lock. */
	netif = sel_netif_find(ns, ifindex);
	if (netif != NULL) {
		*sid = netif->nsec.sid;
		ret = 0;
		goto out;
	}
	/* GFP_ATOMIC: we hold a spinlock with BHs disabled. */
	new = kzalloc(sizeof(*new), GFP_ATOMIC);
	if (new == NULL) {
		ret = -ENOMEM;
		goto out;
	}
	ret = security_netif_sid(&selinux_state, dev->name, &new->nsec.sid);
	if (ret != 0)
		goto out;
	new->nsec.ns = ns;
	new->nsec.ifindex = ifindex;
	ret = sel_netif_insert(new);
	if (ret != 0)
		goto out;
	*sid = new->nsec.sid;

out:
	spin_unlock_bh(&sel_netif_lock);
	dev_put(dev);
	if (unlikely(ret)) {
		pr_warn("SELinux: failure in %s(), unable to determine network interface label (%d)\n",
			__func__, ifindex);
		/* Safe on every error path: failure happens only before a
		 * successful insert, so @new was never published (and
		 * kfree(NULL) is a no-op on the cache-hit path). */
		kfree(new);
	}
	return ret;
}
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197int sel_netif_sid(struct net *ns, int ifindex, u32 *sid)
198{
199 struct sel_netif *netif;
200
201 rcu_read_lock();
202 netif = sel_netif_find(ns, ifindex);
203 if (likely(netif != NULL)) {
204 *sid = netif->nsec.sid;
205 rcu_read_unlock();
206 return 0;
207 }
208 rcu_read_unlock();
209
210 return sel_netif_sid_slow(ns, ifindex, sid);
211}
212
213
214
215
216
217
218
219
220
221
222
/**
 * sel_netif_kill - Remove an entry from the network interface table
 * @ns: the network namespace
 * @ifindex: the network interface
 *
 * Description:
 * This function removes the entry matching @ns/@ifindex from the
 * interface table if it exists.  The modification itself is serialized
 * by sel_netif_lock; the enclosing rcu_read_lock() is presumably there
 * to satisfy the RCU annotations on the list walk in sel_netif_find() —
 * confirm against the RCU list API before changing.
 *
 */
static void sel_netif_kill(const struct net *ns, int ifindex)
{
	struct sel_netif *netif;

	rcu_read_lock();
	spin_lock_bh(&sel_netif_lock);
	netif = sel_netif_find(ns, ifindex);
	if (netif)
		sel_netif_destroy(netif);
	spin_unlock_bh(&sel_netif_lock);
	rcu_read_unlock();
}
235
236
237
238
239
240
241
242
/**
 * sel_netif_flush - Flush the entire network interface table
 *
 * Description:
 * Remove every entry from the interface table, e.g. after a policy
 * change invalidates all cached SIDs.  Iterating without the _safe list
 * variant looks suspicious but is OK here: list_del_rcu() leaves the
 * removed entry's ->next pointer intact, and kfree_rcu() defers the
 * actual free, so advancing from a just-destroyed entry is still valid.
 *
 */
void sel_netif_flush(void)
{
	int idx;
	struct sel_netif *netif;

	spin_lock_bh(&sel_netif_lock);
	for (idx = 0; idx < SEL_NETIF_HASH_SIZE; idx++)
		list_for_each_entry(netif, &sel_netif_hash[idx], list)
			sel_netif_destroy(netif);
	spin_unlock_bh(&sel_netif_lock);
}
254
255static int sel_netif_netdev_notifier_handler(struct notifier_block *this,
256 unsigned long event, void *ptr)
257{
258 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
259
260 if (event == NETDEV_DOWN)
261 sel_netif_kill(dev_net(dev), dev->ifindex);
262
263 return NOTIFY_DONE;
264}
265
/* Registered at init time so NETDEV_DOWN events invalidate cached labels. */
static struct notifier_block sel_netif_netdev_notifier = {
	.notifier_call = sel_netif_netdev_notifier_handler,
};
269
270static __init int sel_netif_init(void)
271{
272 int i;
273
274 if (!selinux_enabled)
275 return 0;
276
277 for (i = 0; i < SEL_NETIF_HASH_SIZE; i++)
278 INIT_LIST_HEAD(&sel_netif_hash[i]);
279
280 register_netdevice_notifier(&sel_netif_netdev_notifier);
281
282 return 0;
283}
284
285__initcall(sel_netif_init);
286
287