dev_cgroup         53 security/device_cgroup.c static inline struct dev_cgroup *css_to_devcgroup(struct cgroup_subsys_state *s)
dev_cgroup         55 security/device_cgroup.c 	return container_of(s, struct dev_cgroup, css);
dev_cgroup         58 security/device_cgroup.c static inline struct dev_cgroup *cgroup_to_devcgroup(struct cgroup *cgroup)
dev_cgroup         63 security/device_cgroup.c static inline struct dev_cgroup *task_devcgroup(struct task_struct *task)
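
The three inline accessors hit at lines 53-65 all reduce to container_of() on the cgroup_subsys_state embedded in struct dev_cgroup. A minimal sketch of how they fit together, assuming the controller's subsystem id is devices_subsys_id and that struct dev_cgroup carries only the css, the whitelist head and its spinlock (reconstructed, not quoted verbatim):

    struct dev_cgroup {
        struct cgroup_subsys_state css;
        struct list_head whitelist;     /* dev_whitelist_item entries */
        spinlock_t lock;                /* serializes whitelist updates */
    };

    static inline struct dev_cgroup *css_to_devcgroup(struct cgroup_subsys_state *s)
    {
        /* css is embedded in dev_cgroup, so container_of() recovers the parent */
        return container_of(s, struct dev_cgroup, css);
    }

    static inline struct dev_cgroup *cgroup_to_devcgroup(struct cgroup *cgroup)
    {
        return css_to_devcgroup(cgroup_subsys_state(cgroup, devices_subsys_id));
    }

    static inline struct dev_cgroup *task_devcgroup(struct task_struct *task)
    {
        return css_to_devcgroup(task_subsys_state(task, devices_subsys_id));
    }
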
dev_cgroup        122 security/device_cgroup.c 	spin_lock(&dev_cgroup->lock);
dev_cgroup        123 security/device_cgroup.c 	list_for_each_entry(walk, &dev_cgroup->whitelist, list) {
dev_cgroup        137 security/device_cgroup.c 		list_add_tail_rcu(&whcopy->list, &dev_cgroup->whitelist);
dev_cgroup        138 security/device_cgroup.c 	spin_unlock(&dev_cgroup->lock);
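
Lines 122-138 are the body of dev_whitelist_add(): readers traverse the whitelist under RCU, so insertion uses list_add_tail_rcu() while concurrent writers serialize on dev_cgroup->lock. A hedged sketch of that update path, assuming a dev_whitelist_item keyed by type/major/minor with an access bitmask (the exact allocation idiom in the file may differ):

    struct dev_whitelist_item {
        u32 major, minor;               /* ~0 acts as a wildcard */
        short type;                     /* DEV_BLOCK, DEV_CHAR or DEV_ALL */
        short access;                   /* ACC_READ | ACC_WRITE | ACC_MKNOD */
        struct list_head list;
        struct rcu_head rcu;            /* for deferred free on removal */
    };

    static int dev_whitelist_add(struct dev_cgroup *dev_cgroup,
                                 struct dev_whitelist_item *wh)
    {
        struct dev_whitelist_item *whcopy, *walk;

        whcopy = kmemdup(wh, sizeof(*wh), GFP_KERNEL);
        if (!whcopy)
            return -ENOMEM;

        spin_lock(&dev_cgroup->lock);
        list_for_each_entry(walk, &dev_cgroup->whitelist, list) {
            if (walk->type != wh->type ||
                walk->major != wh->major ||
                walk->minor != wh->minor)
                continue;
            /* device already listed: just widen its access mask */
            walk->access |= wh->access;
            kfree(whcopy);
            whcopy = NULL;
        }
        if (whcopy != NULL)
            list_add_tail_rcu(&whcopy->list, &dev_cgroup->whitelist);
        spin_unlock(&dev_cgroup->lock);
        return 0;
    }
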
dev_cgroup        159 security/device_cgroup.c 	spin_lock(&dev_cgroup->lock);
dev_cgroup        160 security/device_cgroup.c 	list_for_each_entry_safe(walk, tmp, &dev_cgroup->whitelist, list) {
dev_cgroup        177 security/device_cgroup.c 	spin_unlock(&dev_cgroup->lock);
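
Lines 159-177 are the matching removal path, dev_whitelist_rm(): it takes the same spinlock but walks with list_for_each_entry_safe() because entries whose access mask drops to zero are unlinked. A sketch under the same assumptions; the deferred-free mechanism (call_rcu() here) is an assumption and has varied between kernel versions:

    static void whitelist_item_free(struct rcu_head *rcu)
    {
        kfree(container_of(rcu, struct dev_whitelist_item, rcu));
    }

    static void dev_whitelist_rm(struct dev_cgroup *dev_cgroup,
                                 struct dev_whitelist_item *wh)
    {
        struct dev_whitelist_item *walk, *tmp;

        spin_lock(&dev_cgroup->lock);
        list_for_each_entry_safe(walk, tmp, &dev_cgroup->whitelist, list) {
            if (walk->type != DEV_ALL) {
                if (walk->type != wh->type)
                    continue;
                if (walk->major != ~0 && walk->major != wh->major)
                    continue;
                if (walk->minor != ~0 && walk->minor != wh->minor)
                    continue;
            }

            walk->access &= ~wh->access;
            if (!walk->access) {
                /* unlink now; free only after current RCU readers finish */
                list_del_rcu(&walk->list);
                call_rcu(&walk->rcu, whitelist_item_free);
            }
        }
        spin_unlock(&dev_cgroup->lock);
    }
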
dev_cgroup        186 security/device_cgroup.c 	struct dev_cgroup *dev_cgroup, *parent_dev_cgroup;
dev_cgroup        190 security/device_cgroup.c 	dev_cgroup = kzalloc(sizeof(*dev_cgroup), GFP_KERNEL);
dev_cgroup        191 security/device_cgroup.c 	if (!dev_cgroup)
dev_cgroup        193 security/device_cgroup.c 	INIT_LIST_HEAD(&dev_cgroup->whitelist);
dev_cgroup        200 security/device_cgroup.c 			kfree(dev_cgroup);
dev_cgroup        206 security/device_cgroup.c 		list_add(&wh->list, &dev_cgroup->whitelist);
dev_cgroup        209 security/device_cgroup.c 		ret = dev_whitelist_copy(&dev_cgroup->whitelist,
dev_cgroup        212 security/device_cgroup.c 			kfree(dev_cgroup);
dev_cgroup        217 security/device_cgroup.c 	spin_lock_init(&dev_cgroup->lock);
dev_cgroup        218 security/device_cgroup.c 	return &dev_cgroup->css;
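
Lines 186-218 come from the css allocation callback: the root cgroup gets a single catch-all DEV_ALL/ACC_MASK entry, while a child starts from a private copy of its parent's whitelist via dev_whitelist_copy(). A sketch of that constructor, assuming the 2.6.2x-era callback signature taking the subsystem and the new cgroup:

    static struct cgroup_subsys_state *devcgroup_create(struct cgroup_subsys *ss,
                                                        struct cgroup *cgroup)
    {
        struct dev_cgroup *dev_cgroup, *parent_dev_cgroup;
        struct cgroup *parent_cgroup;
        int ret;

        dev_cgroup = kzalloc(sizeof(*dev_cgroup), GFP_KERNEL);
        if (!dev_cgroup)
            return ERR_PTR(-ENOMEM);
        INIT_LIST_HEAD(&dev_cgroup->whitelist);
        parent_cgroup = cgroup->parent;

        if (parent_cgroup == NULL) {
            /* root cgroup: allow all devices until told otherwise */
            struct dev_whitelist_item *wh;

            wh = kmalloc(sizeof(*wh), GFP_KERNEL);
            if (!wh) {
                kfree(dev_cgroup);
                return ERR_PTR(-ENOMEM);
            }
            wh->minor = wh->major = ~0;
            wh->type = DEV_ALL;
            wh->access = ACC_MASK;
            list_add(&wh->list, &dev_cgroup->whitelist);
        } else {
            /* child: inherit a copy of the parent's whitelist */
            parent_dev_cgroup = cgroup_to_devcgroup(parent_cgroup);
            ret = dev_whitelist_copy(&dev_cgroup->whitelist,
                                     &parent_dev_cgroup->whitelist);
            if (ret) {
                kfree(dev_cgroup);
                return ERR_PTR(ret);
            }
        }

        spin_lock_init(&dev_cgroup->lock);
        return &dev_cgroup->css;
    }
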
dev_cgroup        224 security/device_cgroup.c 	struct dev_cgroup *dev_cgroup;
dev_cgroup        227 security/device_cgroup.c 	dev_cgroup = cgroup_to_devcgroup(cgroup);
dev_cgroup        228 security/device_cgroup.c 	list_for_each_entry_safe(wh, tmp, &dev_cgroup->whitelist, list) {
dev_cgroup        232 security/device_cgroup.c 	kfree(dev_cgroup);
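
Lines 224-232 are the matching destructor. By the time it runs no task can still be attached to the cgroup, so the whitelist is torn down with a plain _safe walk and immediate kfree(). A minimal sketch:

    static void devcgroup_destroy(struct cgroup_subsys *ss,
                                  struct cgroup *cgroup)
    {
        struct dev_cgroup *dev_cgroup;
        struct dev_whitelist_item *wh, *tmp;

        dev_cgroup = cgroup_to_devcgroup(cgroup);
        list_for_each_entry_safe(wh, tmp, &dev_cgroup->whitelist, list) {
            list_del(&wh->list);
            kfree(wh);
        }
        kfree(dev_cgroup);
    }
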
dev_cgroup        276 security/device_cgroup.c 	struct dev_cgroup *devcgroup = cgroup_to_devcgroup(cgroup);
dev_cgroup        332 security/device_cgroup.c 	struct dev_cgroup *parent;
dev_cgroup        360 security/device_cgroup.c 	struct dev_cgroup *cur_devcgroup;
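
Lines 276-360 are local declarations inside the cgroupfs handlers (the seq_file reader and the devices.allow/devices.deny writer). The rule enforced on the write path is hierarchical: a child may only whitelist what its parent already permits. A hedged sketch of that parent check; the helper name parent_has_perm and its exact signature are assumptions here, only the overall shape is implied by the hits above:

    /* hypothetical sketch: does the parent's whitelist already cover @wh? */
    static int parent_has_perm(struct cgroup *childcg,
                               struct dev_whitelist_item *wh)
    {
        struct cgroup *pcg = childcg->parent;
        struct dev_cgroup *parent;
        struct dev_whitelist_item *walk;
        int ret = 0;

        if (!pcg)
            return 1;       /* the root cgroup answers to no one */

        parent = cgroup_to_devcgroup(pcg);
        spin_lock(&parent->lock);
        list_for_each_entry(walk, &parent->whitelist, list) {
            if (walk->type != DEV_ALL && walk->type != wh->type)
                continue;
            if (walk->major != ~0 && walk->major != wh->major)
                continue;
            if (walk->minor != ~0 && walk->minor != wh->minor)
                continue;
            if ((wh->access & walk->access) != wh->access)
                continue;
            ret = 1;        /* found a parent entry covering the request */
            break;
        }
        spin_unlock(&parent->lock);
        return ret;
    }
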
dev_cgroup        503 security/device_cgroup.c 	struct dev_cgroup *dev_cgroup;
dev_cgroup        514 security/device_cgroup.c 	dev_cgroup = task_devcgroup(current);
dev_cgroup        516 security/device_cgroup.c 	list_for_each_entry_rcu(wh, &dev_cgroup->whitelist, list) {
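
Lines 503-516 belong to the hook that vets access to device inodes: the current task's devcgroup is looked up and its whitelist is scanned lock-free under rcu_read_lock(), which is why the add/remove paths above use the _rcu list primitives. A sketch of the check, assuming the usual DEV_*/ACC_* encoding and wildcard major/minor of ~0:

    int devcgroup_inode_permission(struct inode *inode, int mask)
    {
        struct dev_cgroup *dev_cgroup;
        struct dev_whitelist_item *wh;

        if (!inode->i_rdev)
            return 0;
        if (!S_ISBLK(inode->i_mode) && !S_ISCHR(inode->i_mode))
            return 0;

        rcu_read_lock();
        dev_cgroup = task_devcgroup(current);
        list_for_each_entry_rcu(wh, &dev_cgroup->whitelist, list) {
            if (wh->type & DEV_ALL)
                goto acc_check;
            if ((wh->type & DEV_BLOCK) && !S_ISBLK(inode->i_mode))
                continue;
            if ((wh->type & DEV_CHAR) && !S_ISCHR(inode->i_mode))
                continue;
            if (wh->major != ~0 && wh->major != imajor(inode))
                continue;
            if (wh->minor != ~0 && wh->minor != iminor(inode))
                continue;
    acc_check:
            if ((mask & MAY_WRITE) && !(wh->access & ACC_WRITE))
                continue;
            if ((mask & MAY_READ) && !(wh->access & ACC_READ))
                continue;
            rcu_read_unlock();
            return 0;       /* a matching entry grants the access */
        }
        rcu_read_unlock();
        return -EPERM;      /* default deny */
    }
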
dev_cgroup        543 security/device_cgroup.c 	struct dev_cgroup *dev_cgroup;
dev_cgroup        548 security/device_cgroup.c 	dev_cgroup = task_devcgroup(current);
dev_cgroup        550 security/device_cgroup.c 	list_for_each_entry(wh, &dev_cgroup->whitelist, list) {
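
Lines 543-550 are the companion mknod hook, resolving the caller's devcgroup the same way. The hit at line 550 shows a plain list_for_each_entry() walk; the sketch below uses the RCU read-side iterator instead, for consistency with the permission hook (later kernels converted this walk to the _rcu variant), and checks ACC_MKNOD against the requested device number:

    int devcgroup_inode_mknod(int mode, dev_t dev)
    {
        struct dev_cgroup *dev_cgroup;
        struct dev_whitelist_item *wh;

        rcu_read_lock();
        dev_cgroup = task_devcgroup(current);
        list_for_each_entry_rcu(wh, &dev_cgroup->whitelist, list) {
            if (wh->type & DEV_ALL)
                goto acc_check;
            if ((wh->type & DEV_BLOCK) && !S_ISBLK(mode))
                continue;
            if ((wh->type & DEV_CHAR) && !S_ISCHR(mode))
                continue;
            if (wh->major != ~0 && wh->major != MAJOR(dev))
                continue;
            if (wh->minor != ~0 && wh->minor != MINOR(dev))
                continue;
    acc_check:
            if (!(wh->access & ACC_MKNOD))
                continue;
            rcu_read_unlock();
            return 0;       /* whitelist entry allows creating this node */
        }
        rcu_read_unlock();
        return -EPERM;
    }
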