GFP_ATOMIC        889 arch/x86/kernel/cpu/common.c 				__get_free_pages(GFP_ATOMIC, IRQSTACK_ORDER);
GFP_ATOMIC       1008 arch/x86/kernel/cpu/common.c 				estacks = (char *)__get_free_pages(GFP_ATOMIC, order[v]);
GFP_ATOMIC        317 arch/x86/kernel/mca_32.c 	mca_dev = kzalloc(sizeof(struct mca_device), GFP_ATOMIC);
GFP_ATOMIC        355 arch/x86/kernel/mca_32.c 		mca_dev = kzalloc(sizeof(struct mca_device), GFP_ATOMIC);
GFP_ATOMIC        384 arch/x86/kernel/mca_32.c 		mca_dev = kzalloc(sizeof(struct mca_device), GFP_ATOMIC);
GFP_ATOMIC        782 arch/x86/kernel/smpboot.c 	newpda = kmalloc_node(size, GFP_ATOMIC, node);
GFP_ATOMIC        157 arch/x86/mm/init_64.c 		ptr = (void *) get_zeroed_page(GFP_ATOMIC);
GFP_ATOMIC        304 arch/x86/mm/init_64.c 		adr = (void *)get_zeroed_page(GFP_ATOMIC);
GFP_ATOMIC        323 arch/x86/mm/kmmio.c 	f = kmalloc(sizeof(*f), GFP_ATOMIC);
GFP_ATOMIC        462 arch/x86/mm/kmmio.c 	drelease = kmalloc(sizeof(*drelease), GFP_ATOMIC);
GFP_ATOMIC         40 arch/x86/power/hibernate_32.c 	pmd_table = (pmd_t *)get_safe_page(GFP_ATOMIC);
GFP_ATOMIC         63 arch/x86/power/hibernate_32.c 		pte_t *page_table = (pte_t *)get_safe_page(GFP_ATOMIC);
GFP_ATOMIC        149 arch/x86/power/hibernate_32.c 	resume_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
GFP_ATOMIC         54 arch/x86/power/hibernate_64.c 		pmd = (pmd_t *)get_safe_page(GFP_ATOMIC);
GFP_ATOMIC         76 arch/x86/power/hibernate_64.c 	temp_level4_pgt = (pgd_t *)get_safe_page(GFP_ATOMIC);
GFP_ATOMIC         89 arch/x86/power/hibernate_64.c 		pud_t *pud = (pud_t *)get_safe_page(GFP_ATOMIC);
GFP_ATOMIC        111 arch/x86/power/hibernate_64.c 	relocated_restore_code = (void *)get_safe_page(GFP_ATOMIC);
GFP_ATOMIC        285 arch/x86/vdso/vdso32-setup.c 	void *syscall_page = (void *)get_zeroed_page(GFP_ATOMIC);
GFP_ATOMIC        203 block/as-iosched.c 	ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
GFP_ATOMIC        229 block/as-iosched.c 	struct io_context *ioc = get_io_context(GFP_ATOMIC, node);
GFP_ATOMIC        734 block/blk-core.c 			ioc = current_io_context(GFP_ATOMIC, q->node);
GFP_ATOMIC        101 block/blk-ioc.c 		INIT_RADIX_TREE(&ret->radix_root, GFP_ATOMIC | __GFP_HIGH);
GFP_ATOMIC        119 block/blk-tag.c 	tag_index = kzalloc(depth * sizeof(struct request *), GFP_ATOMIC);
GFP_ATOMIC        124 block/blk-tag.c 	tag_map = kzalloc(nr_ulongs * sizeof(unsigned long), GFP_ATOMIC);
GFP_ATOMIC        144 block/blk-tag.c 	tags = kmalloc(sizeof(struct blk_queue_tag), GFP_ATOMIC);
GFP_ATOMIC       1411 block/cfq-iosched.c 		new_cfqq = cfq_get_queue(cfqd, ASYNC, cic->ioc, GFP_ATOMIC);
GFP_ATOMIC         38 crypto/ablkcipher.c 	buffer = kmalloc(absize, GFP_ATOMIC);
GFP_ATOMIC         36 crypto/aead.c  	buffer = kmalloc(absize, GFP_ATOMIC);
GFP_ATOMIC        125 crypto/ahash.c 	buffer = kmalloc(absize, GFP_ATOMIC);
GFP_ATOMIC        164 crypto/blkcipher.c 	walk->buffer = kmalloc(n, GFP_ATOMIC);
GFP_ATOMIC        245 crypto/blkcipher.c 			walk->page = (void *)__get_free_page(GFP_ATOMIC);
GFP_ATOMIC        290 crypto/blkcipher.c 	walk->buffer = kmalloc(size, GFP_ATOMIC);
GFP_ATOMIC        369 crypto/blkcipher.c 	buffer = kmalloc(absize, GFP_ATOMIC);
GFP_ATOMIC         33 crypto/cipher.c 	buffer = kmalloc(absize, GFP_ATOMIC);
GFP_ATOMIC         38 crypto/hash.c  	buffer = kmalloc(absize, GFP_ATOMIC);
GFP_ATOMIC        120 crypto/seqiv.c 								  GFP_ATOMIC);
GFP_ATOMIC        166 crypto/seqiv.c 								  GFP_ATOMIC);
GFP_ATOMIC        406 fs/cifs/asn1.c 	*oid = kmalloc(size * sizeof(unsigned long), GFP_ATOMIC);
GFP_ATOMIC        553 fs/dlm/lowcomms.c 		con->rx_page = alloc_page(GFP_ATOMIC);
GFP_ATOMIC       2156 fs/dquot.c     	dquot_hash = (struct hlist_head *)__get_free_pages(GFP_ATOMIC, order);
GFP_ATOMIC        212 fs/inode.c     	INIT_RADIX_TREE(&inode->i_data.page_tree, GFP_ATOMIC);
GFP_ATOMIC         51 fs/ioprio.c    		ioc = alloc_io_context(GFP_ATOMIC, -1);
GFP_ATOMIC       2320 fs/namespace.c 	mount_hashtable = (struct list_head *)__get_free_page(GFP_ATOMIC);
GFP_ATOMIC       1253 fs/nfs/inode.c 	INIT_RADIX_TREE(&nfsi->nfs_page_tree, GFP_ATOMIC);
GFP_ATOMIC        138 fs/ntfs/inode.c 		ni->name = kmalloc(i + sizeof(ntfschar), GFP_ATOMIC);
GFP_ATOMIC        252 fs/ocfs2/cluster/heartbeat.c 	bio = bio_alloc(GFP_ATOMIC, 16);
GFP_ATOMIC        258 fs/ocfs2/cluster/tcp.c 		if (!idr_pre_get(&nn->nn_status_idr, GFP_ATOMIC)) {
GFP_ATOMIC       1028 fs/ocfs2/cluster/tcp.c 	vec = kmalloc(sizeof(struct kvec) * veclen, GFP_ATOMIC);
GFP_ATOMIC       1035 fs/ocfs2/cluster/tcp.c 	msg = kmalloc(sizeof(struct o2net_msg), GFP_ATOMIC);
GFP_ATOMIC       1591 fs/ocfs2/cluster/tcp.c 	sock->sk->sk_allocation = GFP_ATOMIC;
GFP_ATOMIC       1777 fs/ocfs2/cluster/tcp.c 	new_sock->sk->sk_allocation = GFP_ATOMIC;
GFP_ATOMIC       1914 fs/ocfs2/cluster/tcp.c 	sock->sk->sk_allocation = GFP_ATOMIC;
GFP_ATOMIC        283 fs/proc/kcore.c 		elf_buf = kzalloc(elf_buflen, GFP_ATOMIC);
GFP_ATOMIC       2065 fs/reiserfs/fix_node.c 		buf = kmalloc(size, GFP_ATOMIC | __GFP_NOWARN);
GFP_ATOMIC        936 fs/reiserfs/inode.c 				un = kzalloc(min(blocks_needed, max_to_insert) * UNFM_P_SIZE, GFP_ATOMIC);	// We need to avoid scheduling.
GFP_ATOMIC        694 fs/super.c     	if (idr_pre_get(&unnamed_dev_idr, GFP_ATOMIC) == 0)
GFP_ATOMIC         48 fs/xfs/linux-2.6/kmem.h 		lflags = GFP_ATOMIC | __GFP_NOWARN;
GFP_ATOMIC        290 fs/xfs/xfs_mount.c 		INIT_RADIX_TREE(&pag->pag_ici_root, GFP_ATOMIC);
GFP_ATOMIC        376 fs/xfs/xfs_mru_cache.c 	INIT_RADIX_TREE(&mru->store, GFP_ATOMIC);
GFP_ATOMIC        123 include/acpi/platform/aclinux.h 	return kmalloc(size, irqs_disabled()? GFP_ATOMIC : GFP_KERNEL);
GFP_ATOMIC        127 include/acpi/platform/aclinux.h 	return kzalloc(size, irqs_disabled()? GFP_ATOMIC : GFP_KERNEL);
GFP_ATOMIC        133 include/acpi/platform/aclinux.h 				 irqs_disabled()? GFP_ATOMIC : GFP_KERNEL);
GFP_ATOMIC         22 include/asm-generic/pci-dma-compat.h 	return dma_alloc_coherent(hwdev == NULL ? NULL : &hwdev->dev, size, dma_handle, GFP_ATOMIC);
GFP_ATOMIC         58 include/linux/gfp.h #define GFP_NOWAIT	(GFP_ATOMIC & ~__GFP_HIGH)
GFP_ATOMIC       1109 include/linux/i2o.h 	struct i2o_msg_mfa *mmsg = mempool_alloc(c->in_msg.mempool, GFP_ATOMIC);
GFP_ATOMIC         73 include/linux/kmod.h 	gfp_t gfp_mask = (wait == UMH_NO_WAIT) ? GFP_ATOMIC : GFP_KERNEL;
GFP_ATOMIC         86 include/linux/kmod.h 	gfp_t gfp_mask = (wait == UMH_NO_WAIT) ? GFP_ATOMIC : GFP_KERNEL;
GFP_ATOMIC       1385 include/linux/skbuff.h 	return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
GFP_ATOMIC       1400 include/linux/skbuff.h 	return __netdev_alloc_page(dev, GFP_ATOMIC);
GFP_ATOMIC       1434 include/linux/skbuff.h 					GFP_ATOMIC);
GFP_ATOMIC         63 include/net/request_sock.h 	struct request_sock *req = kmem_cache_alloc(ops->slab, GFP_ATOMIC);
GFP_ATOMIC       1236 include/net/sock.h 	return in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
GFP_ATOMIC        635 include/net/xfrm.h 	audit_buf = audit_log_start(current->audit_context, GFP_ATOMIC,
GFP_ATOMIC        622 kernel/auditsc.c 		ctx->filterkey = kstrdup(rule->filterkey, GFP_ATOMIC);
GFP_ATOMIC       2011 kernel/auditsc.c 	ax = kmalloc(sizeof(*ax), GFP_ATOMIC);
GFP_ATOMIC       2053 kernel/auditsc.c 	ax = kmalloc(sizeof(*ax), GFP_ATOMIC);
GFP_ATOMIC       2097 kernel/auditsc.c 	ax = kmalloc(sizeof(*ax), GFP_ATOMIC);
GFP_ATOMIC       2145 kernel/auditsc.c 	ax = kmalloc(sizeof(*ax), GFP_ATOMIC);
GFP_ATOMIC       2183 kernel/auditsc.c 	ax = kmalloc(sizeof(*ax), GFP_ATOMIC);
GFP_ATOMIC       2207 kernel/auditsc.c 	ax = kmalloc(sizeof(*ax), GFP_ATOMIC);
GFP_ATOMIC       2235 kernel/auditsc.c 	ax = kmalloc(sizeof(*ax), GFP_ATOMIC);
GFP_ATOMIC       2411 kernel/auditsc.c 		axp = kzalloc(sizeof(*axp), GFP_ATOMIC);
GFP_ATOMIC        628 kernel/irq/manage.c 	action = kmalloc(sizeof(struct irqaction), GFP_ATOMIC);
GFP_ATOMIC        603 kernel/module.c 	use = kmalloc(sizeof(*use), GFP_ATOMIC);
GFP_ATOMIC       1102 kernel/power/snapshot.c 	buffer = get_image_page(GFP_ATOMIC | __GFP_COLD, safe_needed);
GFP_ATOMIC       1154 kernel/power/snapshot.c 	error = memory_bm_create(orig_bm, GFP_ATOMIC | __GFP_COLD, PG_ANY);
GFP_ATOMIC       1158 kernel/power/snapshot.c 	error = memory_bm_create(copy_bm, GFP_ATOMIC | __GFP_COLD, PG_ANY);
GFP_ATOMIC       1170 kernel/power/snapshot.c 		struct page *page = alloc_image_page(GFP_ATOMIC | __GFP_COLD);
GFP_ATOMIC       1326 kernel/power/snapshot.c 		buffer = get_image_page(GFP_ATOMIC, PG_ANY);
GFP_ATOMIC       1542 kernel/power/snapshot.c 	if (memory_bm_create(bm, GFP_ATOMIC, PG_SAFE))
GFP_ATOMIC       1720 kernel/power/snapshot.c 	error = memory_bm_create(new_bm, GFP_ATOMIC, PG_SAFE);
GFP_ATOMIC       1742 kernel/power/snapshot.c 		lp = get_image_page(GFP_ATOMIC, PG_SAFE);
GFP_ATOMIC       1755 kernel/power/snapshot.c 		lp = (struct linked_page *)get_zeroed_page(GFP_ATOMIC);
GFP_ATOMIC       1852 kernel/power/snapshot.c 			buffer = get_image_page(GFP_ATOMIC, PG_ANY);
GFP_ATOMIC       1866 kernel/power/snapshot.c 			error = memory_bm_create(&copy_bm, GFP_ATOMIC, PG_ANY);
GFP_ATOMIC       1877 kernel/power/snapshot.c 				chain_init(&ca, GFP_ATOMIC, PG_SAFE);
GFP_ATOMIC       1963 kernel/power/snapshot.c 	buf = get_image_page(GFP_ATOMIC, PG_SAFE);
GFP_ATOMIC        833 kernel/signal.c 	q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
GFP_ATOMIC        229 kernel/smp.c   			data = kmalloc(sizeof(*data), GFP_ATOMIC);
GFP_ATOMIC        351 kernel/smp.c   	data = kmalloc(sizeof(*data), GFP_ATOMIC);
GFP_ATOMIC       1764 kernel/sys.c   	char **argv = argv_split(GFP_ATOMIC, poweroff_cmd, &argc);
GFP_ATOMIC       1779 kernel/sys.c   	info = call_usermodehelper_setup(argv[0], argv, envp, GFP_ATOMIC);
GFP_ATOMIC         69 lib/debugobjects.c 	gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
GFP_ATOMIC        318 mm/dmapool.c   	page = pool_alloc_page(pool, GFP_ATOMIC);
GFP_ATOMIC        180 mm/migrate.c   	mem_cgroup_charge(new, mm, GFP_ATOMIC);
GFP_ATOMIC       4452 mm/page_alloc.c 			table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
GFP_ATOMIC       4455 mm/page_alloc.c 			table = (void*) __get_free_pages(GFP_ATOMIC, order);
GFP_ATOMIC       1046 mm/shmem.c     	if (swap.val && add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
GFP_ATOMIC       3525 mm/slub.c      	if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC))
GFP_ATOMIC         41 mm/swap_state.c 	.page_tree	= RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN),
GFP_ATOMIC        543 mm/vmscan.c    			if (!add_to_swap(page, GFP_ATOMIC))
GFP_ATOMIC        192 net/802/garp.c 	attr = kmalloc(sizeof(*attr) + len, GFP_ATOMIC);
GFP_ATOMIC        216 net/802/garp.c 			GFP_ATOMIC);
GFP_ATOMIC         43 net/802/p8022.c 	proto = kmalloc(sizeof(*proto), GFP_ATOMIC);
GFP_ATOMIC         43 net/802/p8023.c 	struct datalink_proto *proto = kmalloc(sizeof(*proto), GFP_ATOMIC);
GFP_ATOMIC        137 net/802/psnap.c 	proto = kmalloc(sizeof(*proto), GFP_ATOMIC);
GFP_ATOMIC        384 net/802/tr.c   		entry=kmalloc(sizeof(struct rif_cache),GFP_ATOMIC);
GFP_ATOMIC        147 net/8021q/vlan_dev.c 	skb = skb_share_check(skb, GFP_ATOMIC);
GFP_ATOMIC        164 net/9p/trans_virtio.c 								GFP_ATOMIC);
GFP_ATOMIC        172 net/9p/trans_virtio.c 								GFP_ATOMIC);
GFP_ATOMIC        108 net/appletalk/aarp.c 	struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);
GFP_ATOMIC        161 net/appletalk/aarp.c 	struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);
GFP_ATOMIC        210 net/appletalk/aarp.c 	struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);
GFP_ATOMIC        385 net/appletalk/aarp.c 	struct aarp_entry *a = kmalloc(sizeof(*a), GFP_ATOMIC);
GFP_ATOMIC        561 net/appletalk/ddp.c 		rt = kzalloc(sizeof(*rt), GFP_ATOMIC);
GFP_ATOMIC       1372 net/appletalk/ddp.c 		skb = skb_unshare(skb, GFP_ATOMIC);
GFP_ATOMIC       1412 net/appletalk/ddp.c 	if (!(skb = skb_share_check(skb, GFP_ATOMIC)))
GFP_ATOMIC       1509 net/appletalk/ddp.c 		if (!(skb = skb_share_check(skb, GFP_ATOMIC)))
GFP_ATOMIC         88 net/atm/addr.c 	this = kmalloc(sizeof(struct atm_dev_addr), GFP_ATOMIC);
GFP_ATOMIC        147 net/atm/addr.c 	tmp_buf = tmp_bufp = kmalloc(total, GFP_ATOMIC);
GFP_ATOMIC         57 net/atm/clip.c 	skb = alloc_skb(sizeof(struct atmarp_ctrl),GFP_ATOMIC);
GFP_ATOMIC        146 net/atm/lec.c  		skb2 = alloc_skb(sizeof(struct atmlec_msg), GFP_ATOMIC);
GFP_ATOMIC        345 net/atm/lec.c  					       GFP_ATOMIC);
GFP_ATOMIC        568 net/atm/lec.c  					      GFP_ATOMIC);
GFP_ATOMIC        647 net/atm/lec.c  	skb = alloc_skb(sizeof(struct atmlec_msg), GFP_ATOMIC);
GFP_ATOMIC       1294 net/atm/lec.c  		*tlvs = kmemdup(table->tlvs, table->sizeoftlvs, GFP_ATOMIC);
GFP_ATOMIC       1307 net/atm/lec.c  		skb = alloc_skb(*sizeoftlvs, GFP_ATOMIC);
GFP_ATOMIC       1341 net/atm/lec.c  	skb = alloc_skb(sizeoftlvs, GFP_ATOMIC);
GFP_ATOMIC       1770 net/atm/lec.c  	to_return = kzalloc(sizeof(struct lec_arp_table), GFP_ATOMIC);
GFP_ATOMIC        949 net/atm/mpc.c  	skb = alloc_skb(sizeof(struct k_message), GFP_ATOMIC);
GFP_ATOMIC       1223 net/atm/mpc.c  	skb = alloc_skb(sizeof(struct k_message), GFP_ATOMIC);
GFP_ATOMIC        220 net/atm/resources.c 			tmp_buf = kmalloc(size, GFP_ATOMIC);
GFP_ATOMIC        260 net/ax25/af_ax25.c 			if ((copy = skb_clone(skb, GFP_ATOMIC)) == NULL)
GFP_ATOMIC        506 net/ax25/af_ax25.c 	if ((ax25 = kzalloc(sizeof(*ax25), GFP_ATOMIC)) == NULL)
GFP_ATOMIC        849 net/ax25/af_ax25.c 	sk = sk_alloc(net, PF_AX25, GFP_ATOMIC, &ax25_proto);
GFP_ATOMIC        875 net/ax25/af_ax25.c 	sk = sk_alloc(sock_net(osk), PF_AX25, GFP_ATOMIC, osk->sk_prot);
GFP_ATOMIC        926 net/ax25/af_ax25.c 					 GFP_ATOMIC);
GFP_ATOMIC         57 net/ax25/ax25_dev.c 	if ((ax25_dev = kzalloc(sizeof(*ax25_dev), GFP_ATOMIC)) == NULL) {
GFP_ATOMIC        136 net/ax25/ax25_ds_subr.c 	if ((skb = alloc_skb(2, GFP_ATOMIC)) == NULL)
GFP_ATOMIC        115 net/ax25/ax25_iface.c 	if ((listen = kmalloc(sizeof(*listen), GFP_ATOMIC)) == NULL)
GFP_ATOMIC         55 net/ax25/ax25_in.c 							 GFP_ATOMIC);
GFP_ATOMIC        120 net/ax25/ax25_in.c 		struct sk_buff *skbn = skb_copy(skb, GFP_ATOMIC);
GFP_ATOMIC        396 net/ax25/ax25_in.c 	    (ax25->digipeat = kmalloc(sizeof(ax25_digi), GFP_ATOMIC)) == NULL) {
GFP_ATOMIC        156 net/ax25/ax25_ip.c 			if ((ourskb = skb_copy(skb, GFP_ATOMIC)) == NULL) {
GFP_ATOMIC         72 net/ax25/ax25_out.c 		ax25->digipeat = kmemdup(digi, sizeof(*digi), GFP_ATOMIC);
GFP_ATOMIC        142 net/ax25/ax25_out.c 			if ((skbn = alloc_skb(paclen + 2 + frontlen, GFP_ATOMIC)) == NULL) {
GFP_ATOMIC        277 net/ax25/ax25_out.c 		if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {
GFP_ATOMIC         94 net/ax25/ax25_route.c 				if ((ax25_rt->digipeat = kmalloc(sizeof(ax25_digi), GFP_ATOMIC)) == NULL) {
GFP_ATOMIC        111 net/ax25/ax25_route.c 	if ((ax25_rt = kmalloc(sizeof(ax25_route), GFP_ATOMIC)) == NULL) {
GFP_ATOMIC        122 net/ax25/ax25_route.c 		if ((ax25_rt->digipeat = kmalloc(sizeof(ax25_digi), GFP_ATOMIC)) == NULL) {
GFP_ATOMIC        438 net/ax25/ax25_route.c 					 GFP_ATOMIC);
GFP_ATOMIC        155 net/ax25/ax25_subr.c 	if ((skb = alloc_skb(ax25->ax25_dev->dev->hard_header_len + 2, GFP_ATOMIC)) == NULL)
GFP_ATOMIC        199 net/ax25/ax25_subr.c 	if ((skb = alloc_skb(dev->hard_header_len + 1, GFP_ATOMIC)) == NULL)
GFP_ATOMIC        196 net/ax25/sysctl_net_ax25.c 	if ((ax25_table = kzalloc(ax25_table_size, GFP_ATOMIC)) == NULL) {
GFP_ATOMIC        204 net/ax25/sysctl_net_ax25.c 						  GFP_ATOMIC);
GFP_ATOMIC         81 net/bluetooth/bnep/netdev.c 	skb  = alloc_skb(size, GFP_ATOMIC);
GFP_ATOMIC        212 net/bluetooth/bnep/sock.c 	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &bnep_proto);
GFP_ATOMIC        162 net/bluetooth/cmtp/capi.c 	if (!(skb = alloc_skb(CAPI_MSG_BASELEN + 6 + len, GFP_ATOMIC))) {
GFP_ATOMIC        122 net/bluetooth/cmtp/core.c 	if (!(nskb = alloc_skb(size, GFP_ATOMIC))) {
GFP_ATOMIC        224 net/bluetooth/cmtp/core.c 	if (!(nskb = alloc_skb(session->mtu, GFP_ATOMIC))) {
GFP_ATOMIC        207 net/bluetooth/cmtp/sock.c 	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &cmtp_proto);
GFP_ATOMIC        206 net/bluetooth/hci_conn.c 	conn = kzalloc(sizeof(struct hci_conn), GFP_ATOMIC);
GFP_ATOMIC        360 net/bluetooth/hci_core.c 		if (!(e = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC)))
GFP_ATOMIC        998 net/bluetooth/hci_core.c 			skb = bt_skb_alloc(len, GFP_ATOMIC);
GFP_ATOMIC       1145 net/bluetooth/hci_core.c 	skb = bt_skb_alloc(len, GFP_ATOMIC);
GFP_ATOMIC       1576 net/bluetooth/hci_core.c 		if ((hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC))) {
GFP_ATOMIC       1910 net/bluetooth/hci_event.c 	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
GFP_ATOMIC        133 net/bluetooth/hci_sock.c 		if (!(nskb = skb_clone(skb, GFP_ATOMIC)))
GFP_ATOMIC        640 net/bluetooth/hci_sock.c 	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto);
GFP_ATOMIC        160 net/bluetooth/hidp/core.c 	if (!(skb = alloc_skb(3, GFP_ATOMIC))) {
GFP_ATOMIC        253 net/bluetooth/hidp/core.c 	if (!(skb = alloc_skb(size + 1, GFP_ATOMIC))) {
GFP_ATOMIC        310 net/bluetooth/hidp/core.c 	if (!(skb = alloc_skb(size + 1, GFP_ATOMIC))) {
GFP_ATOMIC        258 net/bluetooth/hidp/sock.c 	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hidp_proto);
GFP_ATOMIC        457 net/bluetooth/l2cap.c 	conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
GFP_ATOMIC        723 net/bluetooth/l2cap.c 	sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
GFP_ATOMIC       1303 net/bluetooth/l2cap.c 		if (!(nskb = skb_clone(skb, GFP_ATOMIC)))
GFP_ATOMIC       1326 net/bluetooth/l2cap.c 	skb = bt_skb_alloc(count, GFP_ATOMIC);
GFP_ATOMIC       1352 net/bluetooth/l2cap.c 		*frag = bt_skb_alloc(count, GFP_ATOMIC);
GFP_ATOMIC       1600 net/bluetooth/l2cap.c 	sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
GFP_ATOMIC       2380 net/bluetooth/l2cap.c 		if (!(conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC)))
GFP_ATOMIC        337 net/bluetooth/rfcomm/sock.c 	sk = rfcomm_sock_alloc(net, sock, protocol, GFP_ATOMIC);
GFP_ATOMIC        878 net/bluetooth/rfcomm/sock.c 	sk = rfcomm_sock_alloc(sock_net(parent), NULL, BTPROTO_RFCOMM, GFP_ATOMIC);
GFP_ATOMIC        783 net/bluetooth/rfcomm/tty.c 		skb = rfcomm_wmalloc(dev, size + RFCOMM_SKB_RESERVE, GFP_ATOMIC);
GFP_ATOMIC        111 net/bluetooth/sco.c 	conn = kzalloc(sizeof(struct sco_conn), GFP_ATOMIC);
GFP_ATOMIC        456 net/bluetooth/sco.c 	sk = sco_sock_alloc(net, sock, protocol, GFP_ATOMIC);
GFP_ATOMIC        811 net/bluetooth/sco.c 		sk = sco_sock_alloc(sock_net(parent), NULL, BTPROTO_SCO, GFP_ATOMIC);
GFP_ATOMIC        326 net/bridge/br_fdb.c 	fdb = kmem_cache_alloc(br_fdb_cache, GFP_ATOMIC);
GFP_ATOMIC        115 net/bridge/br_forward.c 				if ((skb2 = skb_clone(skb, GFP_ATOMIC)) == NULL) {
GFP_ATOMIC         74 net/bridge/br_input.c 		skb2 = skb_clone(skb, GFP_ATOMIC);
GFP_ATOMIC        128 net/bridge/br_input.c 	skb = skb_share_check(skb, GFP_ATOMIC);
GFP_ATOMIC        138 net/bridge/br_netfilter.c 	skb->nf_bridge = kzalloc(sizeof(struct nf_bridge_info), GFP_ATOMIC);
GFP_ATOMIC         90 net/bridge/br_netlink.c 	skb = nlmsg_new(br_nlmsg_size(), GFP_ATOMIC);
GFP_ATOMIC        101 net/bridge/br_netlink.c 	err = rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
GFP_ATOMIC         86 net/bridge/netfilter/ebt_ulog.c 	netlink_broadcast(ebtulognl, ub->skb, 0, nlgroup + 1, GFP_ATOMIC);
GFP_ATOMIC        107 net/bridge/netfilter/ebt_ulog.c 	skb = alloc_skb(n, GFP_ATOMIC);
GFP_ATOMIC        114 net/bridge/netfilter/ebt_ulog.c 			skb = alloc_skb(size, GFP_ATOMIC);
GFP_ATOMIC        255 net/can/af_can.c 			newskb = skb_clone(skb, GFP_ATOMIC);
GFP_ATOMIC        539 net/can/af_can.c 	struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);
GFP_ATOMIC        826 net/core/dev.c 		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
GFP_ATOMIC       1346 net/core/dev.c 			struct sk_buff *skb2= skb_clone(skb, GFP_ATOMIC);
GFP_ATOMIC       1501 net/core/dev.c 		ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
GFP_ATOMIC       1538 net/core/dev.c 		    (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
GFP_ATOMIC       2986 net/core/dev.c 			audit_log(current->audit_context, GFP_ATOMIC,
GFP_ATOMIC       3153 net/core/dev.c 	da = kzalloc(sizeof(*da), GFP_ATOMIC);
GFP_ATOMIC        171 net/core/dst.c 	dst = kmem_cache_zalloc(ops->kmem_cachep, GFP_ATOMIC);
GFP_ATOMIC        210 net/core/flow.c 		fle = kmem_cache_alloc(flow_cachep, GFP_ATOMIC);
GFP_ATOMIC        275 net/core/neighbour.c 	n = kmem_cache_zalloc(tbl->kmem_cachep, GFP_ATOMIC);
GFP_ATOMIC        305 net/core/neighbour.c 		ret = kzalloc(size, GFP_ATOMIC);
GFP_ATOMIC        308 net/core/neighbour.c 		      __get_free_pages(GFP_ATOMIC|__GFP_ZERO, get_order(size));
GFP_ATOMIC        872 net/core/neighbour.c 			skb = skb_copy(skb, GFP_ATOMIC);
GFP_ATOMIC       1140 net/core/neighbour.c 	if (!hh && (hh = kzalloc(sizeof(*hh), GFP_ATOMIC)) != NULL) {
GFP_ATOMIC       2533 net/core/neighbour.c 	skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
GFP_ATOMIC       2544 net/core/neighbour.c 	err = rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
GFP_ATOMIC        196 net/core/netpoll.c 		skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
GFP_ATOMIC        242 net/core/netpoll.c 	skb = alloc_skb(len, GFP_ATOMIC);
GFP_ATOMIC       2473 net/core/pktgen.c 				ret = pskb_expand_head(skb, nhead, 0, GFP_ATOMIC);
GFP_ATOMIC       2549 net/core/pktgen.c 			pkt_dev->pkt_overhead, GFP_ATOMIC);
GFP_ATOMIC       2888 net/core/pktgen.c 			pkt_dev->pkt_overhead, GFP_ATOMIC);
GFP_ATOMIC        304 net/core/skbuff.c 	return __dev_alloc_skb(length, GFP_ATOMIC);
GFP_ATOMIC        824 net/core/skbuff.c 		skb2 = pskb_copy(skb, GFP_ATOMIC);
GFP_ATOMIC        826 net/core/skbuff.c 		skb2 = skb_clone(skb, GFP_ATOMIC);
GFP_ATOMIC        828 net/core/skbuff.c 					     GFP_ATOMIC)) {
GFP_ATOMIC        926 net/core/skbuff.c 		err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC);
GFP_ATOMIC       1031 net/core/skbuff.c 	    unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))))
GFP_ATOMIC       1066 net/core/skbuff.c 			nfrag = skb_clone(frag, GFP_ATOMIC);
GFP_ATOMIC       1138 net/core/skbuff.c 				     GFP_ATOMIC))
GFP_ATOMIC       1184 net/core/skbuff.c 					clone = skb_clone(list, GFP_ATOMIC);
GFP_ATOMIC       2308 net/core/skbuff.c 		nskb = alloc_skb(hsize + doffset + headroom, GFP_ATOMIC);
GFP_ATOMIC       2521 net/core/skbuff.c 		    pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC))
GFP_ATOMIC       2563 net/core/skbuff.c 				skb2 = skb_copy(skb1, GFP_ATOMIC);
GFP_ATOMIC       2568 net/core/skbuff.c 						       GFP_ATOMIC);
GFP_ATOMIC         30 net/dccp/ackvec.c 			kmem_cache_alloc(dccp_ackvec_record_slab, GFP_ATOMIC);
GFP_ATOMIC        161 net/dccp/ccid.c 		if (gfp & GFP_ATOMIC)
GFP_ATOMIC         47 net/dccp/ccids/lib/loss_interval.c 								    GFP_ATOMIC);
GFP_ATOMIC        400 net/dccp/ccids/lib/packet_history.c 		h->ring[i] = kmem_cache_alloc(tfrc_rx_hist_slab, GFP_ATOMIC);
GFP_ATOMIC         87 net/dccp/feat.c 	new_ccid = ccid_new(new_ccid_nr, sk, rx, GFP_ATOMIC);
GFP_ATOMIC        183 net/dccp/feat.c 	rpref = kmalloc(rlen, GFP_ATOMIC);
GFP_ATOMIC        192 net/dccp/feat.c 		opt->dccpop_sc = kmalloc(sizeof(*opt->dccpop_sc), GFP_ATOMIC);
GFP_ATOMIC        285 net/dccp/feat.c 	opt = kzalloc(sizeof(*opt), GFP_ATOMIC);
GFP_ATOMIC        289 net/dccp/feat.c 	copy = kmemdup(val, len, GFP_ATOMIC);
GFP_ATOMIC        319 net/dccp/feat.c 	struct dccp_opt_pend *opt = kzalloc(sizeof(*opt), GFP_ATOMIC);
GFP_ATOMIC        529 net/dccp/feat.c 		u8 *val = kmemdup(opt->dccpop_val, opt->dccpop_len, GFP_ATOMIC);
GFP_ATOMIC        534 net/dccp/feat.c 		newopt = kmemdup(opt, sizeof(*newopt), GFP_ATOMIC);
GFP_ATOMIC        612 net/dccp/ipv6.c 		newnp->pktoptions = skb_clone(ireq6->pktopts, GFP_ATOMIC);
GFP_ATOMIC        708 net/dccp/ipv6.c 		opt_skb = skb_clone(skb, GFP_ATOMIC);
GFP_ATOMIC        112 net/dccp/minisocks.c 	struct sock *newsk = inet_csk_clone(sk, req, GFP_ATOMIC);
GFP_ATOMIC        133 net/dccp/minisocks.c 						dccp_ackvec_alloc(GFP_ATOMIC);
GFP_ATOMIC        140 net/dccp/minisocks.c 					   newsk, GFP_ATOMIC);
GFP_ATOMIC        143 net/dccp/minisocks.c 					   newsk, GFP_ATOMIC);
GFP_ATOMIC        315 net/dccp/output.c 	return dccp_transmit_skb(sk, skb_clone(sk->sk_send_head, GFP_ATOMIC));
GFP_ATOMIC        327 net/dccp/output.c 					   GFP_ATOMIC);
GFP_ATOMIC        381 net/dccp/output.c 	skb = alloc_skb(sk->sk_prot->max_header, GFP_ATOMIC);
GFP_ATOMIC        435 net/dccp/output.c 	skb = sock_wmalloc(sk, sk->sk_prot->max_header, 1, GFP_ATOMIC);
GFP_ATOMIC        500 net/dccp/output.c 						GFP_ATOMIC);
GFP_ATOMIC        561 net/dccp/output.c 	struct sk_buff *skb = alloc_skb(sk->sk_prot->max_header, GFP_ATOMIC);
GFP_ATOMIC        588 net/dccp/output.c 	const gfp_t prio = active ? GFP_KERNEL : GFP_ATOMIC;
GFP_ATOMIC       1078 net/dccp/proto.c 			__get_free_pages(GFP_ATOMIC, ehash_order);
GFP_ATOMIC       1103 net/dccp/proto.c 			__get_free_pages(GFP_ATOMIC, bhash_order);
GFP_ATOMIC        580 net/decnet/af_decnet.c 			dn_nsp_send_disc(sk, NSP_DISCINIT, 0, GFP_ATOMIC);
GFP_ATOMIC        586 net/decnet/af_decnet.c 			dn_nsp_send_disc(sk, NSP_DISCINIT, 0, GFP_ATOMIC);
GFP_ATOMIC        594 net/decnet/af_decnet.c 				dn_nsp_send_disc(sk, NSP_DISCCONF, NSP_REASON_DC, GFP_ATOMIC);
GFP_ATOMIC        879 net/decnet/dn_dev.c 	if ((skb = dn_alloc_skb(NULL, sizeof(*msg), GFP_ATOMIC)) == NULL)
GFP_ATOMIC        963 net/decnet/dn_dev.c 	if ((skb = dn_alloc_skb(NULL, size, GFP_ATOMIC)) == NULL)
GFP_ATOMIC       1003 net/decnet/dn_dev.c 		struct sk_buff *skb2 = skb_copy(skb, GFP_ATOMIC);
GFP_ATOMIC       1026 net/decnet/dn_dev.c 	struct sk_buff *skb = dn_alloc_skb(NULL, size, GFP_ATOMIC);
GFP_ATOMIC       1127 net/decnet/dn_dev.c 	if ((dn_db = kzalloc(sizeof(struct dn_dev), GFP_ATOMIC)) == NULL)
GFP_ATOMIC        445 net/decnet/dn_nsp_in.c 		dn_nsp_send_disc(sk, NSP_DISCCONF, NSP_REASON_DC, GFP_ATOMIC);
GFP_ATOMIC        250 net/decnet/dn_nsp_out.c 		reduce_win = dn_nsp_clone_and_send(skb, GFP_ATOMIC);
GFP_ATOMIC        261 net/decnet/dn_nsp_out.c 		reduce_win = dn_nsp_clone_and_send(skb, GFP_ATOMIC);
GFP_ATOMIC        463 net/decnet/dn_nsp_out.c 	if ((skb = dn_alloc_skb(sk, 9, GFP_ATOMIC)) == NULL)
GFP_ATOMIC        475 net/decnet/dn_nsp_out.c 	if ((skb = dn_alloc_skb(sk, 9, GFP_ATOMIC)) == NULL)
GFP_ATOMIC        516 net/decnet/dn_nsp_out.c 		dn_send_conn_conf(sk, GFP_ATOMIC);
GFP_ATOMIC        617 net/decnet/dn_nsp_out.c 	gfp_t gfp = GFP_ATOMIC;
GFP_ATOMIC        629 net/decnet/dn_nsp_out.c 	gfp_t gfp = GFP_ATOMIC;
GFP_ATOMIC        664 net/decnet/dn_nsp_out.c 	gfp_t allocation = (msgflg == NSP_CI) ? sk->sk_allocation : GFP_ATOMIC;
GFP_ATOMIC        389 net/decnet/dn_route.c 	if ((skb = skb_unshare(skb, GFP_ATOMIC)) == NULL)
GFP_ATOMIC        428 net/decnet/dn_route.c 	if ((skb = skb_unshare(skb, GFP_ATOMIC)) == NULL)
GFP_ATOMIC        589 net/decnet/dn_route.c 	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
GFP_ATOMIC       1776 net/decnet/dn_route.c 			__get_free_pages(GFP_ATOMIC, order);
GFP_ATOMIC         43 net/decnet/netfilter/dn_rtmsg.c 	skb = alloc_skb(size, GFP_ATOMIC);
GFP_ATOMIC         86 net/decnet/netfilter/dn_rtmsg.c 	netlink_broadcast(dnrmg, skb2, 0, group, GFP_ATOMIC);
GFP_ATOMIC         88 net/dsa/tag_dsa.c 	skb = skb_unshare(skb, GFP_ATOMIC);
GFP_ATOMIC        101 net/dsa/tag_edsa.c 	skb = skb_unshare(skb, GFP_ATOMIC);
GFP_ATOMIC         36 net/dsa/tag_trailer.c 	nskb = alloc_skb(NET_IP_ALIGN + skb->len + padlen + 4, GFP_ATOMIC);
GFP_ATOMIC         78 net/dsa/tag_trailer.c 	skb = skb_unshare(skb, GFP_ATOMIC);
GFP_ATOMIC        223 net/econet/af_econet.c 	struct sk_buff *skb = alloc_skb(0, GFP_ATOMIC);
GFP_ATOMIC        865 net/econet/af_econet.c 			   GFP_ATOMIC);
GFP_ATOMIC       1031 net/econet/af_econet.c 	udpsock->sk->sk_allocation = GFP_ATOMIC; /* we're going to call it
GFP_ATOMIC       1074 net/econet/af_econet.c 	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
GFP_ATOMIC         21 net/ethernet/pe2.c 	struct datalink_proto *proto = kmalloc(sizeof(*proto), GFP_ATOMIC);
GFP_ATOMIC         70 net/ieee80211/ieee80211_crypt_ccmp.c 	priv = kzalloc(sizeof(*priv), GFP_ATOMIC);
GFP_ATOMIC         86 net/ieee80211/ieee80211_crypt_tkip.c 	priv = kzalloc(sizeof(*priv), GFP_ATOMIC);
GFP_ATOMIC         45 net/ieee80211/ieee80211_crypt_wep.c 	priv = kzalloc(sizeof(*priv), GFP_ATOMIC);
GFP_ATOMIC        187 net/ieee80211/ieee80211_rx.c 			skb2 = skb_clone(skb, GFP_ATOMIC);
GFP_ATOMIC        802 net/ieee80211/ieee80211_rx.c 			skb2 = skb_clone(skb, GFP_ATOMIC);
GFP_ATOMIC       1334 net/ieee80211/ieee80211_rx.c 						    GFP_ATOMIC);
GFP_ATOMIC        428 net/ieee80211/ieee80211_tx.c 				  ieee->tx_headroom, GFP_ATOMIC);
GFP_ATOMIC       1256 net/ipv4/af_inet.c 		(*sk)->sk_allocation = GFP_ATOMIC;
GFP_ATOMIC        152 net/ipv4/ah4.c 	    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
GFP_ATOMIC        572 net/ipv4/arp.c 	skb = alloc_skb(arp_hdr_len(dev) + LL_ALLOCATED_SPACE(dev), GFP_ATOMIC);
GFP_ATOMIC        926 net/ipv4/arp.c 	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
GFP_ATOMIC        391 net/ipv4/cipso_ipv4.c 	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
GFP_ATOMIC        394 net/ipv4/cipso_ipv4.c 	entry->key = kmemdup(cipso_ptr, cipso_ptr_len, GFP_ATOMIC);
GFP_ATOMIC        932 net/ipv4/cipso_ipv4.c 						       GFP_ATOMIC);
GFP_ATOMIC       1034 net/ipv4/cipso_ipv4.c 				GFP_ATOMIC);
GFP_ATOMIC       1177 net/ipv4/cipso_ipv4.c 						       GFP_ATOMIC);
GFP_ATOMIC       1293 net/ipv4/cipso_ipv4.c 		                       netlbl_secattr_catmap_alloc(GFP_ATOMIC);
GFP_ATOMIC       1389 net/ipv4/cipso_ipv4.c 			               netlbl_secattr_catmap_alloc(GFP_ATOMIC);
GFP_ATOMIC       1484 net/ipv4/cipso_ipv4.c 			               netlbl_secattr_catmap_alloc(GFP_ATOMIC);
GFP_ATOMIC       1850 net/ipv4/cipso_ipv4.c 	buf = kmalloc(buf_len, GFP_ATOMIC);
GFP_ATOMIC       1866 net/ipv4/cipso_ipv4.c 	opt = kzalloc(sizeof(*opt) + opt_len, GFP_ATOMIC);
GFP_ATOMIC         50 net/ipv4/esp4.c 	return kmalloc(len, GFP_ATOMIC);
GFP_ATOMIC        294 net/ipv4/igmp.c 	skb = alloc_skb(size + LL_ALLOCATED_SPACE(dev), GFP_ATOMIC);
GFP_ATOMIC        656 net/ipv4/igmp.c 	skb=alloc_skb(IGMP_SIZE+LL_ALLOCATED_SPACE(dev), GFP_ATOMIC);
GFP_ATOMIC       1551 net/ipv4/igmp.c 		psf = kzalloc(sizeof(*psf), GFP_ATOMIC);
GFP_ATOMIC       1627 net/ipv4/igmp.c 					kmalloc(sizeof(*dpsf), GFP_ATOMIC);
GFP_ATOMIC        242 net/ipv4/inet_fragment.c 	q = kzalloc(f->qsize, GFP_ATOMIC);
GFP_ATOMIC         35 net/ipv4/inet_hashtables.c 	struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, GFP_ATOMIC);
GFP_ATOMIC        112 net/ipv4/inet_timewait_sock.c 				 GFP_ATOMIC);
GFP_ATOMIC        387 net/ipv4/inetpeer.c 	n = kmem_cache_alloc(peer_cachep, GFP_ATOMIC);
GFP_ATOMIC        477 net/ipv4/ip_fragment.c 		fp = skb_clone(head, GFP_ATOMIC);
GFP_ATOMIC        503 net/ipv4/ip_fragment.c 	if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC))
GFP_ATOMIC        513 net/ipv4/ip_fragment.c 		if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL)
GFP_ATOMIC        175 net/ipv4/ip_input.c 				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
GFP_ATOMIC        383 net/ipv4/ip_input.c 	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) {
GFP_ATOMIC        270 net/ipv4/ip_output.c 			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
GFP_ATOMIC        286 net/ipv4/ip_output.c 		struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
GFP_ATOMIC        605 net/ipv4/ip_output.c 		if ((skb2 = alloc_skb(len+hlen+ll_rs, GFP_ATOMIC)) == NULL) {
GFP_ATOMIC        264 net/ipv4/ip_sockglue.c 	skb = skb_clone(skb, GFP_ATOMIC);
GFP_ATOMIC        298 net/ipv4/ip_sockglue.c 	skb = alloc_skb(sizeof(struct iphdr), GFP_ATOMIC);
GFP_ATOMIC        438 net/ipv4/ipconfig.c 	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
GFP_ATOMIC        862 net/ipv4/ipconfig.c 	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
GFP_ATOMIC        534 net/ipv4/ipmr.c 	struct mfc_cache *c=kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);
GFP_ATOMIC        597 net/ipv4/ipmr.c 		skb = alloc_skb(128, GFP_ATOMIC);
GFP_ATOMIC       1363 net/ipv4/ipmr.c 				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
GFP_ATOMIC       1372 net/ipv4/ipmr.c 			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
GFP_ATOMIC       1435 net/ipv4/ipmr.c 			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
GFP_ATOMIC       1653 net/ipv4/ipmr.c 		skb2 = skb_clone(skb, GFP_ATOMIC);
GFP_ATOMIC         76 net/ipv4/netfilter.c 	    pskb_expand_head(skb, hh_len - skb_headroom(skb), 0, GFP_ATOMIC))
GFP_ATOMIC        109 net/ipv4/netfilter.c 	    pskb_expand_head(skb, hh_len - skb_headroom(skb), 0, GFP_ATOMIC))
GFP_ATOMIC        184 net/ipv4/netfilter/ip_queue.c 	skb = alloc_skb(size, GFP_ATOMIC);
GFP_ATOMIC        297 net/ipv4/netfilter/ip_queue.c 					       diff, GFP_ATOMIC);
GFP_ATOMIC        152 net/ipv4/netfilter/ipt_CLUSTERIP.c 	c = kzalloc(sizeof(*c), GFP_ATOMIC);
GFP_ATOMIC         63 net/ipv4/netfilter/ipt_REJECT.c 			 LL_MAX_HEADER, GFP_ATOMIC);
GFP_ATOMIC        109 net/ipv4/netfilter/ipt_ULOG.c 	netlink_broadcast(nflognl, ub->skb, 0, nlgroupnum + 1, GFP_ATOMIC);
GFP_ATOMIC        139 net/ipv4/netfilter/ipt_ULOG.c 	skb = alloc_skb(n, GFP_ATOMIC);
GFP_ATOMIC        147 net/ipv4/netfilter/ipt_ULOG.c 			skb = alloc_skb(size, GFP_ATOMIC);
GFP_ATOMIC        293 net/ipv4/netfilter/nf_nat_core.c 		nat = nf_ct_ext_add(ct, NF_CT_EXT_NAT, GFP_ATOMIC);
GFP_ATOMIC        120 net/ipv4/netfilter/nf_nat_helper.c 	if (pskb_expand_head(skb, 0, extra - skb_tailroom(skb), GFP_ATOMIC))
GFP_ATOMIC        400 net/ipv4/netfilter/nf_nat_snmp_basic.c 	*octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
GFP_ATOMIC        451 net/ipv4/netfilter/nf_nat_snmp_basic.c 	*oid = kmalloc(size * sizeof(unsigned long), GFP_ATOMIC);
GFP_ATOMIC        727 net/ipv4/netfilter/nf_nat_snmp_basic.c 				       GFP_ATOMIC);
GFP_ATOMIC        743 net/ipv4/netfilter/nf_nat_snmp_basic.c 				       GFP_ATOMIC);
GFP_ATOMIC        758 net/ipv4/netfilter/nf_nat_snmp_basic.c 			*obj = kmalloc(sizeof(struct snmp_object), GFP_ATOMIC);
GFP_ATOMIC        778 net/ipv4/netfilter/nf_nat_snmp_basic.c 			*obj = kmalloc(sizeof(struct snmp_object) + len, GFP_ATOMIC);
GFP_ATOMIC        799 net/ipv4/netfilter/nf_nat_snmp_basic.c 			*obj = kmalloc(sizeof(struct snmp_object) + len, GFP_ATOMIC);
GFP_ATOMIC        818 net/ipv4/netfilter/nf_nat_snmp_basic.c 			*obj = kmalloc(sizeof(struct snmp_object) + len, GFP_ATOMIC);
GFP_ATOMIC       1150 net/ipv4/netfilter/nf_nat_snmp_basic.c 	obj = kmalloc(sizeof(struct snmp_object), GFP_ATOMIC);
GFP_ATOMIC        108 net/ipv4/netfilter/nf_nat_standalone.c 		nat = nf_ct_ext_add(ct, NF_CT_EXT_NAT, GFP_ATOMIC);
GFP_ATOMIC        177 net/ipv4/raw.c 			struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);
GFP_ATOMIC        318 net/ipv4/syncookies.c 		ireq->opt = kmalloc(opt_size, GFP_ATOMIC);
GFP_ATOMIC       1873 net/ipv4/tcp.c 			tcp_send_active_reset(sk, GFP_ATOMIC);
GFP_ATOMIC       1896 net/ipv4/tcp.c 			tcp_send_active_reset(sk, GFP_ATOMIC);
GFP_ATOMIC       4247 net/ipv4/tcp_input.c 		nskb = alloc_skb(copy + header, GFP_ATOMIC);
GFP_ATOMIC        780 net/ipv4/tcp_ipv4.c 		dopt = kmalloc(opt_size, GFP_ATOMIC);
GFP_ATOMIC        848 net/ipv4/tcp_ipv4.c 						  GFP_ATOMIC);
GFP_ATOMIC        863 net/ipv4/tcp_ipv4.c 					(md5sig->entries4 + 1)), GFP_ATOMIC);
GFP_ATOMIC       1388 net/ipv4/tcp_ipv4.c 		char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC);
GFP_ATOMIC        385 net/ipv4/tcp_minisocks.c 	struct sock *newsk = inet_csk_clone(sk, req, GFP_ATOMIC);
GFP_ATOMIC        673 net/ipv4/tcp_minisocks.c 					       GFP_ATOMIC);
GFP_ATOMIC        766 net/ipv4/tcp_output.c 	    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
GFP_ATOMIC        770 net/ipv4/tcp_output.c 	buff = sk_stream_alloc_skb(sk, nsize, GFP_ATOMIC);
GFP_ATOMIC        884 net/ipv4/tcp_output.c 	if (skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
GFP_ATOMIC       1275 net/ipv4/tcp_output.c 	buff = sk_stream_alloc_skb(sk, 0, GFP_ATOMIC);
GFP_ATOMIC       1431 net/ipv4/tcp_output.c 	if ((nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC)) == NULL)
GFP_ATOMIC       1488 net/ipv4/tcp_output.c 	if (!tcp_transmit_skb(sk, nskb, 1, GFP_ATOMIC)) {
GFP_ATOMIC       1569 net/ipv4/tcp_output.c 		if (unlikely(tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC)))
GFP_ATOMIC       1970 net/ipv4/tcp_output.c 	err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
GFP_ATOMIC       2208 net/ipv4/tcp_output.c 			struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);
GFP_ATOMIC       2224 net/ipv4/tcp_output.c 	return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
GFP_ATOMIC       2243 net/ipv4/tcp_output.c 	skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
GFP_ATOMIC       2490 net/ipv4/tcp_output.c 	buff = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
GFP_ATOMIC       2505 net/ipv4/tcp_output.c 	tcp_transmit_skb(sk, buff, 0, GFP_ATOMIC);
GFP_ATOMIC       2525 net/ipv4/tcp_output.c 	skb = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
GFP_ATOMIC       2537 net/ipv4/tcp_output.c 	return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC);
GFP_ATOMIC       2572 net/ipv4/tcp_output.c 		err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
GFP_ATOMIC         90 net/ipv4/tcp_timer.c 			tcp_send_active_reset(sk, GFP_ATOMIC);
GFP_ATOMIC        489 net/ipv4/tcp_timer.c 		tcp_send_active_reset(sk, GFP_ATOMIC);
GFP_ATOMIC        507 net/ipv4/tcp_timer.c 			tcp_send_active_reset(sk, GFP_ATOMIC);
GFP_ATOMIC       1092 net/ipv4/udp.c 				skb1 = skb_clone(skb, GFP_ATOMIC);
GFP_ATOMIC        135 net/ipv4/xfrm4_input.c 	if (skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
GFP_ATOMIC         78 net/ipv4/xfrm4_mode_tunnel.c 	    (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
GFP_ATOMIC        615 net/ipv6/addrconf.c 	ifa = kzalloc(sizeof(struct inet6_ifaddr), GFP_ATOMIC);
GFP_ATOMIC       3633 net/ipv6/addrconf.c 	skb = nlmsg_new(inet6_ifaddr_msgsize(), GFP_ATOMIC);
GFP_ATOMIC       3644 net/ipv6/addrconf.c 	err = rtnl_notify(skb, net, 0, RTNLGRP_IPV6_IFADDR, NULL, GFP_ATOMIC);
GFP_ATOMIC       3844 net/ipv6/addrconf.c 	skb = nlmsg_new(inet6_if_nlmsg_size(), GFP_ATOMIC);
GFP_ATOMIC       3855 net/ipv6/addrconf.c 	err = rtnl_notify(skb, net, 0, RTNLGRP_IPV6_IFADDR, NULL, GFP_ATOMIC);
GFP_ATOMIC       3914 net/ipv6/addrconf.c 	skb = nlmsg_new(inet6_prefix_nlmsg_size(), GFP_ATOMIC);
GFP_ATOMIC       3925 net/ipv6/addrconf.c 	err = rtnl_notify(skb, net, 0, RTNLGRP_IPV6_PREFIX, NULL, GFP_ATOMIC);
GFP_ATOMIC        254 net/ipv6/ah6.c 		tmp_ext = kmalloc(extlen, GFP_ATOMIC);
GFP_ATOMIC        343 net/ipv6/ah6.c 	    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
GFP_ATOMIC        361 net/ipv6/ah6.c 	tmp_hdr = kmemdup(skb_network_header(skb), hdr_len, GFP_ATOMIC);
GFP_ATOMIC        290 net/ipv6/anycast.c 	aca = kzalloc(sizeof(struct ifacaddr6), GFP_ATOMIC);
GFP_ATOMIC        220 net/ipv6/datagram.c 	skb = skb_clone(skb, GFP_ATOMIC);
GFP_ATOMIC        253 net/ipv6/datagram.c 	skb = alloc_skb(sizeof(struct ipv6hdr), GFP_ATOMIC);
GFP_ATOMIC         76 net/ipv6/esp6.c 	return kmalloc(len, GFP_ATOMIC);
GFP_ATOMIC        232 net/ipv6/exthdrs.c 		if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
GFP_ATOMIC        419 net/ipv6/exthdrs.c 		if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
GFP_ATOMIC        737 net/ipv6/exthdrs.c 	opt2 = sock_kmalloc(sk, opt->tot_len, GFP_ATOMIC);
GFP_ATOMIC        808 net/ipv6/exthdrs.c 	opt2 = sock_kmalloc(sk, tot_len, GFP_ATOMIC);
GFP_ATOMIC        151 net/ipv6/ip6_fib.c 	fn = kmem_cache_zalloc(fib6_node_kmem, GFP_ATOMIC);
GFP_ATOMIC        198 net/ipv6/ip6_fib.c 	table = kzalloc(sizeof(*table), GFP_ATOMIC);
GFP_ATOMIC        372 net/ipv6/ip6_fib.c 		w = kzalloc(sizeof(*w), GFP_ATOMIC);
GFP_ATOMIC         75 net/ipv6/ip6_input.c 	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL ||
GFP_ATOMIC        312 net/ipv6/ip6_input.c 			skb2 = skb_clone(skb, GFP_ATOMIC);
GFP_ATOMIC        143 net/ipv6/ip6_output.c 			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
GFP_ATOMIC        342 net/ipv6/ip6_output.c 				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
GFP_ATOMIC        695 net/ipv6/ip6_output.c 		tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC);
GFP_ATOMIC        808 net/ipv6/ip6_output.c 		if ((frag = alloc_skb(len+hlen+sizeof(struct frag_hdr)+LL_ALLOCATED_SPACE(rt->u.dst.dev), GFP_ATOMIC)) == NULL) {
GFP_ATOMIC        531 net/ipv6/ip6_tunnel.c 	skb2 = skb_clone(skb, GFP_ATOMIC);
GFP_ATOMIC        604 net/ipv6/ip6_tunnel.c 		struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
GFP_ATOMIC        696 net/ipv6/ip6mr.c 	struct mfc6_cache *c = kmem_cache_alloc(mrt_cachep, GFP_ATOMIC);
GFP_ATOMIC        755 net/ipv6/ip6mr.c 		skb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(*msg), GFP_ATOMIC);
GFP_ATOMIC       1520 net/ipv6/ip6mr.c 				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
GFP_ATOMIC       1637 net/ipv6/ip6mr.c 		skb2 = alloc_skb(sizeof(struct ipv6hdr), GFP_ATOMIC);
GFP_ATOMIC        451 net/ipv6/mcast.c 		newpsl = sock_kmalloc(sk, IP6_SFLSIZE(count), GFP_ATOMIC);
GFP_ATOMIC        538 net/ipv6/mcast.c 							  GFP_ATOMIC);
GFP_ATOMIC        771 net/ipv6/mcast.c 	pmc = kzalloc(sizeof(*pmc), GFP_ATOMIC);
GFP_ATOMIC        897 net/ipv6/mcast.c 	mc = kzalloc(sizeof(struct ifmcaddr6), GFP_ATOMIC);
GFP_ATOMIC       1966 net/ipv6/mcast.c 		psf = kzalloc(sizeof(*psf), GFP_ATOMIC);
GFP_ATOMIC       2041 net/ipv6/mcast.c 					kmalloc(sizeof(*dpsf), GFP_ATOMIC);
GFP_ATOMIC        810 net/ipv6/ndisc.c 				struct sk_buff *n = skb_clone(skb, GFP_ATOMIC);
GFP_ATOMIC       1034 net/ipv6/ndisc.c 	skb = nlmsg_new(msg_size, GFP_ATOMIC);
GFP_ATOMIC       1059 net/ipv6/ndisc.c 			  GFP_ATOMIC);
GFP_ATOMIC        187 net/ipv6/netfilter/ip6_queue.c 	skb = alloc_skb(size, GFP_ATOMIC);
GFP_ATOMIC        299 net/ipv6/netfilter/ip6_queue.c 					       diff, GFP_ATOMIC);
GFP_ATOMIC        106 net/ipv6/netfilter/ip6t_REJECT.c 			 GFP_ATOMIC);
GFP_ATOMIC        404 net/ipv6/netfilter/nf_conntrack_reasm.c 	if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC)) {
GFP_ATOMIC        416 net/ipv6/netfilter/nf_conntrack_reasm.c 		if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL) {
GFP_ATOMIC        584 net/ipv6/netfilter/nf_conntrack_reasm.c 	clone = skb_clone(skb, GFP_ATOMIC);
GFP_ATOMIC        209 net/ipv6/raw.c 			struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);
GFP_ATOMIC        464 net/ipv6/reassembly.c 		fp = skb_clone(head, GFP_ATOMIC);
GFP_ATOMIC        490 net/ipv6/reassembly.c 	if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC))
GFP_ATOMIC        500 net/ipv6/reassembly.c 		if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL)
GFP_ATOMIC        254 net/ipv6/sit.c 		kp = kcalloc(ca, sizeof(*kp), GFP_ATOMIC);
GFP_ATOMIC        586 net/ipv6/tcp_ipv6.c 			tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info), GFP_ATOMIC);
GFP_ATOMIC        599 net/ipv6/tcp_ipv6.c 				       (tp->md5sig_info->entries6 + 1)), GFP_ATOMIC);
GFP_ATOMIC        964 net/ipv6/tcp_ipv6.c 			 GFP_ATOMIC);
GFP_ATOMIC       1365 net/ipv6/tcp_ipv6.c 		newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC);
GFP_ATOMIC       1407 net/ipv6/tcp_ipv6.c 		char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC);
GFP_ATOMIC       1501 net/ipv6/tcp_ipv6.c 		opt_skb = skb_clone(skb, GFP_ATOMIC);
GFP_ATOMIC        395 net/ipv6/udp.c 		struct sk_buff *buff = skb_clone(skb, GFP_ATOMIC);
GFP_ATOMIC         72 net/ipv6/xfrm6_mode_tunnel.c 	    (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
GFP_ATOMIC        184 net/ipv6/xfrm6_tunnel.c 	x6spi = kmem_cache_alloc(xfrm6_tunnel_spi_kmem, GFP_ATOMIC);
GFP_ATOMIC        403 net/ipx/af_ipx.c 				skb1 = skb_clone(skb, GFP_ATOMIC);
GFP_ATOMIC        519 net/ipx/af_ipx.c 		skb1 = skb_clone(skb, GFP_ATOMIC);
GFP_ATOMIC        529 net/ipx/af_ipx.c 		skb2 = skb_clone(skb1, GFP_ATOMIC);
GFP_ATOMIC        567 net/ipx/af_ipx.c 	skb2 = alloc_skb(len, GFP_ATOMIC);
GFP_ATOMIC        653 net/ipx/af_ipx.c 		skb = skb_unshare(skb, GFP_ATOMIC);
GFP_ATOMIC        726 net/ipx/af_ipx.c 			skb = skb_unshare(skb, GFP_ATOMIC);
GFP_ATOMIC        858 net/ipx/af_ipx.c 			struct sk_buff *s = skb_copy(skb, GFP_ATOMIC);
GFP_ATOMIC        887 net/ipx/af_ipx.c 	struct ipx_interface *intrfc = kmalloc(sizeof(*intrfc), GFP_ATOMIC);
GFP_ATOMIC       1646 net/ipx/af_ipx.c 	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
GFP_ATOMIC         60 net/ipx/ipx_route.c 		rt = kmalloc(sizeof(*rt), GFP_ATOMIC);
GFP_ATOMIC        308 net/irda/af_irda.c 			GFP_ATOMIC);
GFP_ATOMIC       1087 net/irda/af_irda.c 	sk = sk_alloc(net, PF_IRDA, GFP_ATOMIC, &irda_proto);
GFP_ATOMIC       1855 net/irda/af_irda.c 		ias_opt = kmalloc(sizeof(struct irda_ias_set), GFP_ATOMIC);
GFP_ATOMIC       1975 net/irda/af_irda.c 		ias_opt = kmalloc(sizeof(struct irda_ias_set), GFP_ATOMIC);
GFP_ATOMIC       2224 net/irda/af_irda.c 		ias_opt = kmalloc(sizeof(struct irda_ias_set), GFP_ATOMIC);
GFP_ATOMIC       2280 net/irda/af_irda.c 		ias_opt = kmalloc(sizeof(struct irda_ias_set), GFP_ATOMIC);
GFP_ATOMIC        185 net/irda/discovery.c 				buffer = kmalloc(n * sizeof(struct irda_device_info), GFP_ATOMIC);
GFP_ATOMIC        297 net/irda/discovery.c 				buffer = kmalloc(n * sizeof(struct irda_device_info), GFP_ATOMIC);
GFP_ATOMIC        119 net/irda/ircomm/ircomm_core.c 	self = kzalloc(sizeof(struct ircomm_cb), GFP_ATOMIC);
GFP_ATOMIC        437 net/irda/ircomm/ircomm_core.c 		ctrl_skb = skb_clone(skb, GFP_ATOMIC);
GFP_ATOMIC         83 net/irda/ircomm/ircomm_lmp.c 		tx_skb = alloc_skb(LMP_MAX_HEADER, GFP_ATOMIC);
GFP_ATOMIC        117 net/irda/ircomm/ircomm_lmp.c 		tx_skb = alloc_skb(LMP_MAX_HEADER, GFP_ATOMIC);
GFP_ATOMIC        123 net/irda/ircomm/ircomm_param.c 		skb = alloc_skb(256, GFP_ATOMIC);
GFP_ATOMIC        757 net/irda/ircomm/ircomm_tty.c 					GFP_ATOMIC);
GFP_ATOMIC        176 net/irda/iriap.c 	self = kzalloc(sizeof(*self), GFP_ATOMIC);
GFP_ATOMIC        348 net/irda/iriap.c 	tx_skb = alloc_skb(LMP_MAX_HEADER, GFP_ATOMIC);
GFP_ATOMIC        400 net/irda/iriap.c 	tx_skb = alloc_skb(skb_len, GFP_ATOMIC);
GFP_ATOMIC        569 net/irda/iriap.c 			   GFP_ATOMIC);
GFP_ATOMIC        707 net/irda/iriap.c 	tx_skb = alloc_skb(LMP_MAX_HEADER + 1, GFP_ATOMIC);
GFP_ATOMIC        368 net/irda/iriap_event.c 		tx_skb = alloc_skb(LMP_MAX_HEADER, GFP_ATOMIC);
GFP_ATOMIC         52 net/irda/irias_object.c 	obj = kzalloc(sizeof(struct ias_object), GFP_ATOMIC);
GFP_ATOMIC         60 net/irda/irias_object.c 	obj->name = kstrndup(name, IAS_MAX_CLASSNAME, GFP_ATOMIC);
GFP_ATOMIC        322 net/irda/irias_object.c 	attrib = kzalloc(sizeof(struct ias_attrib), GFP_ATOMIC);
GFP_ATOMIC        330 net/irda/irias_object.c 	attrib->name = kstrndup(name, IAS_MAX_ATTRIBNAME, GFP_ATOMIC);
GFP_ATOMIC        366 net/irda/irias_object.c 	attrib = kzalloc(sizeof(struct ias_attrib), GFP_ATOMIC);
GFP_ATOMIC        374 net/irda/irias_object.c 	attrib->name = kstrndup(name, IAS_MAX_ATTRIBNAME, GFP_ATOMIC);
GFP_ATOMIC        408 net/irda/irias_object.c 	attrib = kzalloc(sizeof( struct ias_attrib), GFP_ATOMIC);
GFP_ATOMIC        416 net/irda/irias_object.c 	attrib->name = kstrndup(name, IAS_MAX_ATTRIBNAME, GFP_ATOMIC);
GFP_ATOMIC        443 net/irda/irias_object.c 	value = kzalloc(sizeof(struct ias_value), GFP_ATOMIC);
GFP_ATOMIC        468 net/irda/irias_object.c 	value = kzalloc(sizeof(struct ias_value), GFP_ATOMIC);
GFP_ATOMIC        476 net/irda/irias_object.c 	value->t.string = kstrndup(string, IAS_MAX_STRING, GFP_ATOMIC);
GFP_ATOMIC        499 net/irda/irias_object.c 	value = kzalloc(sizeof(struct ias_value), GFP_ATOMIC);
GFP_ATOMIC        511 net/irda/irias_object.c 	value->t.oct_seq = kmemdup(octseq, len, GFP_ATOMIC);
GFP_ATOMIC        524 net/irda/irias_object.c 	value = kzalloc(sizeof(struct ias_value), GFP_ATOMIC);
GFP_ATOMIC        396 net/irda/irlan/irlan_client.c 	name = kmalloc(255, GFP_ATOMIC);
GFP_ATOMIC        399 net/irda/irlan/irlan_client.c 	value = kmalloc(1016, GFP_ATOMIC);
GFP_ATOMIC        645 net/irda/irlan/irlan_common.c 			GFP_ATOMIC);
GFP_ATOMIC        680 net/irda/irlan/irlan_common.c 			GFP_ATOMIC);
GFP_ATOMIC        718 net/irda/irlan/irlan_common.c 			GFP_ATOMIC);
GFP_ATOMIC        757 net/irda/irlan/irlan_common.c 			GFP_ATOMIC);
GFP_ATOMIC        800 net/irda/irlan/irlan_common.c 			GFP_ATOMIC);
GFP_ATOMIC        844 net/irda/irlan/irlan_common.c 			GFP_ATOMIC);
GFP_ATOMIC        889 net/irda/irlan/irlan_common.c 			GFP_ATOMIC);
GFP_ATOMIC        926 net/irda/irlan/irlan_common.c 			GFP_ATOMIC);
GFP_ATOMIC        249 net/irda/irlan/irlan_provider.c 	name = kmalloc(255, GFP_ATOMIC);
GFP_ATOMIC        252 net/irda/irlan/irlan_provider.c 	value = kmalloc(1016, GFP_ATOMIC);
GFP_ATOMIC        305 net/irda/irlan/irlan_provider.c 			GFP_ATOMIC);
GFP_ATOMIC        884 net/irda/irlap.c 		skb = alloc_skb(0, GFP_ATOMIC);
GFP_ATOMIC        131 net/irda/irlap_frame.c 			   GFP_ATOMIC);
GFP_ATOMIC        226 net/irda/irlap_frame.c 			   GFP_ATOMIC);
GFP_ATOMIC        266 net/irda/irlap_frame.c 	tx_skb = alloc_skb(sizeof(struct dm_frame), GFP_ATOMIC);
GFP_ATOMIC        298 net/irda/irlap_frame.c 	tx_skb = alloc_skb(sizeof(struct disc_frame), GFP_ATOMIC);
GFP_ATOMIC        332 net/irda/irlap_frame.c 			   GFP_ATOMIC);
GFP_ATOMIC        439 net/irda/irlap_frame.c 	if ((discovery = kzalloc(sizeof(discovery_t), GFP_ATOMIC)) == NULL) {
GFP_ATOMIC        546 net/irda/irlap_frame.c 		discovery = kmalloc(sizeof(discovery_t), GFP_ATOMIC);
GFP_ATOMIC        592 net/irda/irlap_frame.c 	tx_skb = alloc_skb(sizeof(struct rr_frame), GFP_ATOMIC);
GFP_ATOMIC        617 net/irda/irlap_frame.c 	tx_skb = alloc_skb(sizeof(struct rd_frame), GFP_ATOMIC);
GFP_ATOMIC        747 net/irda/irlap_frame.c 		tx_skb = skb_clone(skb, GFP_ATOMIC);
GFP_ATOMIC        793 net/irda/irlap_frame.c 		tx_skb = skb_clone(skb, GFP_ATOMIC);
GFP_ATOMIC        901 net/irda/irlap_frame.c 		tx_skb = skb_clone(skb, GFP_ATOMIC);
GFP_ATOMIC        958 net/irda/irlap_frame.c 		tx_skb = skb_clone(skb, GFP_ATOMIC);
GFP_ATOMIC       1002 net/irda/irlap_frame.c 		tx_skb = skb_copy(skb, GFP_ATOMIC);
GFP_ATOMIC       1074 net/irda/irlap_frame.c 		tx_skb = skb_copy(skb, GFP_ATOMIC);
GFP_ATOMIC       1234 net/irda/irlap_frame.c 	tx_skb = alloc_skb(cmd->len + sizeof(struct test_frame), GFP_ATOMIC);
GFP_ATOMIC       1344 net/irda/irlap_frame.c 	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) {
GFP_ATOMIC        165 net/irda/irlmp.c 	self = kzalloc(sizeof(struct lsap_cb), GFP_ATOMIC);
GFP_ATOMIC        399 net/irda/irlmp.c 		tx_skb = alloc_skb(LMP_MAX_HEADER, GFP_ATOMIC);
GFP_ATOMIC        648 net/irda/irlmp.c 	new = kmemdup(orig, sizeof(*new), GFP_ATOMIC);
GFP_ATOMIC       1239 net/irda/irlmp.c 		clone_skb = skb_clone(userdata, GFP_ATOMIC);
GFP_ATOMIC       1383 net/irda/irlmp.c 	service = kmalloc(16, GFP_ATOMIC);
GFP_ATOMIC       1489 net/irda/irlmp.c 	service = kmalloc(sizeof(irlmp_service_t), GFP_ATOMIC);
GFP_ATOMIC       1565 net/irda/irlmp.c 	client = kmalloc(sizeof(irlmp_client_t), GFP_ATOMIC);
GFP_ATOMIC        359 net/irda/irqueue.c 	hashbin = kzalloc(sizeof(*hashbin), GFP_ATOMIC);
GFP_ATOMIC        310 net/irda/irttp.c 				 GFP_ATOMIC);
GFP_ATOMIC        407 net/irda/irttp.c 	self = kzalloc(sizeof(struct tsap_cb), GFP_ATOMIC);
GFP_ATOMIC        820 net/irda/irttp.c 	tx_skb = alloc_skb(TTP_MAX_HEADER, GFP_ATOMIC);
GFP_ATOMIC       1110 net/irda/irttp.c 				   GFP_ATOMIC);
GFP_ATOMIC       1359 net/irda/irttp.c 				   GFP_ATOMIC);
GFP_ATOMIC       1448 net/irda/irttp.c 	new = kmalloc(sizeof(struct tsap_cb), GFP_ATOMIC);
GFP_ATOMIC       1556 net/irda/irttp.c 		tx_skb = alloc_skb(LMP_MAX_HEADER, GFP_ATOMIC);
GFP_ATOMIC        688 net/iucv/af_iucv.c 		nskb = alloc_skb(size, GFP_ATOMIC | GFP_DMA);
GFP_ATOMIC        749 net/iucv/af_iucv.c 		skb = alloc_skb(p->msg.length, GFP_ATOMIC | GFP_DMA);
GFP_ATOMIC       1022 net/iucv/af_iucv.c 	nsk = iucv_sock_alloc(NULL, SOCK_STREAM, GFP_ATOMIC);
GFP_ATOMIC       1090 net/iucv/af_iucv.c 	skb = alloc_skb(msg->length, GFP_ATOMIC | GFP_DMA);
GFP_ATOMIC       1101 net/iucv/af_iucv.c 	save_msg = kzalloc(sizeof(struct sock_msg_q), GFP_ATOMIC | GFP_DMA);
GFP_ATOMIC       1239 net/iucv/iucv.c 	path = iucv_path_alloc(ipp->ipmsglim, ipp->ipflags1, GFP_ATOMIC);
GFP_ATOMIC       1573 net/iucv/iucv.c 	work = kmalloc(sizeof(struct iucv_irq_list), GFP_ATOMIC);
GFP_ATOMIC        330 net/key/af_key.c 		pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE,
GFP_ATOMIC        821 net/key/af_key.c 	skb =  alloc_skb(size + 16, GFP_ATOMIC);
GFP_ATOMIC       1479 net/key/af_key.c 	pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL);
GFP_ATOMIC       1593 net/key/af_key.c 	pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ONE, sk);
GFP_ATOMIC       1704 net/key/af_key.c 	skb = alloc_skb(sizeof(struct sadb_msg) + 16, GFP_ATOMIC);
GFP_ATOMIC       1716 net/key/af_key.c 	pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL);
GFP_ATOMIC       1770 net/key/af_key.c 		pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE,
GFP_ATOMIC       1964 net/key/af_key.c 	skb =  alloc_skb(size + 16, GFP_ATOMIC);
GFP_ATOMIC       2151 net/key/af_key.c 	pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ALL, NULL);
GFP_ATOMIC       2376 net/key/af_key.c 	pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ONE, sk);
GFP_ATOMIC       2628 net/key/af_key.c 		pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE,
GFP_ATOMIC       2666 net/key/af_key.c 	skb_out = alloc_skb(sizeof(struct sadb_msg) + 16, GFP_ATOMIC);
GFP_ATOMIC       2676 net/key/af_key.c 	pfkey_broadcast(skb_out, GFP_ATOMIC, BROADCAST_ALL, NULL);
GFP_ATOMIC       2939 net/key/af_key.c 	pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_REGISTERED, NULL);
GFP_ATOMIC       3034 net/key/af_key.c 	skb =  alloc_skb(size + 16, GFP_ATOMIC);
GFP_ATOMIC       3107 net/key/af_key.c 	return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_REGISTERED, NULL);
GFP_ATOMIC       3145 net/key/af_key.c 	xp = xfrm_policy_alloc(GFP_ATOMIC);
GFP_ATOMIC       3232 net/key/af_key.c 	skb =  alloc_skb(size + 16, GFP_ATOMIC);
GFP_ATOMIC       3303 net/key/af_key.c 	return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_REGISTERED, NULL);
GFP_ATOMIC       3441 net/key/af_key.c 	skb = alloc_skb(size, GFP_ATOMIC);
GFP_ATOMIC       3494 net/key/af_key.c 	pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL);
GFP_ATOMIC        118 net/lapb/lapb_iface.c 	struct lapb_cb *lapb = kzalloc(sizeof(*lapb), GFP_ATOMIC);
GFP_ATOMIC         90 net/lapb/lapb_out.c 			if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {
GFP_ATOMIC        234 net/lapb/lapb_subr.c 	if ((skb = alloc_skb(LAPB_HEADER_LEN + 3, GFP_ATOMIC)) == NULL)
GFP_ATOMIC        270 net/lapb/lapb_subr.c 	if ((skb = alloc_skb(LAPB_HEADER_LEN + 7, GFP_ATOMIC)) == NULL)
GFP_ATOMIC       1323 net/llc/llc_c_ac.c 	struct sk_buff *skb = alloc_skb(0, GFP_ATOMIC);
GFP_ATOMIC        359 net/llc/llc_conn.c 			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
GFP_ATOMIC        703 net/llc/llc_conn.c 	struct sock *newsk = llc_sk_alloc(sock_net(sk), sk->sk_family, GFP_ATOMIC,
GFP_ATOMIC         35 net/llc/llc_core.c 	struct llc_sap *sap = kzalloc(sizeof(*sap), GFP_ATOMIC);
GFP_ATOMIC        100 net/llc/llc_if.c 	skb = alloc_skb(0, GFP_ATOMIC);
GFP_ATOMIC        139 net/llc/llc_if.c 	skb = alloc_skb(0, GFP_ATOMIC);
GFP_ATOMIC        164 net/llc/llc_input.c 	skb = skb_share_check(skb, GFP_ATOMIC);
GFP_ATOMIC        184 net/llc/llc_input.c 		struct sk_buff *cskb = skb_clone(skb, GFP_ATOMIC);
GFP_ATOMIC         57 net/llc/llc_sap.c 	skb = alloc_skb(hlen + data_size, GFP_ATOMIC);
GFP_ATOMIC        360 net/llc/llc_sap.c 		skb1 = skb_clone(skb, GFP_ATOMIC);
GFP_ATOMIC        664 net/llc/llc_station.c 	struct sk_buff *skb = alloc_skb(0, GFP_ATOMIC);
GFP_ATOMIC        702 net/llc/llc_station.c 	skb = alloc_skb(0, GFP_ATOMIC);
GFP_ATOMIC         23 net/mac80211/event.c 	char *buf = kmalloc(128, GFP_ATOMIC);
GFP_ATOMIC        428 net/mac80211/ht.c 			kmalloc(sizeof(struct tid_ampdu_tx), GFP_ATOMIC);
GFP_ATOMIC        842 net/mac80211/ht.c 			kmalloc(sizeof(struct tid_ampdu_rx), GFP_ATOMIC);
GFP_ATOMIC        862 net/mac80211/ht.c 		kmalloc(buf_size * sizeof(struct sk_buff *), GFP_ATOMIC);
GFP_ATOMIC        687 net/mac80211/main.c 				skb2 = skb_clone(skb, GFP_ATOMIC);
GFP_ATOMIC        192 net/mac80211/mesh.c 	p = kmem_cache_alloc(rm_cache, GFP_ATOMIC);
GFP_ATOMIC        576 net/mac80211/mesh_pathtbl.c 	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
GFP_ATOMIC        104 net/mac80211/mesh_plink.c 	sta = sta_info_alloc(sdata, hw_addr, GFP_ATOMIC);
GFP_ATOMIC       1264 net/mac80211/mlme.c 		sta = sta_info_alloc(sdata, ifsta->bssid, GFP_ATOMIC);
GFP_ATOMIC       2370 net/mac80211/mlme.c 	sta = sta_info_alloc(sdata, addr, GFP_ATOMIC);
GFP_ATOMIC        513 net/mac80211/rc80211_minstrel.c 	mp = kzalloc(sizeof(struct minstrel_priv), GFP_ATOMIC);
GFP_ATOMIC        338 net/mac80211/rc80211_pid_algo.c 	pinfo = kmalloc(sizeof(*pinfo), GFP_ATOMIC);
GFP_ATOMIC        344 net/mac80211/rc80211_pid_algo.c 	rinfo = kmalloc(sizeof(*rinfo) * sband->n_bitrates, GFP_ATOMIC);
GFP_ATOMIC        267 net/mac80211/rx.c 		    pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC)) {
GFP_ATOMIC        276 net/mac80211/rx.c 		skb = skb_copy_expand(origskb, needed_headroom, 0, GFP_ATOMIC);
GFP_ATOMIC        305 net/mac80211/rx.c 			skb2 = skb_clone(skb, GFP_ATOMIC);
GFP_ATOMIC        943 net/mac80211/rx.c 					      GFP_ATOMIC))) {
GFP_ATOMIC       1233 net/mac80211/rx.c 			xmit_skb = skb_copy(skb, GFP_ATOMIC);
GFP_ATOMIC       1432 net/mac80211/rx.c 			fwd_skb = skb_copy(skb, GFP_ATOMIC);
GFP_ATOMIC       1695 net/mac80211/rx.c 	    pskb_expand_head(skb, sizeof(*rthdr), 0, GFP_ATOMIC))
GFP_ATOMIC       1730 net/mac80211/rx.c 			skb2 = skb_clone(skb, GFP_ATOMIC);
GFP_ATOMIC       1982 net/mac80211/rx.c 		skb_new = skb_copy(skb, GFP_ATOMIC);
GFP_ATOMIC        112 net/mac80211/scan.c 	bss = kzalloc(sizeof(*bss), GFP_ATOMIC);
GFP_ATOMIC        165 net/mac80211/scan.c 	bss = kzalloc(sizeof(*bss), GFP_ATOMIC);
GFP_ATOMIC        169 net/mac80211/scan.c 	bss->mesh_cfg = kmalloc(MESH_CFG_CMP_LEN, GFP_ATOMIC);
GFP_ATOMIC        176 net/mac80211/scan.c 		bss->mesh_id = kmalloc(mesh_id_len, GFP_ATOMIC);
GFP_ATOMIC        317 net/mac80211/scan.c 			bss->ies = kmalloc(elems->total_len, GFP_ATOMIC);
GFP_ATOMIC        853 net/mac80211/scan.c 	buf = kmalloc(30, GFP_ATOMIC);
GFP_ATOMIC        873 net/mac80211/scan.c 		buf = kmalloc(50, GFP_ATOMIC);
GFP_ATOMIC        663 net/mac80211/tx.c 	frags = kzalloc(num_fragm * sizeof(struct sk_buff *), GFP_ATOMIC);
GFP_ATOMIC       1247 net/mac80211/tx.c 	if (pskb_expand_head(skb, head_need, tail_need, GFP_ATOMIC)) {
GFP_ATOMIC       1910 net/mac80211/tx.c 		skb = skb_copy(ifsta->probe_resp, GFP_ATOMIC);
GFP_ATOMIC        155 net/mac80211/wep.c 	rc4key = kmalloc(klen, GFP_ATOMIC);
GFP_ATOMIC        243 net/mac80211/wep.c 	rc4key = kmalloc(klen, GFP_ATOMIC);
GFP_ATOMIC        687 net/netfilter/ipvs/ip_vs_conn.c 	cp = kmem_cache_zalloc(ip_vs_conn_cachep, GFP_ATOMIC);
GFP_ATOMIC        842 net/netfilter/ipvs/ip_vs_ctl.c 	dest = kzalloc(sizeof(struct ip_vs_dest), GFP_ATOMIC);
GFP_ATOMIC       1183 net/netfilter/ipvs/ip_vs_ctl.c 	svc = kzalloc(sizeof(struct ip_vs_service), GFP_ATOMIC);
GFP_ATOMIC        140 net/netfilter/ipvs/ip_vs_dh.c 		      GFP_ATOMIC);
GFP_ATOMIC        224 net/netfilter/ipvs/ip_vs_ftp.c 			ret = !ip_vs_skb_replace(skb, GFP_ATOMIC, start,
GFP_ATOMIC        190 net/netfilter/ipvs/ip_vs_lblc.c 		en = kmalloc(sizeof(*en), GFP_ATOMIC);
GFP_ATOMIC        322 net/netfilter/ipvs/ip_vs_lblc.c 	tbl = kmalloc(sizeof(*tbl), GFP_ATOMIC);
GFP_ATOMIC        109 net/netfilter/ipvs/ip_vs_lblcr.c 	e = kmalloc(sizeof(*e), GFP_ATOMIC);
GFP_ATOMIC        364 net/netfilter/ipvs/ip_vs_lblcr.c 		en = kmalloc(sizeof(*en), GFP_ATOMIC);
GFP_ATOMIC        497 net/netfilter/ipvs/ip_vs_lblcr.c 	tbl = kmalloc(sizeof(*tbl), GFP_ATOMIC);
GFP_ATOMIC        119 net/netfilter/ipvs/ip_vs_proto.c 	return kmemdup(table, size, GFP_ATOMIC);
GFP_ATOMIC        137 net/netfilter/ipvs/ip_vs_sh.c 		      GFP_ATOMIC);
GFP_ATOMIC        179 net/netfilter/ipvs/ip_vs_sync.c 	if (!(sb=kmalloc(sizeof(struct ip_vs_sync_buff), GFP_ATOMIC)))
GFP_ATOMIC        182 net/netfilter/ipvs/ip_vs_sync.c 	if (!(sb->mesg=kmalloc(sync_send_mesg_maxlen, GFP_ATOMIC))) {
GFP_ATOMIC         95 net/netfilter/ipvs/ip_vs_wrr.c 	mark = kmalloc(sizeof(struct ip_vs_wrr_mark), GFP_ATOMIC);
GFP_ATOMIC        258 net/netfilter/ipvs/ip_vs_xmit.c 	if (unlikely((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)) {
GFP_ATOMIC        322 net/netfilter/ipvs/ip_vs_xmit.c 	skb = skb_share_check(skb, GFP_ATOMIC);
GFP_ATOMIC        772 net/netfilter/ipvs/ip_vs_xmit.c 	if (unlikely((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)) {
GFP_ATOMIC        825 net/netfilter/ipvs/ip_vs_xmit.c 	skb = skb_share_check(skb, GFP_ATOMIC);
GFP_ATOMIC        547 net/netfilter/nf_conntrack_core.c 	ct = nf_conntrack_alloc(net, tuple, &repl_tuple, GFP_ATOMIC);
GFP_ATOMIC        559 net/netfilter/nf_conntrack_core.c 	nf_ct_acct_ext_add(ct, GFP_ATOMIC);
GFP_ATOMIC        570 net/netfilter/nf_conntrack_core.c 			help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
GFP_ATOMIC        588 net/netfilter/nf_conntrack_core.c 			help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
GFP_ATOMIC        789 net/netfilter/nf_conntrack_core.c 		help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
GFP_ATOMIC        228 net/netfilter/nf_conntrack_expect.c 	new = kmem_cache_alloc(nf_ct_expect_cachep, GFP_ATOMIC);
GFP_ATOMIC        449 net/netfilter/nf_conntrack_netlink.c 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_ATOMIC);
GFP_ATOMIC        971 net/netfilter/nf_conntrack_netlink.c 		help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
GFP_ATOMIC       1142 net/netfilter/nf_conntrack_netlink.c 		help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
GFP_ATOMIC       1423 net/netfilter/nf_conntrack_netlink.c 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_ATOMIC);
GFP_ATOMIC        123 net/netfilter/nf_conntrack_proto_gre.c 	km = kmalloc(sizeof(*km), GFP_ATOMIC);
GFP_ATOMIC        136 net/netfilter/nf_queue.c 	entry = kmalloc(sizeof(*entry) + afinfo->route_key_size, GFP_ATOMIC);
GFP_ATOMIC        139 net/netfilter/nfnetlink_log.c 	inst = kzalloc(sizeof(*inst), GFP_ATOMIC);
GFP_ATOMIC        296 net/netfilter/nfnetlink_log.c 	skb = alloc_skb(n, GFP_ATOMIC);
GFP_ATOMIC        305 net/netfilter/nfnetlink_log.c 			skb = alloc_skb(pkt_size, GFP_ATOMIC);
GFP_ATOMIC        102 net/netfilter/nfnetlink_queue.c 	inst = kzalloc(sizeof(*inst), GFP_ATOMIC);
GFP_ATOMIC        269 net/netfilter/nfnetlink_queue.c 	skb = alloc_skb(size, GFP_ATOMIC);
GFP_ATOMIC        457 net/netfilter/nfnetlink_queue.c 					       diff, GFP_ATOMIC);
GFP_ATOMIC        124 net/netfilter/xt_TCPMSS.c 				     GFP_ATOMIC))
GFP_ATOMIC        169 net/netfilter/xt_connlimit.c 		conn = kzalloc(sizeof(*conn), GFP_ATOMIC);
GFP_ATOMIC        165 net/netfilter/xt_hashlimit.c 	ent = kmem_cache_alloc(hashlimit_cachep, GFP_ATOMIC);
GFP_ATOMIC        855 net/netfilter/xt_hashlimit.c 	bucket = kmalloc(sizeof(unsigned int), GFP_ATOMIC);
GFP_ATOMIC        159 net/netfilter/xt_recent.c 		    GFP_ATOMIC);
GFP_ATOMIC         83 net/netlabel/netlabel_kapi.c 	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
GFP_ATOMIC         87 net/netlabel/netlabel_kapi.c 		entry->domain = kstrdup(domain, GFP_ATOMIC);
GFP_ATOMIC        133 net/netlabel/netlabel_kapi.c 	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
GFP_ATOMIC        137 net/netlabel/netlabel_kapi.c 		entry->domain = kstrdup(domain, GFP_ATOMIC);
GFP_ATOMIC        386 net/netlabel/netlabel_kapi.c 		ret_val = netlbl_secattr_catmap_setbit(iter, spot, GFP_ATOMIC);
GFP_ATOMIC        323 net/netlabel/netlabel_unlabeled.c 	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
GFP_ATOMIC        365 net/netlabel/netlabel_unlabeled.c 	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
GFP_ATOMIC        405 net/netlabel/netlabel_unlabeled.c 	iface = kzalloc(sizeof(*iface), GFP_ATOMIC);
GFP_ATOMIC        106 net/netlabel/netlabel_user.c 	audit_buf = audit_log_start(current->audit_context, GFP_ATOMIC, type);
GFP_ATOMIC        245 net/netlink/af_netlink.c 		return kzalloc(size, GFP_ATOMIC);
GFP_ATOMIC        248 net/netlink/af_netlink.c 			__get_free_pages(GFP_ATOMIC | __GFP_ZERO,
GFP_ATOMIC        595 net/netlink/af_netlink.c 	new_groups = krealloc(nlk->groups, NLGRPSZ(groups), GFP_ATOMIC);
GFP_ATOMIC       1462 net/netlink/af_netlink.c 		listeners = kzalloc(NLGRPSZ(groups), GFP_ATOMIC);
GFP_ATOMIC        440 net/netrom/af_netrom.c 	sk = sk_alloc(net, PF_NETROM, GFP_ATOMIC, &nr_proto);
GFP_ATOMIC        483 net/netrom/af_netrom.c 	sk = sk_alloc(sock_net(osk), PF_NETROM, GFP_ATOMIC, osk->sk_prot);
GFP_ATOMIC         51 net/netrom/nr_in.c 		if ((skbn = alloc_skb(nr->fraglen, GFP_ATOMIC)) == NULL)
GFP_ATOMIC         36 net/netrom/nr_loopback.c 	if ((skbn = alloc_skb(skb->len, GFP_ATOMIC)) != NULL) {
GFP_ATOMIC        108 net/netrom/nr_out.c 	if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL)
GFP_ATOMIC        159 net/netrom/nr_out.c 		if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {
GFP_ATOMIC        140 net/netrom/nr_route.c 		if ((nr_neigh = kmalloc(sizeof(*nr_neigh), GFP_ATOMIC)) == NULL) {
GFP_ATOMIC        179 net/netrom/nr_route.c 		if ((nr_node = kmalloc(sizeof(*nr_node), GFP_ATOMIC)) == NULL) {
GFP_ATOMIC        422 net/netrom/nr_route.c 	if ((nr_neigh = kmalloc(sizeof(*nr_neigh), GFP_ATOMIC)) == NULL)
GFP_ATOMIC        829 net/netrom/nr_route.c 	if ((skbn=skb_copy_expand(skb, dev->hard_header_len, 0, GFP_ATOMIC)) == NULL) {
GFP_ATOMIC        147 net/netrom/nr_subr.c 	if ((skb = alloc_skb(len, GFP_ATOMIC)) == NULL)
GFP_ATOMIC        222 net/netrom/nr_subr.c 	if ((skbn = alloc_skb(len, GFP_ATOMIC)) == NULL)
GFP_ATOMIC        308 net/packet/af_packet.c 	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
GFP_ATOMIC        527 net/packet/af_packet.c 		struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
GFP_ATOMIC        654 net/packet/af_packet.c 				copy_skb = skb_clone(skb, GFP_ATOMIC);
GFP_ATOMIC        198 net/phonet/af_phonet.c 	struct sk_buff *skb = alloc_skb(MAX_PHONET_HEADER + len, GFP_ATOMIC);
GFP_ATOMIC        136 net/phonet/pep.c 	return pep_reply(sk, skb, code, data, sizeof(data), GFP_ATOMIC);
GFP_ATOMIC        210 net/phonet/pep.c 				PEP_IND_READY, GFP_ATOMIC);
GFP_ATOMIC        218 net/phonet/pep.c 					GFP_ATOMIC) == 0)
GFP_ATOMIC        321 net/phonet/pep.c 		pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC);
GFP_ATOMIC        329 net/phonet/pep.c 		pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC);
GFP_ATOMIC        347 net/phonet/pep.c 		pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC);
GFP_ATOMIC        485 net/phonet/pep.c 	skb = skb_clone(skb, GFP_ATOMIC);
GFP_ATOMIC        490 net/phonet/pep.c 	newsk = sk_alloc(sock_net(sk), PF_PHONET, GFP_ATOMIC, sk->sk_prot);
GFP_ATOMIC        593 net/phonet/pep.c 		pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC);
GFP_ATOMIC        597 net/phonet/pep.c 		pep_ctrlreq_error(sk, skb, PN_PIPE_INVALID_HANDLE, GFP_ATOMIC);
GFP_ATOMIC        928 net/phonet/pep.c 	rskb = alloc_skb(MAX_PNPIPE_HEADER, GFP_ATOMIC);
GFP_ATOMIC         42 net/phonet/pn_dev.c 	struct phonet_device *pnd = kmalloc(sizeof(*pnd), GFP_ATOMIC);
GFP_ATOMIC        529 net/rose/af_rose.c 	sk = sk_alloc(net, PF_ROSE, GFP_ATOMIC, &rose_proto);
GFP_ATOMIC        568 net/rose/af_rose.c 	sk = sk_alloc(sock_net(osk), PF_ROSE, GFP_ATOMIC, &rose_proto);
GFP_ATOMIC         69 net/rose/rose_dev.c 	if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {
GFP_ATOMIC        181 net/rose/rose_link.c 	if ((skb = alloc_skb(len, GFP_ATOMIC)) == NULL)
GFP_ATOMIC        210 net/rose/rose_link.c 	if ((skb = alloc_skb(len, GFP_ATOMIC)) == NULL)
GFP_ATOMIC        238 net/rose/rose_link.c 	if ((skb = alloc_skb(len, GFP_ATOMIC)) == NULL)
GFP_ATOMIC         38 net/rose/rose_loopback.c 	skbn = skb_clone(skb, GFP_ATOMIC);
GFP_ATOMIC         79 net/rose/rose_out.c 		if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {
GFP_ATOMIC         87 net/rose/rose_route.c 		rose_neigh = kmalloc(sizeof(*rose_neigh), GFP_ATOMIC);
GFP_ATOMIC        151 net/rose/rose_route.c 		rose_node = kmalloc(sizeof(*rose_node), GFP_ATOMIC);
GFP_ATOMIC        416 net/rose/rose_route.c 	if ((rose_node = kmalloc(sizeof(*rose_node), GFP_ATOMIC)) == NULL) {
GFP_ATOMIC       1038 net/rose/rose_route.c 	if ((rose_route = kmalloc(sizeof(*rose_route), GFP_ATOMIC)) == NULL) {
GFP_ATOMIC        126 net/rose/rose_subr.c 	if ((skb = alloc_skb(len, GFP_ATOMIC)) == NULL)
GFP_ATOMIC        474 net/rxrpc/ar-input.c 		part = skb_clone(jumbo, GFP_ATOMIC);
GFP_ATOMIC        204 net/sched/act_ipt.c 		if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
GFP_ATOMIC        259 net/sched/act_ipt.c 	t = kmemdup(ipt->tcfi_t, ipt->tcfi_t->u.user.target_size, GFP_ATOMIC);
GFP_ATOMIC        176 net/sched/act_mirred.c 	skb2 = skb_act_clone(skb, GFP_ATOMIC);
GFP_ATOMIC        148 net/sched/act_nat.c 		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
GFP_ATOMIC        175 net/sched/act_nat.c 		     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
GFP_ATOMIC        189 net/sched/act_nat.c 		     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
GFP_ATOMIC        227 net/sched/act_nat.c 		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
GFP_ATOMIC        271 net/sched/act_nat.c 	opt = kzalloc(s, GFP_ATOMIC);
GFP_ATOMIC        131 net/sched/act_pedit.c 		if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
GFP_ATOMIC        203 net/sched/act_pedit.c 	opt = kzalloc(s, GFP_ATOMIC);
GFP_ATOMIC        189 net/sched/sch_netem.c 	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
GFP_ATOMIC        205 net/sched/sch_netem.c 		if (!(skb = skb_unshare(skb, GFP_ATOMIC))
GFP_ATOMIC        816 net/sctp/associola.c 				0, spc_state, error, GFP_ATOMIC);
GFP_ATOMIC       1068 net/sctp/associola.c 				   state, ep, asoc, chunk, GFP_ATOMIC);
GFP_ATOMIC       1127 net/sctp/associola.c 			 asoc->peer.i.initial_tsn, GFP_ATOMIC);
GFP_ATOMIC       1172 net/sctp/associola.c 						    GFP_ATOMIC, trans->state);
GFP_ATOMIC       1187 net/sctp/associola.c 			sctp_assoc_set_id(asoc, GFP_ATOMIC);
GFP_ATOMIC       1207 net/sctp/associola.c 	sctp_auth_asoc_init_active_key(asoc, GFP_ATOMIC);
GFP_ATOMIC        115 net/sctp/chunk.c 							    error, GFP_ATOMIC);
GFP_ATOMIC        439 net/sctp/endpointola.c 								GFP_ATOMIC);
GFP_ATOMIC        475 net/sctp/endpointola.c 				   ep, asoc, chunk, GFP_ATOMIC);
GFP_ATOMIC        418 net/sctp/input.c 		   GFP_ATOMIC);
GFP_ATOMIC         96 net/sctp/ipv6.c 		addr = kmalloc(sizeof(struct sctp_sockaddr_entry), GFP_ATOMIC);
GFP_ATOMIC        384 net/sctp/ipv6.c 		addr = t_new(struct sctp_sockaddr_entry, GFP_ATOMIC);
GFP_ATOMIC        390 net/sctp/output.c 	nskb = alloc_skb(packet->size + LL_MAX_HEADER, GFP_ATOMIC);
GFP_ATOMIC        524 net/sctp/output.c 					GFP_ATOMIC);
GFP_ATOMIC        182 net/sctp/protocol.c 		addr = t_new(struct sctp_sockaddr_entry, GFP_ATOMIC);
GFP_ATOMIC        258 net/sctp/protocol.c 						    SCTP_ADDR_SRC, GFP_ATOMIC);
GFP_ATOMIC        676 net/sctp/protocol.c 		addr = kmalloc(sizeof(struct sctp_sockaddr_entry), GFP_ATOMIC);
GFP_ATOMIC       1223 net/sctp/protocol.c 					__get_free_pages(GFP_ATOMIC, order);
GFP_ATOMIC       1256 net/sctp/protocol.c 					__get_free_pages(GFP_ATOMIC, order);
GFP_ATOMIC       1186 net/sctp/sm_make_chunk.c 	retval = kmem_cache_zalloc(sctp_chunk_cachep, GFP_ATOMIC);
GFP_ATOMIC       1267 net/sctp/sm_make_chunk.c 			GFP_ATOMIC);
GFP_ATOMIC       1503 net/sctp/sm_make_chunk.c 	retval = kzalloc(*cookie_len, GFP_ATOMIC);
GFP_ATOMIC       1713 net/sctp/sm_make_chunk.c 						 GFP_ATOMIC) < 0) {
GFP_ATOMIC       1721 net/sctp/sm_make_chunk.c 				SCTP_ADDR_SRC, GFP_ATOMIC);
GFP_ATOMIC       2898 net/sctp/sm_make_chunk.c 		peer = sctp_assoc_add_peer(asoc, &addr, GFP_ATOMIC, SCTP_UNCONFIRMED);
GFP_ATOMIC        274 net/sctp/sm_sideeffect.c 			   transport, GFP_ATOMIC);
GFP_ATOMIC        314 net/sctp/sm_sideeffect.c 			   (void *)timeout_type, GFP_ATOMIC);
GFP_ATOMIC        390 net/sctp/sm_sideeffect.c 			   transport, GFP_ATOMIC);
GFP_ATOMIC        478 net/sctp/sm_sideeffect.c 						GFP_ATOMIC);
GFP_ATOMIC        503 net/sctp/sm_sideeffect.c 	sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC);
GFP_ATOMIC        508 net/sctp/sm_sideeffect.c 						GFP_ATOMIC);
GFP_ATOMIC        512 net/sctp/sm_sideeffect.c 						GFP_ATOMIC);
GFP_ATOMIC        674 net/sctp/sm_sideeffect.c 				 GFP_ATOMIC);
GFP_ATOMIC        875 net/sctp/sm_sideeffect.c 					    NULL, GFP_ATOMIC);
GFP_ATOMIC        886 net/sctp/sm_sideeffect.c 	ev = sctp_ulpevent_make_adaptation_indication(asoc, GFP_ATOMIC);
GFP_ATOMIC       1167 net/sctp/sm_sideeffect.c 			sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC);
GFP_ATOMIC       1192 net/sctp/sm_sideeffect.c 			new_obj = sctp_make_init_ack(asoc, chunk, GFP_ATOMIC,
GFP_ATOMIC       1276 net/sctp/sm_sideeffect.c 					    GFP_ATOMIC);
GFP_ATOMIC       1508 net/sctp/sm_sideeffect.c 						   GFP_ATOMIC);
GFP_ATOMIC       1513 net/sctp/sm_sideeffect.c 					 GFP_ATOMIC);
GFP_ATOMIC       1552 net/sctp/sm_sideeffect.c 						GFP_ATOMIC);
GFP_ATOMIC        237 net/sctp/sm_statefuns.c 					     0, 0, 0, NULL, GFP_ATOMIC);
GFP_ATOMIC        374 net/sctp/sm_statefuns.c 	new_asoc = sctp_make_temp_asoc(ep, chunk, GFP_ATOMIC);
GFP_ATOMIC        382 net/sctp/sm_statefuns.c 			       GFP_ATOMIC))
GFP_ATOMIC        395 net/sctp/sm_statefuns.c 	if (sctp_assoc_set_bind_addr_from_ep(new_asoc, GFP_ATOMIC) < 0)
GFP_ATOMIC        398 net/sctp/sm_statefuns.c 	repl = sctp_make_init_ack(new_asoc, chunk, GFP_ATOMIC, len);
GFP_ATOMIC        675 net/sctp/sm_statefuns.c 	new_asoc = sctp_unpack_cookie(ep, asoc, chunk, GFP_ATOMIC, &error,
GFP_ATOMIC        716 net/sctp/sm_statefuns.c 			       peer_init, GFP_ATOMIC))
GFP_ATOMIC        723 net/sctp/sm_statefuns.c 	error = sctp_auth_asoc_init_active_key(new_asoc, GFP_ATOMIC);
GFP_ATOMIC        770 net/sctp/sm_statefuns.c 					     NULL, GFP_ATOMIC);
GFP_ATOMIC        781 net/sctp/sm_statefuns.c 							    GFP_ATOMIC);
GFP_ATOMIC        896 net/sctp/sm_statefuns.c 					     NULL, GFP_ATOMIC);
GFP_ATOMIC        909 net/sctp/sm_statefuns.c 		ev = sctp_ulpevent_make_adaptation_indication(asoc, GFP_ATOMIC);
GFP_ATOMIC       1441 net/sctp/sm_statefuns.c 	new_asoc = sctp_make_temp_asoc(ep, chunk, GFP_ATOMIC);
GFP_ATOMIC       1452 net/sctp/sm_statefuns.c 			       GFP_ATOMIC))
GFP_ATOMIC       1481 net/sctp/sm_statefuns.c 	if (sctp_assoc_set_bind_addr_from_ep(new_asoc, GFP_ATOMIC) < 0)
GFP_ATOMIC       1484 net/sctp/sm_statefuns.c 	repl = sctp_make_init_ack(new_asoc, chunk, GFP_ATOMIC, len);
GFP_ATOMIC       1684 net/sctp/sm_statefuns.c 			       GFP_ATOMIC))
GFP_ATOMIC       1731 net/sctp/sm_statefuns.c 					     NULL, GFP_ATOMIC);
GFP_ATOMIC       1770 net/sctp/sm_statefuns.c 			       GFP_ATOMIC))
GFP_ATOMIC       1884 net/sctp/sm_statefuns.c 					     NULL, GFP_ATOMIC);
GFP_ATOMIC       1895 net/sctp/sm_statefuns.c 								 GFP_ATOMIC);
GFP_ATOMIC       1976 net/sctp/sm_statefuns.c 	new_asoc = sctp_unpack_cookie(ep, asoc, chunk, GFP_ATOMIC, &error,
GFP_ATOMIC       2277 net/sctp/sm_statefuns.c 	reply = sctp_make_init(asoc, bp, GFP_ATOMIC, sizeof(bht));
GFP_ATOMIC       2566 net/sctp/sm_statefuns.c 	ev = sctp_ulpevent_make_shutdown_event(asoc, 0, GFP_ATOMIC);
GFP_ATOMIC       3126 net/sctp/sm_statefuns.c 						     GFP_ATOMIC);
GFP_ATOMIC       3174 net/sctp/sm_statefuns.c 					     0, 0, 0, NULL, GFP_ATOMIC);
GFP_ATOMIC       3821 net/sctp/sm_statefuns.c 	save_digest = kmemdup(digest, sig_len, GFP_ATOMIC);
GFP_ATOMIC       3829 net/sctp/sm_statefuns.c 				GFP_ATOMIC);
GFP_ATOMIC       3905 net/sctp/sm_statefuns.c 				    SCTP_AUTH_NEWKEY, GFP_ATOMIC);
GFP_ATOMIC       4400 net/sctp/sm_statefuns.c 	repl = sctp_make_init(asoc, &asoc->base.bind_addr, GFP_ATOMIC, 0);
GFP_ATOMIC       5244 net/sctp/sm_statefuns.c 		repl = sctp_make_init(asoc, bp, GFP_ATOMIC, 0);
GFP_ATOMIC       5720 net/sctp/sm_statefuns.c 	transport = sctp_transport_new(sctp_source(chunk), GFP_ATOMIC);
GFP_ATOMIC        402 net/sctp/socket.c 	ret = sctp_add_bind_addr(bp, addr, SCTP_ADDR_SRC, GFP_ATOMIC);
GFP_ATOMIC        596 net/sctp/socket.c 						    SCTP_ADDR_NEW, GFP_ATOMIC);
GFP_ATOMIC       5948 net/sctp/socket.c 	pp = kmem_cache_alloc(sctp_bucket_cachep, GFP_ATOMIC);
GFP_ATOMIC        374 net/sctp/tsnmap.c 	new = kzalloc(len>>3, GFP_ATOMIC);
GFP_ATOMIC        360 net/sctp/ulpqueue.c 			new = skb_copy(f_frag, GFP_ATOMIC);
GFP_ATOMIC        547 net/sunrpc/rpcb_clnt.c 	map = kzalloc(sizeof(struct rpcbind_args), GFP_ATOMIC);
GFP_ATOMIC        732 net/sunrpc/sched.c 	gfp_t gfp = RPC_IS_SWAPPER(task) ? GFP_ATOMIC : GFP_NOWAIT;
GFP_ATOMIC        106 net/sunrpc/socklib.c 			*ppage = alloc_page(GFP_ATOMIC);
GFP_ATOMIC        566 net/sunrpc/xprtrdma/transport.c 			nreq = kmalloc(sizeof *req + size, GFP_ATOMIC);
GFP_ATOMIC       1464 net/sunrpc/xprtsock.c 		sk->sk_allocation = GFP_ATOMIC;
GFP_ATOMIC       1599 net/sunrpc/xprtsock.c 		sk->sk_allocation = GFP_ATOMIC;
GFP_ATOMIC        777 net/tipc/bcast.c 	bcbearer = kzalloc(sizeof(*bcbearer), GFP_ATOMIC);
GFP_ATOMIC        778 net/tipc/bcast.c 	bclink = kzalloc(sizeof(*bclink), GFP_ATOMIC);
GFP_ATOMIC        806 net/tipc/bcast.c 		char *pb = kmalloc(BCLINK_LOG_BUF_SIZE, GFP_ATOMIC);
GFP_ATOMIC        168 net/tipc/bcast.h 			item->next = kmalloc(sizeof(*item), GFP_ATOMIC);
GFP_ATOMIC        668 net/tipc/bearer.c 	tipc_bearers = kcalloc(MAX_BEARERS, sizeof(struct bearer), GFP_ATOMIC);
GFP_ATOMIC        669 net/tipc/bearer.c 	media_list = kcalloc(MAX_MEDIA, sizeof(struct media), GFP_ATOMIC);
GFP_ATOMIC         61 net/tipc/cluster.c 	c_ptr = kzalloc(sizeof(*c_ptr), GFP_ATOMIC);
GFP_ATOMIC         73 net/tipc/cluster.c 	c_ptr->nodes = kcalloc(max_nodes + 1, sizeof(void*), GFP_ATOMIC);
GFP_ATOMIC        519 net/tipc/cluster.c 			buf_copy = skb_copy(buf, GFP_ATOMIC);
GFP_ATOMIC        554 net/tipc/cluster.c 					buf_copy = skb_copy(buf, GFP_ATOMIC);
GFP_ATOMIC         83 net/tipc/config.c 	buf = alloc_skb(rep_headroom + payload_size, GFP_ATOMIC);
GFP_ATOMIC        212 net/tipc/config.c 							    GFP_ATOMIC);
GFP_ATOMIC        342 net/tipc/core.h 	skb = alloc_skb_fclone(buf_size, GFP_ATOMIC);
GFP_ATOMIC        160 net/tipc/dbg.c 		cp_buf = kmalloc(pb->size, GFP_ATOMIC);
GFP_ATOMIC        359 net/tipc/dbg.c 		tipc_printbuf_init(TIPC_LOG, kmalloc(log_size, GFP_ATOMIC),
GFP_ATOMIC        327 net/tipc/discover.c 	req = kmalloc(sizeof(*req), GFP_ATOMIC);
GFP_ATOMIC         75 net/tipc/eth_media.c 	clone = skb_clone(buf, GFP_ATOMIC);
GFP_ATOMIC         65 net/tipc/handler.c 	item = kmem_cache_alloc(tipc_queue_item_cache, GFP_ATOMIC);
GFP_ATOMIC        438 net/tipc/link.c 	l_ptr = kzalloc(sizeof(*l_ptr), GFP_ATOMIC);
GFP_ATOMIC        445 net/tipc/link.c 		char *pb = kmalloc(LINK_LOG_BUF_SIZE, GFP_ATOMIC);
GFP_ATOMIC        689 net/tipc/link.c 	ev = kmalloc(sizeof(*ev), GFP_ATOMIC);
GFP_ATOMIC        130 net/tipc/name_table.c 	struct publication *publ = kzalloc(sizeof(*publ), GFP_ATOMIC);
GFP_ATOMIC        155 net/tipc/name_table.c 	struct sub_seq *sseq = kcalloc(cnt, sizeof(struct sub_seq), GFP_ATOMIC);
GFP_ATOMIC        167 net/tipc/name_table.c 	struct name_seq *nseq = kzalloc(sizeof(*nseq), GFP_ATOMIC);
GFP_ATOMIC       1078 net/tipc/name_table.c 			      GFP_ATOMIC);
GFP_ATOMIC        164 net/tipc/net.c 	tipc_net.zones = kcalloc(tipc_max_zones + 1, sizeof(struct _zone *), GFP_ATOMIC);
GFP_ATOMIC         86 net/tipc/node.c 	n_ptr = kzalloc(sizeof(*n_ptr),GFP_ATOMIC);
GFP_ATOMIC        133 net/tipc/port.c 			ibuf = skb_copy(buf, GFP_ATOMIC);
GFP_ATOMIC        194 net/tipc/port.c 			struct sk_buff *b = skb_clone(buf, GFP_ATOMIC);
GFP_ATOMIC        228 net/tipc/port.c 	p_ptr = kzalloc(sizeof(*p_ptr), GFP_ATOMIC);
GFP_ATOMIC       1044 net/tipc/port.c 	up_ptr = kmalloc(sizeof(*up_ptr), GFP_ATOMIC);
GFP_ATOMIC        353 net/tipc/subscr.c 	sub = kmalloc(sizeof(*sub), GFP_ATOMIC);
GFP_ATOMIC        489 net/tipc/subscr.c 	subscriber = kzalloc(sizeof(struct subscriber), GFP_ATOMIC);
GFP_ATOMIC         85 net/tipc/user_reg.c 		users = kzalloc(USER_LIST_SIZE, GFP_ATOMIC);
GFP_ATOMIC         55 net/tipc/zone.c 	z_ptr = kzalloc(sizeof(*z_ptr), GFP_ATOMIC);
GFP_ATOMIC       1250 net/wireless/wext.c 		rtnl_notify(skb, &init_net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
GFP_ATOMIC       1306 net/wireless/wext.c 	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
GFP_ATOMIC       1389 net/wireless/wext.c 	event = kmalloc(event_len, GFP_ATOMIC);
GFP_ATOMIC        475 net/x25/af_x25.c 	struct sock *sk = sk_alloc(net, AF_X25, GFP_ATOMIC, &x25_proto);
GFP_ATOMIC        101 net/x25/x25_dev.c 	nskb = skb_copy(skb, GFP_ATOMIC);
GFP_ATOMIC        145 net/x25/x25_dev.c 			if ((skb = alloc_skb(1, GFP_ATOMIC)) == NULL) {
GFP_ATOMIC        179 net/x25/x25_dev.c 	skb = alloc_skb(1, GFP_ATOMIC);
GFP_ATOMIC         62 net/x25/x25_forward.c 						GFP_ATOMIC)) == NULL){
GFP_ATOMIC         75 net/x25/x25_forward.c 	if ( (skbn = skb_clone(skb, GFP_ATOMIC)) == NULL){
GFP_ATOMIC        120 net/x25/x25_forward.c 	if ( (skbn = pskb_copy(skb, GFP_ATOMIC)) == NULL){
GFP_ATOMIC         49 net/x25/x25_in.c 		if ((skbn = alloc_skb(len, GFP_ATOMIC)) == NULL){
GFP_ATOMIC        116 net/x25/x25_link.c 	struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);
GFP_ATOMIC        143 net/x25/x25_link.c 	struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);
GFP_ATOMIC        170 net/x25/x25_link.c 	struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);
GFP_ATOMIC        244 net/x25/x25_link.c 	struct x25_neigh *nb = kmalloc(sizeof(*nb), GFP_ATOMIC);
GFP_ATOMIC        180 net/x25/x25_out.c 		if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {
GFP_ATOMIC         47 net/x25/x25_route.c 	rt = kmalloc(sizeof(*rt), GFP_ATOMIC);
GFP_ATOMIC        156 net/x25/x25_subr.c 	if ((skb = alloc_skb(len, GFP_ATOMIC)) == NULL)
GFP_ATOMIC         32 net/xfrm/xfrm_input.c 	sp = kmem_cache_alloc(secpath_cachep, GFP_ATOMIC);
GFP_ATOMIC         80 net/xfrm/xfrm_ipcomp.c 		frag->page = alloc_page(GFP_ATOMIC);
GFP_ATOMIC         37 net/xfrm/xfrm_output.c 	return pskb_expand_head(skb, nhead, ntail, GFP_ATOMIC);
GFP_ATOMIC       1156 net/xfrm/xfrm_policy.c 	struct xfrm_policy *newp = xfrm_policy_alloc(GFP_ATOMIC);
GFP_ATOMIC       1508 net/xfrm/xfrm_policy.c 		*target = kmalloc(size, GFP_ATOMIC);
GFP_ATOMIC        511 net/xfrm/xfrm_state.c 	x = kzalloc(sizeof(struct xfrm_state), GFP_ATOMIC);
GFP_ATOMIC        645 net/xfrm/xfrm_user.c 	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
GFP_ATOMIC        711 net/xfrm/xfrm_user.c 	r_skb = nlmsg_new(xfrm_spdinfo_msgsize(), GFP_ATOMIC);
GFP_ATOMIC        764 net/xfrm/xfrm_user.c 	r_skb = nlmsg_new(xfrm_sadinfo_msgsize(), GFP_ATOMIC);
GFP_ATOMIC       1467 net/xfrm/xfrm_user.c 	r_skb = nlmsg_new(xfrm_aevent_msgsize(), GFP_ATOMIC);
GFP_ATOMIC       1874 net/xfrm/xfrm_user.c 	skb = nlmsg_new(xfrm_migrate_msgsize(num_migrate, !!k), GFP_ATOMIC);
GFP_ATOMIC       1882 net/xfrm/xfrm_user.c 	return nlmsg_multicast(xfrm_nl, skb, 0, XFRMNLGRP_MIGRATE, GFP_ATOMIC);
GFP_ATOMIC       2038 net/xfrm/xfrm_user.c 	skb = nlmsg_new(xfrm_expire_msgsize(), GFP_ATOMIC);
GFP_ATOMIC       2045 net/xfrm/xfrm_user.c 	return nlmsg_multicast(xfrm_nl, skb, 0, XFRMNLGRP_EXPIRE, GFP_ATOMIC);
GFP_ATOMIC       2052 net/xfrm/xfrm_user.c 	skb = nlmsg_new(xfrm_aevent_msgsize(), GFP_ATOMIC);
GFP_ATOMIC       2059 net/xfrm/xfrm_user.c 	return nlmsg_multicast(xfrm_nl, skb, 0, XFRMNLGRP_AEVENTS, GFP_ATOMIC);
GFP_ATOMIC       2069 net/xfrm/xfrm_user.c 	skb = nlmsg_new(len, GFP_ATOMIC);
GFP_ATOMIC       2084 net/xfrm/xfrm_user.c 	return nlmsg_multicast(xfrm_nl, skb, 0, XFRMNLGRP_SA, GFP_ATOMIC);
GFP_ATOMIC       2128 net/xfrm/xfrm_user.c 	skb = nlmsg_new(len, GFP_ATOMIC);
GFP_ATOMIC       2158 net/xfrm/xfrm_user.c 	return nlmsg_multicast(xfrm_nl, skb, 0, XFRMNLGRP_SA, GFP_ATOMIC);
GFP_ATOMIC       2240 net/xfrm/xfrm_user.c 	skb = nlmsg_new(xfrm_acquire_msgsize(x, xp), GFP_ATOMIC);
GFP_ATOMIC       2247 net/xfrm/xfrm_user.c 	return nlmsg_multicast(xfrm_nl, skb, 0, XFRMNLGRP_ACQUIRE, GFP_ATOMIC);
GFP_ATOMIC       2349 net/xfrm/xfrm_user.c 	skb = nlmsg_new(xfrm_polexpire_msgsize(xp), GFP_ATOMIC);
GFP_ATOMIC       2356 net/xfrm/xfrm_user.c 	return nlmsg_multicast(xfrm_nl, skb, 0, XFRMNLGRP_EXPIRE, GFP_ATOMIC);
GFP_ATOMIC       2376 net/xfrm/xfrm_user.c 	skb = nlmsg_new(len, GFP_ATOMIC);
GFP_ATOMIC       2411 net/xfrm/xfrm_user.c 	return nlmsg_multicast(xfrm_nl, skb, 0, XFRMNLGRP_POLICY, GFP_ATOMIC);
GFP_ATOMIC       2423 net/xfrm/xfrm_user.c 	skb = nlmsg_new(userpolicy_type_attrsize(), GFP_ATOMIC);
GFP_ATOMIC       2435 net/xfrm/xfrm_user.c 	return nlmsg_multicast(xfrm_nl, skb, 0, XFRMNLGRP_POLICY, GFP_ATOMIC);
GFP_ATOMIC       2496 net/xfrm/xfrm_user.c 	skb = nlmsg_new(xfrm_report_msgsize(), GFP_ATOMIC);
GFP_ATOMIC       2503 net/xfrm/xfrm_user.c 	return nlmsg_multicast(xfrm_nl, skb, 0, XFRMNLGRP_REPORT, GFP_ATOMIC);
GFP_ATOMIC        340 security/selinux/avc.c 	node = kmem_cache_zalloc(avc_node_cachep, GFP_ATOMIC);
GFP_ATOMIC        553 security/selinux/avc.c 	ab = audit_log_start(current->audit_context, GFP_ATOMIC, AUDIT_AVC);
GFP_ATOMIC        716 security/selinux/avc.c 	c = kmalloc(sizeof(*c), GFP_ATOMIC);
GFP_ATOMIC        476 security/selinux/hooks.c 	opts->mnt_opts = kcalloc(opts->num_mnt_opts, sizeof(char *), GFP_ATOMIC);
GFP_ATOMIC        482 security/selinux/hooks.c 	opts->mnt_opts_flags = kcalloc(opts->num_mnt_opts, sizeof(int), GFP_ATOMIC);
GFP_ATOMIC        896 security/selinux/hooks.c 	opts->mnt_opts = kcalloc(NUM_SEL_MNT_OPTS, sizeof(char *), GFP_ATOMIC);
GFP_ATOMIC        900 security/selinux/hooks.c 	opts->mnt_opts_flags = kcalloc(NUM_SEL_MNT_OPTS, sizeof(int), GFP_ATOMIC);
GFP_ATOMIC        173 security/selinux/netif.c 	new = kzalloc(sizeof(*new), GFP_ATOMIC);
GFP_ATOMIC         89 security/selinux/netlabel.c 	secattr = netlbl_secattr_alloc(GFP_ATOMIC);
GFP_ATOMIC        227 security/selinux/netnode.c 	new = kzalloc(sizeof(*new), GFP_ATOMIC);
GFP_ATOMIC        174 security/selinux/netport.c 	new = kzalloc(sizeof(*new), GFP_ATOMIC);
GFP_ATOMIC        117 security/selinux/ss/context.h 		dst->str = kstrdup(src->str, GFP_ATOMIC);
GFP_ATOMIC         55 security/selinux/ss/ebitmap.c 		new = kzalloc(sizeof(*new), GFP_ATOMIC);
GFP_ATOMIC        106 security/selinux/ss/ebitmap.c 	c_iter = netlbl_secattr_catmap_alloc(GFP_ATOMIC);
GFP_ATOMIC        120 security/selinux/ss/ebitmap.c 				  = netlbl_secattr_catmap_alloc(GFP_ATOMIC);
GFP_ATOMIC        180 security/selinux/ss/ebitmap.c 				e_iter = kzalloc(sizeof(*e_iter), GFP_ATOMIC);
GFP_ATOMIC        307 security/selinux/ss/ebitmap.c 	new = kzalloc(sizeof(*new), GFP_ATOMIC);
GFP_ATOMIC       1416 security/selinux/ss/policydb.c 	levdatum = kzalloc(sizeof(*levdatum), GFP_ATOMIC);
GFP_ATOMIC       1429 security/selinux/ss/policydb.c 	key = kmalloc(len + 1, GFP_ATOMIC);
GFP_ATOMIC       1439 security/selinux/ss/policydb.c 	levdatum->level = kmalloc(sizeof(struct mls_level), GFP_ATOMIC);
GFP_ATOMIC       1467 security/selinux/ss/policydb.c 	catdatum = kzalloc(sizeof(*catdatum), GFP_ATOMIC);
GFP_ATOMIC       1481 security/selinux/ss/policydb.c 	key = kmalloc(len + 1, GFP_ATOMIC);
GFP_ATOMIC        363 security/selinux/ss/services.c 				     GFP_ATOMIC, AUDIT_SELINUX_ERR);
GFP_ATOMIC        571 security/selinux/ss/services.c 	audit_log(current->audit_context, GFP_ATOMIC, AUDIT_SELINUX_ERR,
GFP_ATOMIC        793 security/selinux/ss/services.c 		*scontext = kstrdup(context->str, GFP_ATOMIC);
GFP_ATOMIC        806 security/selinux/ss/services.c 	scontextp = kmalloc(*scontext_len, GFP_ATOMIC);
GFP_ATOMIC        852 security/selinux/ss/services.c 			scontextp = kmalloc(*scontext_len, GFP_ATOMIC);
GFP_ATOMIC       1114 security/selinux/ss/services.c 	audit_log(current->audit_context, GFP_ATOMIC, AUDIT_SELINUX_ERR,
GFP_ATOMIC       1968 security/selinux/ss/services.c 	mysids = kcalloc(maxnel, sizeof(*mysids), GFP_ATOMIC);
GFP_ATOMIC       1990 security/selinux/ss/services.c 				mysids2 = kcalloc(maxnel, sizeof(*mysids2), GFP_ATOMIC);
GFP_ATOMIC       2160 security/selinux/ss/services.c        *names = kcalloc(*len, sizeof(char *), GFP_ATOMIC);
GFP_ATOMIC       2164 security/selinux/ss/services.c        *values = kcalloc(*len, sizeof(int), GFP_ATOMIC);
GFP_ATOMIC       2172 security/selinux/ss/services.c 	       (*names)[i] = kmalloc(sizeof(char) * name_len, GFP_ATOMIC);
GFP_ATOMIC       2208 security/selinux/ss/services.c 			audit_log(current->audit_context, GFP_ATOMIC,
GFP_ATOMIC       2347 security/selinux/ss/services.c 		audit_log(current->audit_context, GFP_ATOMIC, AUDIT_SELINUX_ERR,
GFP_ATOMIC       2448 security/selinux/ss/services.c 	classes[value] = kstrdup(name, GFP_ATOMIC);
GFP_ATOMIC       2462 security/selinux/ss/services.c 	*classes = kcalloc(*nclasses, sizeof(*classes), GFP_ATOMIC);
GFP_ATOMIC       2486 security/selinux/ss/services.c 	perms[value] = kstrdup(name, GFP_ATOMIC);
GFP_ATOMIC       2509 security/selinux/ss/services.c 	*perms = kcalloc(*nperms, sizeof(*perms), GFP_ATOMIC);
GFP_ATOMIC       2660 security/selinux/ss/services.c 		rc = mls_from_string(rulestr, &tmprule->au_ctxt, GFP_ATOMIC);
GFP_ATOMIC       2710 security/selinux/ss/services.c 		audit_log(actx, GFP_ATOMIC, AUDIT_SELINUX_ERR,
GFP_ATOMIC       2718 security/selinux/ss/services.c 		audit_log(actx, GFP_ATOMIC, AUDIT_SELINUX_ERR,
GFP_ATOMIC       2726 security/selinux/ss/services.c 		audit_log(actx, GFP_ATOMIC, AUDIT_SELINUX_ERR,
GFP_ATOMIC       2855 security/selinux/ss/services.c 	sid_cache = kmalloc(sizeof(*sid_cache), GFP_ATOMIC);
GFP_ATOMIC       2858 security/selinux/ss/services.c 	secattr->cache = netlbl_secattr_cache_alloc(GFP_ATOMIC);
GFP_ATOMIC       2971 security/selinux/ss/services.c 				  GFP_ATOMIC);
GFP_ATOMIC         21 security/selinux/ss/sidtab.c 	s->htable = kmalloc(sizeof(*(s->htable)) * SIDTAB_SIZE, GFP_ATOMIC);
GFP_ATOMIC         56 security/selinux/ss/sidtab.c 	newnode = kmalloc(sizeof(*newnode), GFP_ATOMIC);
GFP_ATOMIC        257 security/selinux/xfrm.c 			      GFP_ATOMIC);
GFP_ATOMIC       1272 security/smack/smack_lsm.c 	sap->attr.mls.cat = netlbl_secattr_catmap_alloc(GFP_ATOMIC);
GFP_ATOMIC       1280 security/smack/smack_lsm.c 							  cat, GFP_ATOMIC);
GFP_ATOMIC        158 sound/core/control.c 		ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
GFP_ATOMIC       1612 sound/core/pcm_native.c 		substream->group = kmalloc(sizeof(struct snd_pcm_group), GFP_ATOMIC);
GFP_ATOMIC        199 sound/isa/gus/gus_dma.c 	block = kmalloc(sizeof(*block), atomic ? GFP_ATOMIC : GFP_KERNEL);
GFP_ATOMIC         92 sound/oss/dmabuf.c 		start_addr = (char *) __get_free_pages(GFP_ATOMIC|GFP_DMA|__GFP_NOWARN, sz);
GFP_ATOMIC        805 sound/oss/sscape.c 		start_addr = (char *) __get_free_pages(GFP_ATOMIC|GFP_DMA, sz);
GFP_ATOMIC       1134 sound/pci/cmipci.c 		val = kmalloc(sizeof(*val), GFP_ATOMIC);
GFP_ATOMIC        435 sound/pci/emu10k1/emufx.c 	irq = kmalloc(sizeof(*irq), GFP_ATOMIC);
GFP_ATOMIC        469 sound/pci/emu10k1/memory.c 			p = alloc_page(GFP_ATOMIC | GFP_DMA |
GFP_ATOMIC       2536 sound/sparc/dbri.c 				       &dbri->dma_dvma, GFP_ATOMIC);
GFP_ATOMIC        119 sound/usb/caiaq/caiaq-audio.c 		ret = usb_submit_urb(dev->data_urbs_in[i], GFP_ATOMIC);
GFP_ATOMIC        494 sound/usb/caiaq/caiaq-audio.c 		usb_submit_urb(out, GFP_ATOMIC);
GFP_ATOMIC        506 sound/usb/caiaq/caiaq-audio.c 	usb_submit_urb(urb, GFP_ATOMIC);
GFP_ATOMIC        174 sound/usb/caiaq/caiaq-device.c 	ret = usb_submit_urb(&dev->ep1_in_urb, GFP_ATOMIC);
GFP_ATOMIC         80 sound/usb/caiaq/caiaq-midi.c 	ret = usb_submit_urb(&dev->midi_out_urb, GFP_ATOMIC);
GFP_ATOMIC        696 sound/usb/usbaudio.c 	    (err = usb_submit_urb(urb, GFP_ATOMIC)) < 0) {
GFP_ATOMIC        719 sound/usb/usbaudio.c 	    (err = usb_submit_urb(urb, GFP_ATOMIC)) < 0) {
GFP_ATOMIC        869 sound/usb/usbaudio.c 		err = usb_submit_urb(subs->dataurb[i].urb, GFP_ATOMIC);
GFP_ATOMIC        880 sound/usb/usbaudio.c 			err = usb_submit_urb(subs->syncurb[i].urb, GFP_ATOMIC);
GFP_ATOMIC        249 sound/usb/usbmidi.c 	snd_usbmidi_submit_urb(urb, GFP_ATOMIC);
GFP_ATOMIC        293 sound/usb/usbmidi.c 		ep->urb_active = snd_usbmidi_submit_urb(urb, GFP_ATOMIC) >= 0;
GFP_ATOMIC        321 sound/usb/usbmidi.c 			snd_usbmidi_submit_urb(in->urb, GFP_ATOMIC);
GFP_ATOMIC       1700 sound/usb/usbmixer.c 		usb_submit_urb(mixer->rc_urb, GFP_ATOMIC);
GFP_ATOMIC       1740 sound/usb/usbmixer.c 		usb_submit_urb(urb, GFP_ATOMIC);
GFP_ATOMIC        360 sound/usb/usx2y/usb_stream.c 	err = usb_submit_urb(sk->idle_inurb, GFP_ATOMIC);
GFP_ATOMIC        367 sound/usb/usx2y/usb_stream.c 	err = usb_submit_urb(sk->idle_outurb, GFP_ATOMIC);
GFP_ATOMIC        692 sound/usb/usx2y/usb_stream.c 		err = usb_submit_urb(inurb, GFP_ATOMIC);
GFP_ATOMIC        699 sound/usb/usx2y/usb_stream.c 		err = usb_submit_urb(outurb, GFP_ATOMIC);
GFP_ATOMIC        231 sound/usb/usx2y/usbusx2y.c 				err = usb_submit_urb(usX2Y->US04->urb[usX2Y->US04->submitted++], GFP_ATOMIC);
GFP_ATOMIC        246 sound/usb/usx2y/usbusx2y.c 						err = usb_submit_urb(usX2Y->AS04.urb[j], GFP_ATOMIC);
GFP_ATOMIC        257 sound/usb/usx2y/usbusx2y.c 	usb_submit_urb(urb, GFP_ATOMIC);
GFP_ATOMIC        203 sound/usb/usx2y/usbusx2yaudio.c 	if ((err = usb_submit_urb(urb, GFP_ATOMIC)) < 0) {
GFP_ATOMIC        509 sound/usb/usx2y/usbusx2yaudio.c 			if ((err = usb_submit_urb(urb, GFP_ATOMIC)) < 0) {
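
Every call site above shares one constraint: the allocation is made from a context that may not sleep (under a spinlock, in softirq or hardirq handlers, or with preemption disabled), so GFP_ATOMIC is passed instead of GFP_KERNEL and the caller must tolerate failure, because the allocator will neither block nor enter direct reclaim on its behalf. A minimal sketch of that pattern follows; it is an illustration under that assumption only, and the structure, list, lock, and function names in it are hypothetical rather than taken from any file listed here.

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/list.h>

/* Hypothetical entry type used only for this sketch. */
struct demo_entry {
	struct list_head list;
	u32 key;
};

static DEFINE_SPINLOCK(demo_lock);
static LIST_HEAD(demo_list);

/*
 * May be called from softirq context (e.g. a packet receive path),
 * so it cannot sleep: allocate with GFP_ATOMIC and check for failure,
 * which is noticeably more likely than with GFP_KERNEL.
 */
static int demo_add_entry(u32 key)
{
	struct demo_entry *e;

	e = kzalloc(sizeof(*e), GFP_ATOMIC);
	if (!e)
		return -ENOMEM;

	e->key = key;
	spin_lock_bh(&demo_lock);
	list_add(&e->list, &demo_list);
	spin_unlock_bh(&demo_lock);
	return 0;
}

/* The many skb_clone()/alloc_skb() sites above follow the same rule. */
static struct sk_buff *demo_clone_for_second_listener(struct sk_buff *skb)
{
	return skb_clone(skb, GFP_ATOMIC);	/* NULL on failure; caller drops */
}

/*
 * A few sites choose the flag at runtime, as in sound/isa/gus/gus_dma.c
 * above, keeping GFP_KERNEL for callers that are allowed to sleep.
 */
static void *demo_alloc(size_t size, bool atomic)
{
	return kmalloc(size, atomic ? GFP_ATOMIC : GFP_KERNEL);
}

Where sleeping is permitted, GFP_KERNEL (or GFP_NOIO/GFP_NOFS in writeback paths) is generally preferred, since GFP_ATOMIC dips into reserved memory and fails sooner under pressure.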