kmem_cache_alloc  130 arch/x86/kernel/i387.c 		tsk->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
kmem_cache_alloc   23 arch/x86/kernel/process.c 		dst->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
kmem_cache_alloc  328 block/blk-integrity.c 		bi = kmem_cache_alloc(integrity_cachep,
kmem_cache_alloc  241 fs/adfs/super.c 	ei = (struct adfs_inode_info *)kmem_cache_alloc(adfs_inode_cachep, GFP_KERNEL);
kmem_cache_alloc   76 fs/affs/super.c 	i = kmem_cache_alloc(affs_inode_cachep, GFP_KERNEL);
kmem_cache_alloc  477 fs/afs/super.c 	vnode = kmem_cache_alloc(afs_inode_cachep, GFP_KERNEL);
kmem_cache_alloc  417 fs/aio.c       	req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL);
kmem_cache_alloc  280 fs/befs/linuxvfs.c         bi = (struct befs_inode_info *)kmem_cache_alloc(befs_inode_cachep,
kmem_cache_alloc  256 fs/bfs/inode.c 	bi = kmem_cache_alloc(bfs_inode_cachep, GFP_KERNEL);
kmem_cache_alloc  260 fs/block_dev.c 	struct bdev_inode *ei = kmem_cache_alloc(bdev_cachep, GFP_KERNEL);
kmem_cache_alloc 3235 fs/buffer.c    	struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
kmem_cache_alloc  300 fs/cifs/cifsfs.c 	cifs_inode = kmem_cache_alloc(cifs_inode_cachep, GFP_KERNEL);
kmem_cache_alloc  119 fs/cifs/transport.c 	temp = (struct oplock_q_entry *) kmem_cache_alloc(cifs_oplock_cachep,
kmem_cache_alloc   46 fs/coda/inode.c 	ei = (struct coda_inode_info *)kmem_cache_alloc(coda_inode_cachep, GFP_KERNEL);
kmem_cache_alloc  917 fs/dcache.c    	dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL);
kmem_cache_alloc   94 fs/dcookies.c  	struct dcookie_struct *dcs = kmem_cache_alloc(dcookie_cache,
kmem_cache_alloc   82 fs/dnotify.c   	dn = kmem_cache_alloc(dn_cache, GFP_KERNEL);
kmem_cache_alloc 1580 fs/ecryptfs/crypto.c 	page_virt = kmem_cache_alloc(ecryptfs_header_cache_1, GFP_USER);
kmem_cache_alloc 1817 fs/ecryptfs/crypto.c 	tmp_tfm = kmem_cache_alloc(ecryptfs_key_tfm_cache, GFP_KERNEL);
kmem_cache_alloc  306 fs/ecryptfs/inode.c 				    kmem_cache_alloc(ecryptfs_dentry_info_cache,
kmem_cache_alloc  758 fs/ecryptfs/inode.c 				  kmem_cache_alloc(ecryptfs_file_info_cache,
kmem_cache_alloc 1764 fs/ecryptfs/keystore.c 	key_rec = kmem_cache_alloc(ecryptfs_key_record_cache, GFP_KERNEL);
kmem_cache_alloc 1847 fs/ecryptfs/keystore.c 	new_key_sig = kmem_cache_alloc(ecryptfs_key_sig_cache, GFP_KERNEL);
kmem_cache_alloc  149 fs/ecryptfs/kthread.c 	req = kmem_cache_alloc(ecryptfs_open_req_cache, GFP_KERNEL);
kmem_cache_alloc  412 fs/ecryptfs/mmap.c 	xattr_virt = kmem_cache_alloc(ecryptfs_xattr_cache, GFP_KERNEL);
kmem_cache_alloc   53 fs/ecryptfs/super.c 	inode_info = kmem_cache_alloc(ecryptfs_inode_info_cache, GFP_KERNEL);
kmem_cache_alloc   62 fs/efs/super.c 	ei = (struct efs_inode_info *)kmem_cache_alloc(efs_inode_cachep, GFP_KERNEL);
kmem_cache_alloc  663 fs/eventpoll.c 	if (epi->nwait >= 0 && (pwq = kmem_cache_alloc(pwq_cache, GFP_KERNEL))) {
kmem_cache_alloc  707 fs/eventpoll.c 	if (!(epi = kmem_cache_alloc(epi_cache, GFP_KERNEL)))
kmem_cache_alloc  889 fs/exec.c      		newsighand = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
kmem_cache_alloc  145 fs/ext2/super.c 	ei = (struct ext2_inode_info *)kmem_cache_alloc(ext2_inode_cachep, GFP_KERNEL);
kmem_cache_alloc  450 fs/ext3/super.c 	ei = kmem_cache_alloc(ext3_inode_cachep, GFP_NOFS);
kmem_cache_alloc 3422 fs/ext4/mballoc.c 	pa = kmem_cache_alloc(ext4_pspace_cachep, GFP_NOFS);
kmem_cache_alloc 3511 fs/ext4/mballoc.c 	pa = kmem_cache_alloc(ext4_pspace_cachep, GFP_NOFS);
kmem_cache_alloc 3716 fs/ext4/mballoc.c 	ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
kmem_cache_alloc 3818 fs/ext4/mballoc.c 	ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
kmem_cache_alloc 4099 fs/ext4/mballoc.c 	ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
kmem_cache_alloc 4320 fs/ext4/mballoc.c 	ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
kmem_cache_alloc 4518 fs/ext4/mballoc.c 	ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
kmem_cache_alloc  570 fs/ext4/super.c 	ei = kmem_cache_alloc(ext4_inode_cachep, GFP_NOFS);
kmem_cache_alloc   64 fs/fat/cache.c 	return kmem_cache_alloc(fat_cache_cachep, GFP_NOFS);
kmem_cache_alloc  490 fs/fat/inode.c 	ei = kmem_cache_alloc(fat_inode_cachep, GFP_NOFS);
kmem_cache_alloc  514 fs/fcntl.c     		new = kmem_cache_alloc(fasync_cache, GFP_KERNEL);
kmem_cache_alloc  301 fs/file.c      	newf = kmem_cache_alloc(files_cachep, GFP_KERNEL);
kmem_cache_alloc  101 fs/freevxfs/vxfs_inode.c 		if (!(vip = kmem_cache_alloc(vxfs_inode_cachep, GFP_KERNEL)))
kmem_cache_alloc  143 fs/freevxfs/vxfs_inode.c 		if (!(vip = kmem_cache_alloc(vxfs_inode_cachep, GFP_KERNEL)))
kmem_cache_alloc   44 fs/fuse/dev.c  	struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, GFP_KERNEL);
kmem_cache_alloc   52 fs/fuse/dev.c  	struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, GFP_NOFS);
kmem_cache_alloc   54 fs/fuse/inode.c 	inode = kmem_cache_alloc(fuse_inode_cachep, GFP_KERNEL);
kmem_cache_alloc  684 fs/gfs2/glock.c 	gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_KERNEL);
kmem_cache_alloc  545 fs/gfs2/ops_super.c 	ip = kmem_cache_alloc(gfs2_inode_cachep, GFP_KERNEL);
kmem_cache_alloc  148 fs/hfs/super.c 	i = kmem_cache_alloc(hfs_inode_cachep, GFP_KERNEL);
kmem_cache_alloc  461 fs/hfsplus/super.c 	i = kmem_cache_alloc(hfsplus_inode_cachep, GFP_KERNEL);
kmem_cache_alloc  164 fs/hpfs/super.c 	ei = (struct hpfs_inode_info *)kmem_cache_alloc(hpfs_inode_cachep, GFP_NOFS);
kmem_cache_alloc  685 fs/hugetlbfs/inode.c 	p = kmem_cache_alloc(hugetlbfs_inode_cachep, GFP_KERNEL);
kmem_cache_alloc  121 fs/inode.c     		inode = (struct inode *) kmem_cache_alloc(inode_cachep, GFP_KERNEL);
kmem_cache_alloc  192 fs/inotify_user.c 	kevent = kmem_cache_alloc(event_cachep, GFP_NOFS);
kmem_cache_alloc  396 fs/inotify_user.c 	watch = kmem_cache_alloc(watch_cachep, GFP_KERNEL);
kmem_cache_alloc   65 fs/isofs/inode.c 	ei = kmem_cache_alloc(isofs_inode_cachep, GFP_KERNEL);
kmem_cache_alloc 1655 fs/jbd/journal.c 	ret = kmem_cache_alloc(journal_head_cache, GFP_NOFS);
kmem_cache_alloc 1665 fs/jbd/journal.c 			ret = kmem_cache_alloc(journal_head_cache, GFP_NOFS);
kmem_cache_alloc  126 fs/jbd/revoke.c 	record = kmem_cache_alloc(revoke_record_cache, GFP_NOFS);
kmem_cache_alloc  214 fs/jbd/revoke.c 	table = kmem_cache_alloc(revoke_table_cache, GFP_KERNEL);
kmem_cache_alloc 2022 fs/jbd2/journal.c 	ret = kmem_cache_alloc(jbd2_journal_head_cache, GFP_NOFS);
kmem_cache_alloc 2032 fs/jbd2/journal.c 			ret = kmem_cache_alloc(jbd2_journal_head_cache, GFP_NOFS);
kmem_cache_alloc  127 fs/jbd2/revoke.c 	record = kmem_cache_alloc(jbd2_revoke_record_cache, GFP_NOFS);
kmem_cache_alloc  213 fs/jbd2/revoke.c 	table = kmem_cache_alloc(jbd2_revoke_table_cache, GFP_KERNEL);
kmem_cache_alloc  137 fs/jffs2/malloc.c 	ret = kmem_cache_alloc(full_dnode_slab, GFP_KERNEL);
kmem_cache_alloc  151 fs/jffs2/malloc.c 	ret = kmem_cache_alloc(raw_dirent_slab, GFP_KERNEL);
kmem_cache_alloc  165 fs/jffs2/malloc.c 	ret = kmem_cache_alloc(raw_inode_slab, GFP_KERNEL);
kmem_cache_alloc  179 fs/jffs2/malloc.c 	ret = kmem_cache_alloc(tmp_dnode_info_slab, GFP_KERNEL);
kmem_cache_alloc  195 fs/jffs2/malloc.c 	ret = kmem_cache_alloc(raw_node_ref_slab, GFP_KERNEL);
kmem_cache_alloc  258 fs/jffs2/malloc.c 	ret = kmem_cache_alloc(node_frag_slab, GFP_KERNEL);
kmem_cache_alloc  272 fs/jffs2/malloc.c 	ret = kmem_cache_alloc(inode_cache_slab, GFP_KERNEL);
kmem_cache_alloc  287 fs/jffs2/malloc.c 	xd = kmem_cache_alloc(xattr_datum_cache, GFP_KERNEL);
kmem_cache_alloc  306 fs/jffs2/malloc.c 	ref = kmem_cache_alloc(xattr_ref_cache, GFP_KERNEL);
kmem_cache_alloc   36 fs/jffs2/super.c 	f = kmem_cache_alloc(jffs2_inode_cachep, GFP_KERNEL);
kmem_cache_alloc  110 fs/jfs/super.c 	jfs_inode = kmem_cache_alloc(jfs_inode_cachep, GFP_NOFS);
kmem_cache_alloc  151 fs/locks.c     	return kmem_cache_alloc(filelock_cache, GFP_KERNEL);
kmem_cache_alloc  406 fs/mbcache.c   	ce = kmem_cache_alloc(cache->c_entry_cache, gfp_flags);
kmem_cache_alloc   60 fs/minix/inode.c 	ei = (struct minix_inode_info *)kmem_cache_alloc(minix_inode_cachep, GFP_KERNEL);
kmem_cache_alloc   56 fs/ncpfs/inode.c 	ei = (struct ncp_inode_info *)kmem_cache_alloc(ncp_inode_cachep, GFP_KERNEL);
kmem_cache_alloc  152 fs/nfs/direct.c 	dreq = kmem_cache_alloc(nfs_direct_cachep, GFP_KERNEL);
kmem_cache_alloc 1215 fs/nfs/inode.c 	nfsi = (struct nfs_inode *)kmem_cache_alloc(nfs_inode_cachep, GFP_KERNEL);
kmem_cache_alloc   30 fs/nfs/pagelist.c 	p = kmem_cache_alloc(nfs_page_cachep, GFP_KERNEL);
kmem_cache_alloc  201 fs/nfsd/nfs4state.c 	dp = kmem_cache_alloc(deleg_slab, GFP_KERNEL);
kmem_cache_alloc  912 fs/nfsd/nfs4state.c 	fp = kmem_cache_alloc(file_slab, GFP_KERNEL);
kmem_cache_alloc  985 fs/nfsd/nfs4state.c 	if ((sop = kmem_cache_alloc(stateowner_slab, GFP_KERNEL))) {
kmem_cache_alloc 1527 fs/nfsd/nfs4state.c 	return kmem_cache_alloc(stateid_slab, GFP_KERNEL);
kmem_cache_alloc 1272 fs/ntfs/attrib.c 	ctx = kmem_cache_alloc(ntfs_attr_ctx_cache, GFP_NOFS);
kmem_cache_alloc   41 fs/ntfs/index.c 	ictx = kmem_cache_alloc(ntfs_index_ctx_cache, GFP_NOFS);
kmem_cache_alloc  325 fs/ntfs/inode.c 	ni = kmem_cache_alloc(ntfs_big_inode_cache, GFP_NOFS);
kmem_cache_alloc  350 fs/ntfs/inode.c 	ni = kmem_cache_alloc(ntfs_inode_cache, GFP_NOFS);
kmem_cache_alloc  269 fs/ntfs/unistr.c 		ucs = kmem_cache_alloc(ntfs_name_cache, GFP_NOFS);
kmem_cache_alloc  285 fs/ocfs2/dlm/dlmfs.c 	ip = kmem_cache_alloc(dlmfs_inode_cache, GFP_NOFS);
kmem_cache_alloc  757 fs/ocfs2/dlm/dlmmaster.c 			kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
kmem_cache_alloc 1548 fs/ocfs2/dlm/dlmmaster.c 				kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
kmem_cache_alloc 2459 fs/ocfs2/dlm/dlmmaster.c 	mle = (struct dlm_master_list_entry *) kmem_cache_alloc(dlm_mle_cache,
kmem_cache_alloc 3025 fs/ocfs2/dlm/dlmmaster.c 	mle = (struct dlm_master_list_entry *) kmem_cache_alloc(dlm_mle_cache,
kmem_cache_alloc  332 fs/ocfs2/super.c 	oi = kmem_cache_alloc(ocfs2_inode_cachep, GFP_NOFS);
kmem_cache_alloc  357 fs/ocfs2/uptodate.c 	new = kmem_cache_alloc(ocfs2_uptodate_cachep, GFP_NOFS);
kmem_cache_alloc  368 fs/ocfs2/uptodate.c 			tree[i] = kmem_cache_alloc(ocfs2_uptodate_cachep,
kmem_cache_alloc  341 fs/openpromfs/inode.c 	oi = kmem_cache_alloc(op_inode_cachep, GFP_KERNEL);
kmem_cache_alloc   83 fs/proc/inode.c 	ei = (struct proc_inode *)kmem_cache_alloc(proc_inode_cachep, GFP_KERNEL);
kmem_cache_alloc  545 fs/qnx4/inode.c 	ei = kmem_cache_alloc(qnx4_inode_cachep, GFP_KERNEL);
kmem_cache_alloc  512 fs/reiserfs/super.c 	    kmem_cache_alloc(reiserfs_inode_cachep, GFP_KERNEL);
kmem_cache_alloc  578 fs/romfs/inode.c 	ei = kmem_cache_alloc(romfs_inode_cachep, GFP_KERNEL);
kmem_cache_alloc   59 fs/smbfs/inode.c 	ei = (struct smb_inode_info *)kmem_cache_alloc(smb_inode_cachep, GFP_KERNEL);
kmem_cache_alloc  318 fs/sysv/inode.c 	si = kmem_cache_alloc(sysv_inode_cachep, GFP_KERNEL);
kmem_cache_alloc  256 fs/ubifs/super.c 	ui = kmem_cache_alloc(ubifs_inode_slab, GFP_NOFS);
kmem_cache_alloc  133 fs/udf/super.c 	ei = kmem_cache_alloc(udf_inode_cachep, GFP_KERNEL);
kmem_cache_alloc 1293 fs/ufs/super.c 	ei = (struct ufs_inode_info *)kmem_cache_alloc(ufs_inode_cachep, GFP_KERNEL);
kmem_cache_alloc  126 fs/xfs/linux-2.6/kmem.c 		ptr = kmem_cache_alloc(zone, lflags);
kmem_cache_alloc   47 include/asm-xtensa/pgalloc.h 	return kmem_cache_alloc(pgtable_cache, GFP_KERNEL|__GFP_REPEAT);
kmem_cache_alloc 1702 include/linux/fs.h #define __getname()	kmem_cache_alloc(names_cachep, GFP_KERNEL)
kmem_cache_alloc  938 include/linux/jbd.h 	return kmem_cache_alloc(jbd_handle_cache, gfp_flags);
kmem_cache_alloc 1098 include/linux/jbd2.h 	return kmem_cache_alloc(jbd2_handle_cache, gfp_flags);
kmem_cache_alloc   46 include/linux/rmap.h 	return kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
kmem_cache_alloc  210 include/linux/slab.h void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
kmem_cache_alloc  215 include/linux/slab.h 	return kmem_cache_alloc(cachep, flags);
kmem_cache_alloc  267 include/linux/slab.h 	return kmem_cache_alloc(k, flags | __GFP_ZERO);
kmem_cache_alloc   28 include/linux/slab_def.h void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
kmem_cache_alloc   53 include/linux/slab_def.h 			return kmem_cache_alloc(malloc_sizes[i].cs_dmacachep,
kmem_cache_alloc   56 include/linux/slab_def.h 		return kmem_cache_alloc(malloc_sizes[i].cs_cachep, flags);
kmem_cache_alloc  204 include/linux/slub_def.h void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
kmem_cache_alloc  224 include/linux/slub_def.h 			return kmem_cache_alloc(s, flags);
kmem_cache_alloc   63 include/net/request_sock.h 	struct request_sock *req = kmem_cache_alloc(ops->slab, GFP_ATOMIC);
kmem_cache_alloc  221 ipc/mqueue.c   	ei = kmem_cache_alloc(mqueue_inode_cachep, GFP_KERNEL);
kmem_cache_alloc   93 kernel/fork.c  # define alloc_task_struct()	kmem_cache_alloc(task_struct_cachep, GFP_KERNEL)
kmem_cache_alloc  300 kernel/fork.c  		tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
kmem_cache_alloc  392 kernel/fork.c  #define allocate_mm()	(kmem_cache_alloc(mm_cachep, GFP_KERNEL))
kmem_cache_alloc  652 kernel/fork.c  	struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
kmem_cache_alloc  747 kernel/fork.c  	sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
kmem_cache_alloc  772 kernel/fork.c  	sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
kmem_cache_alloc   36 kernel/nsproxy.c 	ns = kmem_cache_alloc(nsproxy_cachep, GFP_KERNEL);
kmem_cache_alloc  250 kernel/pid.c   	pid = kmem_cache_alloc(ns->pid_cachep, GFP_KERNEL);
kmem_cache_alloc  195 kernel/signal.c 		q = kmem_cache_alloc(sigqueue_cachep, flags);
kmem_cache_alloc  124 lib/idr.c      		new = kmem_cache_alloc(idr_layer_cache, gfp_mask);
kmem_cache_alloc  168 lib/radix-tree.c 		ret = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
kmem_cache_alloc  214 lib/radix-tree.c 		node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
kmem_cache_alloc  547 mm/memcontrol.c 	pc = kmem_cache_alloc(page_cgroup_cache, gfp_mask);
kmem_cache_alloc  215 mm/mempolicy.c 	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
kmem_cache_alloc 1611 mm/mempolicy.c 	struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
kmem_cache_alloc 1774 mm/mempolicy.c 	struct sp_node *n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
kmem_cache_alloc  289 mm/mempool.c   	return kmem_cache_alloc(mem, gfp_mask);
kmem_cache_alloc 1827 mm/mmap.c      	new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
kmem_cache_alloc 2158 mm/mmap.c      		new_vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
kmem_cache_alloc 2338 mm/shmem.c     	p = (struct shmem_inode_info *)kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
kmem_cache_alloc 2900 mm/slub.c      		n = kmem_cache_alloc(kmalloc_caches, GFP_KERNEL);
kmem_cache_alloc  326 net/bridge/br_fdb.c 	fdb = kmem_cache_alloc(br_fdb_cache, GFP_ATOMIC);
kmem_cache_alloc  400 net/can/af_can.c 	r = kmem_cache_alloc(rcv_cache, GFP_KERNEL);
kmem_cache_alloc  210 net/core/flow.c 		fle = kmem_cache_alloc(flow_cachep, GFP_ATOMIC);
kmem_cache_alloc  599 net/core/skbuff.c 		n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
kmem_cache_alloc  892 net/core/sock.c 		sk = kmem_cache_alloc(slab, priority);
kmem_cache_alloc   30 net/dccp/ackvec.c 			kmem_cache_alloc(dccp_ackvec_record_slab, GFP_ATOMIC);
kmem_cache_alloc  153 net/dccp/ackvec.c 	struct dccp_ackvec *av = kmem_cache_alloc(dccp_ackvec_slab, priority);
kmem_cache_alloc  176 net/dccp/ccid.c 	ccid = kmem_cache_alloc(rx ? ccid_ops->ccid_hc_rx_slab :
kmem_cache_alloc   46 net/dccp/ccids/lib/loss_interval.c 		lh->ring[LIH_INDEX(lh->counter)] = kmem_cache_alloc(tfrc_lh_slab,
kmem_cache_alloc   87 net/dccp/ccids/lib/packet_history.c 	struct tfrc_tx_hist_entry *entry = kmem_cache_alloc(tfrc_tx_hist_slab, gfp_any());
kmem_cache_alloc  400 net/dccp/ccids/lib/packet_history.c 		h->ring[i] = kmem_cache_alloc(tfrc_rx_hist_slab, GFP_ATOMIC);
kmem_cache_alloc  511 net/ipv4/fib_hash.c 		new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
kmem_cache_alloc  390 net/ipv4/fib_trie.c 	struct leaf *l = kmem_cache_alloc(trie_leaf_kmem, GFP_KERNEL);
kmem_cache_alloc 1257 net/ipv4/fib_trie.c 			new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
kmem_cache_alloc 1295 net/ipv4/fib_trie.c 	new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
kmem_cache_alloc   35 net/ipv4/inet_hashtables.c 	struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, GFP_ATOMIC);
kmem_cache_alloc  111 net/ipv4/inet_timewait_sock.c 		kmem_cache_alloc(sk->sk_prot_creator->twsk_prot->twsk_slab,
kmem_cache_alloc  387 net/ipv4/inetpeer.c 	n = kmem_cache_alloc(peer_cachep, GFP_ATOMIC);
kmem_cache_alloc  686 net/ipv6/ip6mr.c 	struct mfc6_cache *c = kmem_cache_alloc(mrt_cachep, GFP_KERNEL);
kmem_cache_alloc  696 net/ipv6/ip6mr.c 	struct mfc6_cache *c = kmem_cache_alloc(mrt_cachep, GFP_ATOMIC);
kmem_cache_alloc  184 net/ipv6/xfrm6_tunnel.c 	x6spi = kmem_cache_alloc(xfrm6_tunnel_spi_kmem, GFP_ATOMIC);
kmem_cache_alloc  192 net/mac80211/mesh.c 	p = kmem_cache_alloc(rm_cache, GFP_ATOMIC);
kmem_cache_alloc  228 net/netfilter/nf_conntrack_expect.c 	new = kmem_cache_alloc(nf_ct_expect_cachep, GFP_ATOMIC);
kmem_cache_alloc  165 net/netfilter/xt_hashlimit.c 	ent = kmem_cache_alloc(hashlimit_cachep, GFP_ATOMIC);
kmem_cache_alloc 5948 net/sctp/socket.c 	pp = kmem_cache_alloc(sctp_bucket_cachep, GFP_ATOMIC);
kmem_cache_alloc  247 net/socket.c   	ei = kmem_cache_alloc(sock_inode_cachep, GFP_KERNEL);
kmem_cache_alloc  156 net/sunrpc/rpc_pipe.c 	rpci = (struct rpc_inode *)kmem_cache_alloc(rpc_inode_cachep, GFP_KERNEL);
kmem_cache_alloc   95 net/sunrpc/xprtrdma/svc_rdma_transport.c 		ctxt = kmem_cache_alloc(svc_rdma_ctxt_cachep, GFP_KERNEL);
kmem_cache_alloc  156 net/sunrpc/xprtrdma/svc_rdma_transport.c 		map = kmem_cache_alloc(svc_rdma_map_cachep, GFP_KERNEL);
kmem_cache_alloc   65 net/tipc/handler.c 	item = kmem_cache_alloc(tipc_queue_item_cache, GFP_ATOMIC);
kmem_cache_alloc   32 net/xfrm/xfrm_input.c 	sp = kmem_cache_alloc(secpath_cachep, GFP_ATOMIC);
kmem_cache_alloc  263 security/keys/key.c 	key = kmem_cache_alloc(key_jar, GFP_KERNEL);
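
The call sites above all follow the same slab-cache pattern: a dedicated cache is created once (typically at module or filesystem init) and objects are then drawn from it with kmem_cache_alloc() and returned with kmem_cache_free(). The sketch below illustrates that pattern for a hypothetical filesystem "foo"; every foo_* name and struct foo_inode_info are invented for illustration, while kmem_cache_create(), kmem_cache_alloc(), kmem_cache_free() and kmem_cache_destroy() are the real API used throughout the listing.

/*
 * Minimal sketch, assuming a hypothetical filesystem "foo" with a
 * per-inode info structure allocated from its own slab cache, in the
 * style of the fs/<xyz>/super.c call sites listed above.
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/fs.h>

struct foo_inode_info {			/* hypothetical per-inode data */
	unsigned long	i_flags;
	struct inode	vfs_inode;	/* embedded VFS inode */
};

static struct kmem_cache *foo_inode_cachep;

static int foo_init_inodecache(void)
{
	/* Create the cache once; objects are sizeof(struct foo_inode_info). */
	foo_inode_cachep = kmem_cache_create("foo_inode_cache",
					     sizeof(struct foo_inode_info),
					     0, SLAB_HWCACHE_ALIGN, NULL);
	return foo_inode_cachep ? 0 : -ENOMEM;
}

static struct inode *foo_alloc_inode(struct super_block *sb)
{
	struct foo_inode_info *ei;

	/* Draw one object from the cache; may sleep under GFP_KERNEL. */
	ei = kmem_cache_alloc(foo_inode_cachep, GFP_KERNEL);
	if (!ei)
		return NULL;
	return &ei->vfs_inode;
}

static void foo_destroy_inode(struct inode *inode)
{
	/* Return the containing object to the cache it came from. */
	kmem_cache_free(foo_inode_cachep,
			container_of(inode, struct foo_inode_info, vfs_inode));
}

static void foo_destroy_inodecache(void)
{
	kmem_cache_destroy(foo_inode_cachep);
}

Note the GFP flag choice visible in the listing: code that may be called during writeback or while holding filesystem locks passes GFP_NOFS (ext3/ext4, jbd, ntfs, ocfs2), softirq and socket paths pass GFP_ATOMIC (networking call sites), and ordinary process context uses GFP_KERNEL.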