atomic_read 167 arch/x86/kernel/cpu/mcheck/therm_throt.c if (!atomic_read(&therm_throt_en))
atomic_read 145 arch/x86/kernel/cpu/mtrr/main.c while(!atomic_read(&data->gate))
atomic_read 156 arch/x86/kernel/cpu/mtrr/main.c while(atomic_read(&data->gate))
atomic_read 231 arch/x86/kernel/cpu/mtrr/main.c while(atomic_read(&data.count))
atomic_read 251 arch/x86/kernel/cpu/mtrr/main.c while(atomic_read(&data.count))
atomic_read 262 arch/x86/kernel/cpu/mtrr/main.c while(atomic_read(&data.count))
atomic_read 189 arch/x86/kernel/cpu/perfctr-watchdog.c if (atomic_read(&nmi_active) <= 0)
atomic_read 197 arch/x86/kernel/cpu/perfctr-watchdog.c BUG_ON(atomic_read(&nmi_active) != 0);
atomic_read 205 arch/x86/kernel/cpu/perfctr-watchdog.c if (atomic_read(&nmi_active) != 0)
atomic_read 106 arch/x86/kernel/crash.c while ((atomic_read(&waiting_for_crash_ipi) > 0) && msecs) {
atomic_read 349 arch/x86/kernel/irq_32.c seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
atomic_read 351 arch/x86/kernel/irq_32.c seq_printf(p, "MIS: %10u\n", atomic_read(&irq_mis_count));
atomic_read 383 arch/x86/kernel/irq_32.c u64 sum = atomic_read(&irq_err_count);
atomic_read 386 arch/x86/kernel/irq_32.c sum += atomic_read(&irq_mis_count);
atomic_read 152 arch/x86/kernel/irq_64.c seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
atomic_read 180 arch/x86/kernel/irq_64.c return atomic_read(&irq_err_count);
atomic_read 448 arch/x86/kernel/kgdb.c if (atomic_read(&kgdb_active) != -1) {
atomic_read 469 arch/x86/kernel/kgdb.c if (atomic_read(&kgdb_active) != -1) {
atomic_read 478 arch/x86/kernel/kgdb.c if (atomic_read(&kgdb_cpu_doing_single_step) ==
atomic_read 76 arch/x86/kernel/nmi.c return atomic_read(&mce_entry) > 0;
atomic_read 139 arch/x86/kernel/nmi.c if (!nmi_watchdog_active() || !atomic_read(&nmi_active))
atomic_read 165 arch/x86/kernel/nmi.c if (!atomic_read(&nmi_active)) {
atomic_read 222 arch/x86/kernel/nmi.c nmi_pm_active = atomic_read(&nmi_active);
atomic_read 224 arch/x86/kernel/nmi.c BUG_ON(atomic_read(&nmi_active) != 0);
atomic_read 260 arch/x86/kernel/nmi.c if (atomic_read(&nmi_active) < 0)
atomic_read 284 arch/x86/kernel/nmi.c if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC)
atomic_read 298 arch/x86/kernel/nmi.c if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC)
atomic_read 318 arch/x86/kernel/nmi.c if (smp_processor_id() != 0 && atomic_read(&nmi_active) <= 0)
atomic_read 493 arch/x86/kernel/nmi.c nmi_watchdog_enabled = (atomic_read(&nmi_active) > 0) ? 1 : 0;
atomic_read 499 arch/x86/kernel/nmi.c if (atomic_read(&nmi_active) < 0 || !nmi_watchdog_active()) {
atomic_read 122 arch/x86/kernel/tsc_sync.c while (atomic_read(&start_count) != cpus-1)
atomic_read 131 arch/x86/kernel/tsc_sync.c while (atomic_read(&stop_count) != cpus-1)
atomic_read 172 arch/x86/kernel/tsc_sync.c while (atomic_read(&start_count) != cpus)
atomic_read 185 arch/x86/kernel/tsc_sync.c while (atomic_read(&stop_count) != cpus)
atomic_read 219 arch/x86/kvm/i8254.c return atomic_read(&pit->pit_state.pit_timer.pending);
atomic_read 600 arch/x86/kvm/i8254.c if (atomic_read(&ps->pit_timer.pending) &&
atomic_read 618 arch/x86/kvm/i8254.c if (atomic_read(&ps->pit_timer.pending) &&
atomic_read 968 arch/x86/kvm/lapic.c return atomic_read(&lapic->timer.pending);
atomic_read 1074 arch/x86/kvm/lapic.c atomic_read(&apic->timer.pending) > 0) {
atomic_read 97 arch/x86/mm/mmio-mod.c return atomic_read(&mmiotrace_enabled);
atomic_read 61 arch/x86/oprofile/nmi_timer_int.c if ((nmi_watchdog != NMI_IO_APIC) || (atomic_read(&nmi_active) <= 0))
atomic_read 558 block/as-iosched.c unsigned long in_flight = atomic_read(&aic->nr_queued)
atomic_read 559 block/as-iosched.c + atomic_read(&aic->nr_dispatched);
atomic_read 680 block/as-iosched.c if (atomic_read(&aic->nr_queued) > 0) {
atomic_read 686 block/as-iosched.c if (atomic_read(&aic->nr_dispatched) > 0) {
atomic_read 910 block/as-iosched.c BUG_ON(!atomic_read(&ioc->aic->nr_queued));
atomic_read 38 block/blk-ioc.c BUG_ON(atomic_read(&ioc->refcount) == 0);
atomic_read 166 block/blk-ioc.c BUG_ON(atomic_read(&src->refcount) == 0);
atomic_read 245 block/blk-tag.c if (atomic_read(&bqt->refcnt) != 1)
atomic_read 271 block/blktrace.c snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
atomic_read 908 block/cfq-iosched.c if (!cic || !atomic_read(&cic->ioc->nr_tasks))
atomic_read 1170 block/cfq-iosched.c BUG_ON(atomic_read(&cfqq->ref) <= 0);
atomic_read 1744 block/cfq-iosched.c if (!atomic_read(&cic->ioc->nr_tasks) || !cfqd->cfq_slice_idle ||
atomic_read 346 crypto/algapi.c BUG_ON(atomic_read(&alg->cra_refcnt) != 1);
atomic_read 399 crypto/algapi.c BUG_ON(atomic_read(&inst->alg.cra_refcnt) != 1);
atomic_read 187 crypto/async_tx/async_tx.c else if (atomic_read(&ref->count) <
atomic_read 188 crypto/async_tx/async_tx.c atomic_read(&min_ref->count))
atomic_read 93 crypto/proc.c seq_printf(m, "refcnt : %d\n", atomic_read(&alg->cra_refcnt));
atomic_read 50 fs/affs/file.c inode->i_ino, atomic_read(&AFFS_I(inode)->i_opencnt));
atomic_read 59 fs/affs/file.c inode->i_ino, atomic_read(&AFFS_I(inode)->i_opencnt));
atomic_read 388 fs/afs/callback.c if (atomic_read(&vnode->usage) > 0)
atomic_read 301 fs/afs/cell.c _enter("%p{%d,%s}", cell, atomic_read(&cell->usage), cell->name);
atomic_read 303 fs/afs/cell.c ASSERTCMP(atomic_read(&cell->usage), >, 0);
atomic_read 332 fs/afs/cell.c _enter("%p{%d,%s}", cell, atomic_read(&cell->usage), cell->name);
atomic_read 334 fs/afs/cell.c ASSERTCMP(atomic_read(&cell->usage), >=, 0);
atomic_read 338 fs/afs/cell.c if (atomic_read(&cell->usage) > 0) {
atomic_read 345 fs/afs/cell.c while (atomic_read(&cell->usage) > 0) {
atomic_read 355 fs/afs/cell.c ASSERTCMP(atomic_read(&cell->usage), ==, 0);
atomic_read 405 fs/afs/cell.c cell->name, atomic_read(&cell->usage));
atomic_read 615 fs/afs/internal.h _debug("GET SERVER %d", atomic_read(&(S)->usage)); \
atomic_read 241 fs/afs/proc.c atomic_read(&cell->usage), cell->name);
atomic_read 538 fs/afs/proc.c atomic_read(&vlocation->usage),
atomic_read 742 fs/afs/proc.c atomic_read(&server->usage), ipaddr, server->fs_state);
atomic_read 125 fs/afs/rxrpc.c ASSERTCMP(atomic_read(&afs_outstanding_skbs), ==, 0);
atomic_read 126 fs/afs/rxrpc.c ASSERTCMP(atomic_read(&afs_outstanding_calls), ==, 0);
atomic_read 137 fs/afs/rxrpc.c _debug("DLVR NULL [%d]", atomic_read(&afs_outstanding_skbs));
atomic_read 141 fs/afs/rxrpc.c skb, skb->mark, atomic_read(&afs_outstanding_skbs));
atomic_read 154 fs/afs/rxrpc.c _debug("FREE NULL [%d]", atomic_read(&afs_outstanding_skbs));
atomic_read 158 fs/afs/rxrpc.c skb, skb->mark, atomic_read(&afs_outstanding_skbs));
atomic_read 171 fs/afs/rxrpc.c call, call->type->name, atomic_read(&afs_outstanding_calls));
atomic_read 197 fs/afs/rxrpc.c call, type->name, atomic_read(&afs_outstanding_calls));
atomic_read 323 fs/afs/rxrpc.c atomic_read(&afs_outstanding_calls));
atomic_read 403 fs/afs/rxrpc.c skb, skb->mark, atomic_read(&afs_outstanding_skbs));
atomic_read 692 fs/afs/rxrpc.c atomic_read(&afs_outstanding_calls));
atomic_read 96 fs/afs/server.c _leave(" = %p{%d}", server, atomic_read(&server->usage));
atomic_read 142 fs/afs/server.c _leave(" = %p{%d}", server, atomic_read(&server->usage));
atomic_read 156 fs/afs/server.c _leave(" = %p{%d}", server, atomic_read(&server->usage));
atomic_read 224 fs/afs/server.c _enter("%p{%d}", server, atomic_read(&server->usage));
atomic_read 226 fs/afs/server.c _debug("PUT SERVER %d", atomic_read(&server->usage));
atomic_read 228 fs/afs/server.c ASSERTCMP(atomic_read(&server->usage), >, 0);
atomic_read 238 fs/afs/server.c if (atomic_read(&server->usage) == 0) {
atomic_read 261 fs/afs/server.c ASSERTCMP(atomic_read(&server->cb_break_n), ==, 0);
atomic_read 298 fs/afs/server.c if (atomic_read(&server->usage) > 0) {
atomic_read 119 fs/afs/super.c if (atomic_read(&afs_count_active_inodes) != 0) {
atomic_read 121 fs/afs/super.c atomic_read(&afs_count_active_inodes));
atomic_read 493 fs/afs/vlocation.c ASSERTCMP(atomic_read(&vl->usage), >, 0);
atomic_read 501 fs/afs/vlocation.c if (atomic_read(&vl->usage) == 0) {
atomic_read 570 fs/afs/vlocation.c if (atomic_read(&vl->usage) > 0) {
atomic_read 648 fs/afs/vlocation.c if (atomic_read(&vl->usage) > 0)
atomic_read 178 fs/afs/volume.c ASSERTCMP(atomic_read(&volume->usage), >, 0);
atomic_read 222 fs/aio.c BUG_ON(atomic_read(&(kioctx)->users) <= 0); \
atomic_read 226 fs/aio.c BUG_ON(atomic_read(&(kioctx)->users) <= 0); \
atomic_read 391 fs/aio.c if (1 != atomic_read(&ctx->users))
atomic_read 394 fs/aio.c atomic_read(&ctx->users), ctx->dead,
atomic_read 175 fs/autofs4/expire.c unsigned int ino_count = atomic_read(&ino->count);
atomic_read 189 fs/autofs4/expire.c if (atomic_read(&p->d_count) > ino_count) {
atomic_read 337 fs/autofs4/expire.c ino_count = atomic_read(&ino->count) + 2;
atomic_read 338 fs/autofs4/expire.c if (atomic_read(&dentry->d_count) > ino_count)
atomic_read 359 fs/autofs4/expire.c ino_count = atomic_read(&ino->count) + 1;
atomic_read 360 fs/autofs4/expire.c if (atomic_read(&dentry->d_count) > ino_count)
atomic_read 373 fs/autofs4/expire.c ino_count = atomic_read(&ino->count) + 1;
atomic_read 374 fs/autofs4/expire.c if (atomic_read(&dentry->d_count) > ino_count)
atomic_read 384 fs/autofs4/root.c if (atomic_read(&dentry->d_count) == 0)
atomic_read 246 fs/bio.c BIO_BUG_ON(!atomic_read(&bio->bi_cnt));
atomic_read 1213 fs/buffer.c if (atomic_read(&buf->b_count)) {
atomic_read 3040 fs/buffer.c WARN_ON(atomic_read(&bh->b_count) < 1);
atomic_read 3081 fs/buffer.c return atomic_read(&bh->b_count) |
atomic_read 139 fs/cifs/cifs_debug.c atomic_read(&ses->inUse),
atomic_read 147 fs/cifs/cifs_debug.c atomic_read(&ses->server->socketUseCount),
atomic_read 149 fs/cifs/cifs_debug.c atomic_read(&ses->server->inFlight));
atomic_read 153 fs/cifs/cifs_debug.c atomic_read(&ses->server->inSend),
atomic_read 154 fs/cifs/cifs_debug.c atomic_read(&ses->server->num_waiters));
atomic_read 189 fs/cifs/cifs_debug.c tcon->treeName, atomic_read(&tcon->useCount));
atomic_read 295 fs/cifs/cifs_debug.c atomic_read(&totBufAllocCount),
atomic_read 296 fs/cifs/cifs_debug.c atomic_read(&totSmBufAllocCount));
atomic_read 317 fs/cifs/cifs_debug.c atomic_read(&tcon->num_smbs_sent),
atomic_read 318 fs/cifs/cifs_debug.c atomic_read(&tcon->num_oplock_brks));
atomic_read 320 fs/cifs/cifs_debug.c atomic_read(&tcon->num_reads),
atomic_read 323 fs/cifs/cifs_debug.c atomic_read(&tcon->num_writes),
atomic_read 327 fs/cifs/cifs_debug.c atomic_read(&tcon->num_locks),
atomic_read 328 fs/cifs/cifs_debug.c atomic_read(&tcon->num_hardlinks),
atomic_read 329 fs/cifs/cifs_debug.c atomic_read(&tcon->num_symlinks));
atomic_read 332 fs/cifs/cifs_debug.c atomic_read(&tcon->num_opens),
atomic_read 333 fs/cifs/cifs_debug.c atomic_read(&tcon->num_closes),
atomic_read 334 fs/cifs/cifs_debug.c atomic_read(&tcon->num_deletes));
atomic_read 336 fs/cifs/cifs_debug.c atomic_read(&tcon->num_mkdirs),
atomic_read 337 fs/cifs/cifs_debug.c atomic_read(&tcon->num_rmdirs));
atomic_read 339 fs/cifs/cifs_debug.c atomic_read(&tcon->num_renames),
atomic_read 340 fs/cifs/cifs_debug.c atomic_read(&tcon->num_t2renames));
atomic_read 342 fs/cifs/cifs_debug.c atomic_read(&tcon->num_ffirst),
atomic_read 343 fs/cifs/cifs_debug.c atomic_read(&tcon->num_fnext),
atomic_read 344 fs/cifs/cifs_debug.c atomic_read(&tcon->num_fclose));
atomic_read 514 fs/cifs/cifsfs.c if (atomic_read(&tcon->useCount) == 1)
atomic_read 1002 fs/cifs/cifsfs.c if (ses->server && atomic_read(&ses->server->inFlight))
atomic_read 755 fs/cifs/cifssmb.c if (atomic_read(&tcon->useCount) > 0) {
atomic_read 805 fs/cifs/cifssmb.c if (atomic_read(&ses->inUse) > 0) {
atomic_read 829 fs/cifs/cifssmb.c if (atomic_read(&ses->server->socketUseCount) == 0) {
atomic_read 666 fs/cifs/connect.c if (atomic_read(&server->inFlight) >= cifs_max_pending)
atomic_read 2224 fs/cifs/connect.c if (atomic_read(&srvTcp->socketUseCount) == 0) {
atomic_read 498 fs/cifs/file.c while ((atomic_read(&pSMBFile->wrtPending) != 0)
atomic_read 512 fs/cifs/file.c if (atomic_read(&pSMBFile->wrtPending))
atomic_read 544 fs/cifs/file.c while ((atomic_read(&pSMBFile->wrtPending) != 0)
atomic_read 541 fs/cifs/inode.c if (atomic_read(&cifsInfo->inUse) == 0 ||
atomic_read 601 fs/cifs/inode.c } else if (atomic_read(&cifsInfo->inUse) == 0) {
atomic_read 188 fs/cifs/readdir.c if (atomic_read(&cifsInfo->inUse) == 0) {
atomic_read 199 fs/cifs/readdir.c if ((atomic_read(&cifsInfo->inUse) == 0) ||
atomic_read 241 fs/cifs/readdir.c if (atomic_read(&cifsInfo->inUse) == 0)
atomic_read 335 fs/cifs/transport.c if (atomic_read(&ses->server->inFlight) >=
atomic_read 342 fs/cifs/transport.c atomic_read(&ses->server->inFlight)
atomic_read 34 fs/coda/cache.c cii->c_cached_epoch = atomic_read(&permission_epoch);
atomic_read 46 fs/coda/cache.c cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
atomic_read 64 fs/coda/cache.c cii->c_cached_epoch == atomic_read(&permission_epoch);
atomic_read 611 fs/coda/dir.c if (atomic_read(&de->d_count) > 1)
atomic_read 145 fs/configfs/configfs_internal.h WARN_ON(!atomic_read(&sd->s_count));
atomic_read 153 fs/configfs/configfs_internal.h WARN_ON(!atomic_read(&sd->s_count));
atomic_read 315 fs/configfs/dir.c atomic_read(&d->d_count));
atomic_read 217 fs/dcache.c if (atomic_read(&dentry->d_count) == 1)
atomic_read 223 fs/dcache.c if (atomic_read(&dentry->d_count)) {
atomic_read 300 fs/dcache.c if (atomic_read(&dentry->d_count) > 1) {
atomic_read 395 fs/dcache.c if (!atomic_read(&dentry->d_count)) {
atomic_read 502 fs/dcache.c if (atomic_read(&dentry->d_count)) {
atomic_read 654 fs/dcache.c if (atomic_read(&dentry->d_count) != 0) {
atomic_read 663 fs/dcache.c atomic_read(&dentry->d_count),
atomic_read 825 fs/dcache.c if (!atomic_read(&dentry->d_count)) {
atomic_read 1526 fs/dcache.c if (atomic_read(&dentry->d_count) == 1) {
atomic_read 906 fs/dlm/user.c return atomic_read(&dlm_monitor_opened) ? 1 : 0;
atomic_read 402 fs/dquot.c if (atomic_read(&dquot->dq_count) > 1)
atomic_read 438 fs/dquot.c if (atomic_read(&dquot->dq_count)) {
atomic_read 452 fs/dquot.c if (atomic_read(&dquot->dq_count) > 1)
atomic_read 570 fs/dquot.c if (!atomic_read(&dquot->dq_count)) {
atomic_read 585 fs/dquot.c if (atomic_read(&dquot->dq_count) > 1) {
atomic_read 590 fs/dquot.c atomic_read(&dquot->dq_count) == 1)
atomic_read 680 fs/dquot.c if (!atomic_read(&dquot->dq_count))
atomic_read 725 fs/dquot.c if (!atomic_read(&inode->i_writecount))
atomic_read 752 fs/dquot.c if (atomic_read(&dquot->dq_count) <= 1)
atomic_read 768 fs/dquot.c if (atomic_read(&dquot->dq_count) != 1)
atomic_read 769 fs/dquot.c printk(KERN_WARNING "VFS: Adding dquot with dq_count %d to dispose list.\n", atomic_read(&dquot->dq_count));
atomic_read 304 fs/ecryptfs/inode.c BUG_ON(!atomic_read(&lower_dentry->d_count));
atomic_read 535 fs/ecryptfs/miscdev.c BUG_ON(atomic_read(&ecryptfs_num_miscdev_opens) != 0);
atomic_read 802 fs/exec.c while (atomic_read(&sig->count) > count) {
atomic_read 883 fs/exec.c if (atomic_read(&oldsighand->count) != 1) {
atomic_read 1092 fs/exec.c if (atomic_read(&p->fs->count) > 1 ||
atomic_read 1093 fs/exec.c atomic_read(&p->files->count) > 1 ||
atomic_read 1094 fs/exec.c atomic_read(&p->sighand->count) > 1)
atomic_read 1551 fs/exec.c if (atomic_read(&mm->mm_users) == nr + 1)
atomic_read 173 fs/ext2/xattr.c atomic_read(&(bh->b_count)), le32_to_cpu(HDR(bh)->h_refcount));
atomic_read 273 fs/ext2/xattr.c atomic_read(&(bh->b_count)), le32_to_cpu(HDR(bh)->h_refcount));
atomic_read 404 fs/ext2/xattr.c atomic_read(&(bh->b_count)),
atomic_read 770 fs/ext2/xattr.c ea_bdebug(bh, "b_count=%d", atomic_read(&(bh->b_count)));
atomic_read 841 fs/ext2/xattr.c atomic_read(&ext2_xattr_cache->c_entry_count));
atomic_read 846 fs/ext2/xattr.c atomic_read(&ext2_xattr_cache->c_entry_count));
atomic_read 936 fs/ext2/xattr.c atomic_read(&(bh->b_count)));
atomic_read 38 fs/ext3/file.c (atomic_read(&inode->i_writecount) == 1))
atomic_read 103 fs/ext3/ialloc.c if (atomic_read(&inode->i_count) > 1) {
atomic_read 105 fs/ext3/ialloc.c atomic_read(&inode->i_count));
atomic_read 235 fs/ext3/xattr.c atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
atomic_read 377 fs/ext3/xattr.c atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
atomic_read 651 fs/ext3/xattr.c atomic_read(&(bs->bh->b_count)),
atomic_read 38 fs/ext4/file.c (atomic_read(&inode->i_writecount) == 1))
atomic_read 174 fs/ext4/ialloc.c if (atomic_read(&inode->i_count) > 1) {
atomic_read 176 fs/ext4/ialloc.c atomic_read(&inode->i_count));
atomic_read 2619 fs/ext4/mballoc.c atomic_read(&sbi->s_bal_allocated),
atomic_read 2620 fs/ext4/mballoc.c atomic_read(&sbi->s_bal_reqs),
atomic_read 2621 fs/ext4/mballoc.c atomic_read(&sbi->s_bal_success));
atomic_read 2625 fs/ext4/mballoc.c atomic_read(&sbi->s_bal_ex_scanned),
atomic_read 2626 fs/ext4/mballoc.c atomic_read(&sbi->s_bal_goals),
atomic_read 2627 fs/ext4/mballoc.c atomic_read(&sbi->s_bal_2orders),
atomic_read 2628 fs/ext4/mballoc.c atomic_read(&sbi->s_bal_breaks),
atomic_read 2629 fs/ext4/mballoc.c atomic_read(&sbi->s_mb_lost_chunks));
atomic_read 2636 fs/ext4/mballoc.c atomic_read(&sbi->s_mb_preallocated),
atomic_read 2637 fs/ext4/mballoc.c atomic_read(&sbi->s_mb_discarded));
atomic_read 3722 fs/ext4/mballoc.c if (atomic_read(&pa->pa_count)) {
atomic_read 3827 fs/ext4/mballoc.c if (atomic_read(&pa->pa_count)) {
atomic_read 4080 fs/ext4/mballoc.c atomic_read(&ar->inode->i_writecount) ? "" : "non-");
atomic_read 4105 fs/ext4/mballoc.c if (atomic_read(&pa->pa_count)) {
atomic_read 228 fs/ext4/xattr.c atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
atomic_read 370 fs/ext4/xattr.c atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
atomic_read 661 fs/ext4/xattr.c atomic_read(&(bs->bh->b_count)),
atomic_read 323 fs/file_table.c if (likely((atomic_read(&files->count) == 1))) {
atomic_read 354 fs/fs-writeback.c } else if (atomic_read(&inode->i_count)) {
atomic_read 380 fs/fs-writeback.c if (!atomic_read(&inode->i_count))
atomic_read 55 fs/fuse/control.c file->private_data=(void *)(long)atomic_read(&fc->num_waiting);
atomic_read 84 fs/fuse/dev.c BUG_ON(atomic_read(&req->count) < 2);
atomic_read 50 fs/gfs2/daemon.c while (atomic_read(&sdp->sd_reclaim_count))
atomic_read 54 fs/gfs2/daemon.c (atomic_read(&sdp->sd_reclaim_count) ||
atomic_read 1221 fs/gfs2/glock.c if (!atomic_read(&gl->gl_lvb_count)) {
atomic_read 1243 fs/gfs2/glock.c gfs2_assert(gl->gl_sbd, atomic_read(&gl->gl_lvb_count) > 0);
atomic_read 1683 fs/gfs2/glock.c atomic_read(&gl->gl_lvb_count),
atomic_read 1684 fs/gfs2/glock.c atomic_read(&gl->gl_ail_count),
atomic_read 1685 fs/gfs2/glock.c atomic_read(&gl->gl_ref));
atomic_read 47 fs/gfs2/glops.c blocks = atomic_read(&gl->gl_ail_count);
atomic_read 67 fs/gfs2/glops.c gfs2_assert_withdraw(sdp, !atomic_read(&gl->gl_ail_count));
atomic_read 385 fs/gfs2/glops.c return !atomic_read(&gl->gl_lvb_count);
atomic_read 307 fs/gfs2/log.c while(atomic_read(&sdp->sd_log_blks_free) <= (blks + reserved_blks)) {
atomic_read 338 fs/gfs2/log.c atomic_read(&sdp->sd_log_blks_free) <= sdp->sd_jdesc->jd_blocks);
atomic_read 562 fs/gfs2/log.c gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <= sdp->sd_jdesc->jd_blocks);
atomic_read 637 fs/gfs2/log.c if (atomic_read(&sdp->sd_log_in_flight)) {
atomic_read 641 fs/gfs2/log.c if (atomic_read(&sdp->sd_log_in_flight))
atomic_read 643 fs/gfs2/log.c } while(atomic_read(&sdp->sd_log_in_flight));
atomic_read 792 fs/gfs2/log.c gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
atomic_read 859 fs/gfs2/log.c gfs2_assert_warn(sdp, atomic_read(&sdp->sd_log_blks_free) == sdp->sd_jdesc->jd_blocks);
atomic_read 104 fs/gfs2/meta_io.c gfs2_assert_withdraw(sdp, !atomic_read(&gl->gl_ail_count));
atomic_read 1044 fs/gfs2/ops_address.c if (atomic_read(&bh->b_count))
atomic_read 1210 fs/gfs2/quota.c atomic_read(&sdp->sd_quota_count) > 0);
atomic_read 1266 fs/gfs2/quota.c gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_quota_count));
atomic_read 303 fs/gfs2/sys.c (unsigned int)atomic_read(&sdp->sd_##name)); \
atomic_read 304 fs/hfs/bnode.c node->tree->cnid, node->this, atomic_read(&node->refcnt));
atomic_read 446 fs/hfs/bnode.c node->tree->cnid, node->this, atomic_read(&node->refcnt));
atomic_read 458 fs/hfs/bnode.c node->tree->cnid, node->this, atomic_read(&node->refcnt));
atomic_read 459 fs/hfs/bnode.c BUG_ON(!atomic_read(&node->refcnt));
atomic_read 132 fs/hfs/btree.c if (atomic_read(&node->refcnt))
atomic_read 134 fs/hfs/btree.c node->tree->cnid, node->this, atomic_read(&node->refcnt));
atomic_read 79 fs/hfs/inode.c else if (atomic_read(&node->refcnt))
atomic_read 94 fs/hfs/inode.c if (atomic_read(&node->refcnt)) {
atomic_read 464 fs/hfsplus/bnode.c node->tree->cnid, node->this, atomic_read(&node->refcnt));
atomic_read 610 fs/hfsplus/bnode.c node->tree->cnid, node->this, atomic_read(&node->refcnt));
atomic_read 622 fs/hfsplus/bnode.c node->tree->cnid, node->this, atomic_read(&node->refcnt));
atomic_read 623 fs/hfsplus/bnode.c BUG_ON(!atomic_read(&node->refcnt));
atomic_read 110 fs/hfsplus/btree.c if (atomic_read(&node->refcnt))
atomic_read 112 fs/hfsplus/btree.c node->tree->cnid, node->this, atomic_read(&node->refcnt));
atomic_read 327 fs/hfsplus/dir.c atomic_read(&HFSPLUS_I(inode).opencnt)) {
atomic_read 348 fs/hfsplus/dir.c if (!atomic_read(&HFSPLUS_I(inode).opencnt)) {
atomic_read 76 fs/hfsplus/inode.c else if (atomic_read(&node->refcnt))
atomic_read 91 fs/hfsplus/inode.c if (atomic_read(&node->refcnt)) {
atomic_read 184 fs/hpfs/inode.c if (hpfs_inode->i_rddir_off && !atomic_read(&i->i_count)) {
atomic_read 417 fs/hpfs/namei.c if (atomic_read(&dentry->d_count) > 1 ||
atomic_read 240 fs/inode.c if (atomic_read(&inode->i_count)) {
atomic_read 340 fs/inode.c if (!atomic_read(&inode->i_count)) {
atomic_read 386 fs/inode.c if (atomic_read(&inode->i_count))
atomic_read 423 fs/inode.c if (inode->i_state || atomic_read(&inode->i_count)) {
atomic_read 367 fs/inotify.c if (!atomic_read(&inode->i_count))
atomic_read 387 fs/inotify.c atomic_read(&next_i->i_count) &&
atomic_read 392 fs/inotify_user.c if (atomic_read(&dev->user->inotify_watches) >=
atomic_read 608 fs/inotify_user.c if (unlikely(atomic_read(&user->inotify_devs) >=
atomic_read 57 fs/jbd/commit.c if (atomic_read(&bh->b_count) != 1)
atomic_read 713 fs/jbd/commit.c J_ASSERT_BH(bh, atomic_read(&bh->b_count) == 0);
atomic_read 1739 fs/jbd/journal.c (atomic_read(&bh->b_count) > 0) ||
atomic_read 1954 fs/jbd/journal.c int n = atomic_read(&nr_journal_heads);
atomic_read 62 fs/jbd2/commit.c if (atomic_read(&bh->b_count) != 1)
atomic_read 737 fs/jbd2/commit.c J_ASSERT_BH(bh, atomic_read(&bh->b_count) == 0);
atomic_read 2106 fs/jbd2/journal.c (atomic_read(&bh->b_count) > 0) ||
atomic_read 2395 fs/jbd2/journal.c int n = atomic_read(&nr_journal_heads);
atomic_read 854 fs/jffs2/xattr.c if (!atomic_read(&xd->refcnt)) {
atomic_read 1306 fs/jffs2/xattr.c if (atomic_read(&xd->refcnt) || xd->node != (void *)xd)
atomic_read 616 fs/jfs/jfs_dmap.c if ((atomic_read(&bmp->db_active[agpref]) == 0) &&
atomic_read 627 fs/jfs/jfs_dmap.c if (atomic_read(&bmp->db_active[agpref]))
atomic_read 762 fs/jfs/jfs_dmap.c if (atomic_read(&bmp->db_active[agno]))
atomic_read 796 fs/jfs/jfs_dmap.c writers = atomic_read(&bmp->db_active[agno]);
atomic_read 247 fs/jfs/jfs_imap.c dinom_le->in_numinos = cpu_to_le32(atomic_read(&imp->im_numinos));
atomic_read 248 fs/jfs/jfs_imap.c dinom_le->in_numfree = cpu_to_le32(atomic_read(&imp->im_numfree));
atomic_read 1383 fs/jfs/jfs_imap.c if (atomic_read(&JFS_SBI(pip->i_sb)->bmap->db_active[agno])) {
atomic_read 2879 fs/jfs/jfs_imap.c imap->im_nextiag, atomic_read(&imap->im_numinos),
atomic_read 2880 fs/jfs/jfs_imap.c atomic_read(&imap->im_numfree));
atomic_read 2987 fs/jfs/jfs_imap.c if (xnuminos != atomic_read(&imap->im_numinos) ||
atomic_read 2988 fs/jfs/jfs_imap.c xnumfree != atomic_read(&imap->im_numfree)) {
atomic_read 165 fs/jfs/super.c maxinodes = min((s64) atomic_read(&imap->im_numinos) +
atomic_read 169 fs/jfs/super.c buf->f_ffree = maxinodes - (atomic_read(&imap->im_numinos) -
atomic_read 170 fs/jfs/super.c atomic_read(&imap->im_numfree));
atomic_read 254 fs/lockd/host.c BUG_ON(atomic_read(&host->h_count));
atomic_read 466 fs/lockd/host.c BUG_ON(atomic_read(&host->h_count) < 0);
atomic_read 576 fs/lockd/host.c host->h_name, atomic_read(&host->h_count),
atomic_read 606 fs/lockd/host.c if (atomic_read(&host->h_count) || host->h_inuse
atomic_read 609 fs/lockd/host.c host->h_name, atomic_read(&host->h_count),
atomic_read 111 fs/lockd/mon.c if (atomic_read(&nsm->sm_count) == 1
atomic_read 1373 fs/locks.c if ((arg == F_RDLCK) && (atomic_read(&inode->i_writecount) > 0))
atomic_read 1376 fs/locks.c && ((atomic_read(&dentry->d_count) > 1)
atomic_read 1377 fs/locks.c || (atomic_read(&inode->i_count) > 1)))
atomic_read 211 fs/mbcache.c atomic_read(&cache->c_entry_count));
atomic_read 212 fs/mbcache.c count += atomic_read(&cache->c_entry_count);
atomic_read 378 fs/mbcache.c if (atomic_read(&cache->c_entry_count) > 0) {
atomic_read 381 fs/mbcache.c atomic_read(&cache->c_entry_count));
atomic_read 333 fs/namei.c if (atomic_read(&inode->i_writecount) < 0) {
atomic_read 348 fs/namei.c if (atomic_read(&inode->i_writecount) > 0) {
atomic_read 2130 fs/namei.c if (atomic_read(&dentry->d_count) == 2)
atomic_read 294 fs/namespace.c if (atomic_read(&mnt->__mnt_writers) >=
atomic_read 312 fs/namespace.c if ((atomic_read(&mnt->__mnt_writers) < 0) &&
atomic_read 316 fs/namespace.c mnt, atomic_read(&mnt->__mnt_writers));
atomic_read 376 fs/namespace.c if (atomic_read(&mnt->__mnt_writers) > 0) {
atomic_read 636 fs/namespace.c WARN_ON(atomic_read(&mnt->__mnt_writers));
atomic_read 947 fs/namespace.c actual_refs += atomic_read(&p->mnt_count);
atomic_read 1055 fs/namespace.c if (atomic_read(&mnt->mnt_count) != 2)
atomic_read 46 fs/ncpfs/file.c atomic_read(&NCP_FINFO(inode)->opened),
atomic_read 51 fs/ncpfs/file.c if (!atomic_read(&NCP_FINFO(inode)->opened)) {
atomic_read 201 fs/ncpfs/inode.c if (!atomic_read(&NCP_FINFO(inode)->opened)) {
atomic_read 293 fs/ncpfs/ncplib_kernel.c if (atomic_read(&NCP_FINFO(inode)->opened) == 1) {
atomic_read 215 fs/nfs/client.c dprintk("--> nfs_put_client({%d})\n", atomic_read(&clp->cl_count));
atomic_read 1416 fs/nfs/client.c atomic_read(&clp->cl_count),
atomic_read 1318 fs/nfs/dir.c atomic_read(&dentry->d_count));
atomic_read 1427 fs/nfs/dir.c if (atomic_read(&dentry->d_count) > 1) {
atomic_read 1582 fs/nfs/dir.c atomic_read(&new_dentry->d_count));
atomic_read 1598 fs/nfs/dir.c } else if (atomic_read(&new_dentry->d_count) > 2) {
atomic_read 1613 fs/nfs/dir.c } else if (atomic_read(&new_dentry->d_count) > 1)
atomic_read 1623 fs/nfs/dir.c if (atomic_read(&old_dentry->d_count) > 1) {
atomic_read 338 fs/nfs/inode.c atomic_read(&inode->i_count));
atomic_read 1054 fs/nfs/inode.c atomic_read(&inode->i_count), fattr->valid);
atomic_read 359 fs/nfs/super.c wait_event(server->active_wq, atomic_read(&server->active) == 0);
atomic_read 435 fs/nfsd/nfs4callback.c BUG_ON(atomic_read(&clp->cl_callback.cb_set));
atomic_read 399 fs/nfsd/nfs4state.c atomic_read(&clp->cl_count));
atomic_read 1340 fs/nfsd/nfs4state.c dprintk("NFSD nfsd_release_deleg_cb: fl %p dp %p dl_count %d\n", fl,dp, atomic_read(&dp->dl_count));
atomic_read 1633 fs/nfsd/nfs4state.c if (!atomic_read(&cb->cb_set))
atomic_read 1644 fs/nfsd/nfs4state.c if (!atomic_read(&cb->cb_set) || !sop->so_confirmed)
atomic_read 1810 fs/nfsd/nfs4state.c && !atomic_read(&clp->cl_callback.cb_set))
atomic_read 463 fs/nfsd/nfssvc.c update_thread_usage(atomic_read(&nfsd_busy));
atomic_read 473 fs/nfsd/nfssvc.c update_thread_usage(atomic_read(&nfsd_busy));
atomic_read 1030 fs/nfsd/vfs.c if (atomic_read(&inode->i_writecount) > 1
atomic_read 1705 fs/nfsd/vfs.c ((atomic_read(&odentry->d_count) > 1)
atomic_read 1706 fs/nfsd/vfs.c || (atomic_read(&ndentry->d_count) > 1))) {
atomic_read 1792 fs/nfsd/vfs.c (atomic_read(&rdentry->d_count) > 1)) {
atomic_read 2806 fs/ntfs/mft.c if (atomic_read(&ni->count) > 2) {
atomic_read 799 fs/ocfs2/cluster/heartbeat.c if (!change && (atomic_read(&reg->hr_steady_iterations) != 0)) {
atomic_read 1331 fs/ocfs2/cluster/heartbeat.c atomic_read(&reg->hr_steady_iterations) == 0);
atomic_read 1529 fs/ocfs2/cluster/heartbeat.c if (atomic_read(&reg->hr_steady_iterations) != 0) {
atomic_read 321 fs/ocfs2/cluster/netdebug.c atomic_read(&sc->sc_kref.refcount),
atomic_read 99 fs/ocfs2/cluster/tcp.c atomic_read(&__sc->sc_kref.refcount), __sc->sc_sock, \
atomic_read 442 fs/ocfs2/cluster/tcp.c return atomic_read(&o2net_connected_peers);
atomic_read 1567 fs/ocfs2/cluster/tcp.c timeout = atomic_read(&nn->nn_timeout);
atomic_read 1702 fs/ocfs2/cluster/tcp.c BUG_ON(atomic_read(&o2net_connected_peers) < 0);
atomic_read 84 fs/ocfs2/dlm/dlmdebug.c atomic_read(&lock->lock_refs.refcount),
atomic_read 110 fs/ocfs2/dlm/dlmdebug.c res->last_used, atomic_read(&res->refs.refcount),
atomic_read 118 fs/ocfs2/dlm/dlmdebug.c res->inflight_locks, atomic_read(&res->asts_reserved));
atomic_read 315 fs/ocfs2/dlm/dlmdebug.c atomic_read(&mle->mle_refs.refcount));
atomic_read 570 fs/ocfs2/dlm/dlmdebug.c atomic_read(&lock->lock_refs.refcount));
atomic_read 596 fs/ocfs2/dlm/dlmdebug.c atomic_read(&res->asts_reserved),
atomic_read 597 fs/ocfs2/dlm/dlmdebug.c atomic_read(&res->refs.refcount));
atomic_read 766 fs/ocfs2/dlm/dlmdebug.c lres = atomic_read(&dlm->local_resources);
atomic_read 767 fs/ocfs2/dlm/dlmdebug.c rres = atomic_read(&dlm->remote_resources);
atomic_read 768 fs/ocfs2/dlm/dlmdebug.c ures = atomic_read(&dlm->unknown_resources);
atomic_read 831 fs/ocfs2/dlm/dlmdebug.c atomic_read(&dlm->dlm_refs.refcount));
atomic_read 1603 fs/ocfs2/dlm/dlmdomain.c atomic_read(&dlm->dlm_refs.refcount));
atomic_read 246 fs/ocfs2/dlm/dlmmaster.c if (!atomic_read(&mle->mle_refs.refcount)) {
atomic_read 1088 fs/ocfs2/dlm/dlmmaster.c (atomic_read(&mle->woken) == 1),
atomic_read 1934 fs/ocfs2/dlm/dlmmaster.c rr = atomic_read(&mle->mle_refs.refcount);
atomic_read 2597 fs/ocfs2/dlm/dlmmaster.c (atomic_read(&mle->woken) == 1),
atomic_read 2601 fs/ocfs2/dlm/dlmmaster.c if (atomic_read(&mle->woken) == 1 ||
atomic_read 253 fs/ocfs2/dlm/dlmunlock.c atomic_read(&lock->lock_refs.refcount)-1);
atomic_read 212 fs/ocfs2/journal.c if (atomic_read(&journal->j_num_trans) == 0) {
atomic_read 229 fs/ocfs2/journal.c flushed = atomic_read(&journal->j_num_trans);
atomic_read 649 fs/ocfs2/journal.c num_running_trans = atomic_read(&(osb->journal->j_num_trans));
atomic_read 671 fs/ocfs2/journal.c BUG_ON(atomic_read(&(osb->journal->j_num_trans)) != 0);
atomic_read 1667 fs/ocfs2/journal.c atomic_read(&osb->vol_state) == VOLUME_MOUNTED ||
atomic_read 1668 fs/ocfs2/journal.c atomic_read(&osb->vol_state) == VOLUME_DISABLED);
atomic_read 1673 fs/ocfs2/journal.c if (atomic_read(&osb->vol_state) == VOLUME_DISABLED) {
atomic_read 1692 fs/ocfs2/journal.c atomic_read(&journal->j_num_trans) == 0)) {
atomic_read 1695 fs/ocfs2/journal.c atomic_read(&journal->j_num_trans)
atomic_read 1702 fs/ocfs2/journal.c if (kthread_should_stop() && atomic_read(&journal->j_num_trans)){
atomic_read 1706 fs/ocfs2/journal.c atomic_read(&journal->j_num_trans));
atomic_read 215 fs/ocfs2/stack_user.c if (atomic_read(&ocfs2_control_opened))
atomic_read 615 fs/ocfs2/suballoc.c atomic_read(&osb->s_num_inodes_stolen) < OCFS2_MAX_INODES_TO_STEAL)
atomic_read 282 fs/pnode.c int mycount = atomic_read(&mnt->mnt_count) - mnt->mnt_ghosts;
atomic_read 264 fs/proc/array.c num_threads = atomic_read(&p->signal->count);
atomic_read 265 fs/proc/array.c qsize = atomic_read(&p->user->sigpending);
atomic_read 379 fs/proc/array.c num_threads = atomic_read(&sig->count);
atomic_read 168 fs/proc/base.c count = atomic_read(&tsk->signal->count);
atomic_read 39 fs/proc/inode.c if (!atomic_read(&de->count)) {
atomic_read 27 fs/proc/task_nommu.c if (atomic_read(&mm->mm_count) > 1 ||
atomic_read 28 fs/proc/task_nommu.c atomic_read(&vml->vma->vm_usage) > 1
atomic_read 40 fs/proc/task_nommu.c if (atomic_read(&mm->mm_count) > 1)
atomic_read 45 fs/proc/task_nommu.c if (current->fs && atomic_read(&current->fs->count) > 1)
atomic_read 50 fs/proc/task_nommu.c if (current->files && atomic_read(&current->files->count) > 1)
atomic_read 55 fs/proc/task_nommu.c if (current->sighand && atomic_read(&current->sighand->count) > 1)
atomic_read 1264 fs/reiserfs/bitmap.c BUG_ON(atomic_read(&bh->b_count) == 0);
atomic_read 42 fs/reiserfs/file.c if ((atomic_read(&inode->i_count) > 1 ||
atomic_read 97 fs/reiserfs/file.c if (!err && atomic_read(&inode->i_count) <= 1 &&
atomic_read 2096 fs/reiserfs/fix_node.c if (atomic_read(&(p_s_bh->b_count)) <= 0) {
atomic_read 788 fs/reiserfs/journal.c if (atomic_read(&nr_reiserfs_jh) <= 0)
atomic_read 949 fs/reiserfs/journal.c atomic_read(&other_jl->j_older_commits_done))
atomic_read 969 fs/reiserfs/journal.c if (atomic_read(&other_jl->j_commit_left) != 0) {
atomic_read 999 fs/reiserfs/journal.c if (atomic_read(&j->j_async_throttle))
atomic_read 1025 fs/reiserfs/journal.c if (atomic_read(&jl->j_older_commits_done)) {
atomic_read 1054 fs/reiserfs/journal.c if (atomic_read(&(jl->j_commit_left)) <= 0) {
atomic_read 1134 fs/reiserfs/journal.c BUG_ON(atomic_read(&(jl->j_commit_left)) != 1);
atomic_read 1222 fs/reiserfs/journal.c atomic_read(&cn->jlist->j_commit_left) != 0)
atomic_read 1403 fs/reiserfs/journal.c if (atomic_read(&journal->j_wcount) != 0) {
atomic_read 1406 fs/reiserfs/journal.c atomic_read(&journal->j_wcount));
atomic_read 1428 fs/reiserfs/journal.c if (atomic_read(&(jl->j_nonzerolen)) <= 0 &&
atomic_read 1429 fs/reiserfs/journal.c atomic_read(&(jl->j_commit_left)) <= 0) {
atomic_read 1443 fs/reiserfs/journal.c if (atomic_read(&(jl->j_nonzerolen)) <= 0 &&
atomic_read 1444 fs/reiserfs/journal.c atomic_read(&(jl->j_commit_left)) <= 0) {
atomic_read 1451 fs/reiserfs/journal.c if (atomic_read(&(journal->j_wcount)) != 0) {
atomic_read 1496 fs/reiserfs/journal.c if (atomic_read(&pjl->j_commit_left))
atomic_read 1543 fs/reiserfs/journal.c if (atomic_read(&(saved_bh->b_count)) < 0) {
atomic_read 1653 fs/reiserfs/journal.c if (jl->j_len == 0 || atomic_read(&jl->j_nonzerolen) == 0)
atomic_read 1682 fs/reiserfs/journal.c if (jl->j_len == 0 || atomic_read(&jl->j_nonzerolen) == 0) {
atomic_read 1785 fs/reiserfs/journal.c atomic_read(&jl->j_commit_left)
atomic_read 1846 fs/reiserfs/journal.c if (atomic_read(&tjl->j_commit_left) ||
atomic_read 1850 fs/reiserfs/journal.c cur_len = atomic_read(&tjl->j_nonzerolen);
atomic_read 2901 fs/reiserfs/journal.c atomic_read(&(journal->j_jlock)) ||
atomic_read 2977 fs/reiserfs/journal.c while ((atomic_read(&journal->j_wcount) > 0 ||
atomic_read 2978 fs/reiserfs/journal.c atomic_read(&journal->j_jlock)) &&
atomic_read 3040 fs/reiserfs/journal.c || (!join && atomic_read(&journal->j_wcount) > 0
atomic_read 3044 fs/reiserfs/journal.c && atomic_read(&journal->j_jlock))
atomic_read 3054 fs/reiserfs/journal.c if (atomic_read(&journal->j_wcount) > 10) {
atomic_read 3063 fs/reiserfs/journal.c if (atomic_read(&journal->j_jlock)) {
atomic_read 3065 fs/reiserfs/journal.c atomic_read(&journal->j_jlock)) {
atomic_read 3275 fs/reiserfs/journal.c if (atomic_read(&(journal->j_wcount)) <= 0) {
atomic_read 3278 fs/reiserfs/journal.c atomic_read(&(journal->j_wcount)));
atomic_read 3412 fs/reiserfs/journal.c if (atomic_read(&(bh->b_count)) < 0) {
atomic_read 3458 fs/reiserfs/journal.c atomic_read(&(cur->jlist->j_commit_left)) > 0 && cur->bh &&
atomic_read 3528 fs/reiserfs/journal.c if (atomic_read(&journal->j_wcount) <= 0 &&
atomic_read 3580 fs/reiserfs/journal.c if (atomic_read(&(journal->j_wcount)) > 0) { /* <= 0 is allowed. unmounting might not call begin */
atomic_read 3596 fs/reiserfs/journal.c if (atomic_read(&(journal->j_wcount)) > 0) {
atomic_read 3612 fs/reiserfs/journal.c if (atomic_read(&journal->j_jlock)) {
atomic_read 3644 fs/reiserfs/journal.c if (!(journal->j_must_wait > 0) && !(atomic_read(&(journal->j_jlock)))
atomic_read 3731 fs/reiserfs/journal.c if (atomic_read
atomic_read 3811 fs/reiserfs/journal.c if (atomic_read(&jl->j_commit_left) > 1)
atomic_read 3900 fs/reiserfs/journal.c atomic_read(&jl->j_commit_left) == 0 &&
atomic_read 147 fs/reiserfs/prints.c (unsigned long long)bh->b_blocknr, atomic_read(&(bh->b_count)),
atomic_read 618 fs/reiserfs/prints.c (tbSh) ? atomic_read(&(tbSh->b_count)) : -1,
atomic_read 620 fs/reiserfs/prints.c (tb->L[h]) ? atomic_read(&(tb->L[h]->b_count)) : -1,
atomic_read 622 fs/reiserfs/prints.c (tb->R[h]) ? atomic_read(&(tb->R[h]->b_count)) : -1,
atomic_read 664 fs/reiserfs/prints.c tb->FEB[i] ? atomic_read(&(tb->FEB[i]->b_count)) : 0,
atomic_read 128 fs/reiserfs/procfs.c atomic_read(&r->s_generation_counter),
atomic_read 372 fs/reiserfs/procfs.c atomic_read(&r->s_journal->j_wcount),
atomic_read 365 fs/reiserfs/stree.c if (atomic_read(&(p_s_bh->b_count))) {
atomic_read 907 fs/reiserfs/stree.c #define held_by_others(bh) (atomic_read(&(bh)->b_count) > 1)
atomic_read 1481 fs/reiserfs/stree.c if (atomic_read(&p_s_inode->i_count) > 1 ||
atomic_read 94 fs/smbfs/request.c if (atomic_read(&server->nr_requests) <= MAX_REQUEST_HARD) {
atomic_read 144 fs/sysfs/dir.c v = atomic_read(&sd->s_active);
atomic_read 83 fs/sysfs/file.c buffer->event = atomic_read(&attr_sd->s_attr.open->event);
atomic_read 433 fs/sysfs/file.c if (buffer->event != atomic_read(&od->event))
atomic_read 130 fs/sysfs/sysfs.h WARN_ON(!atomic_read(&sd->s_count));
atomic_read 335 fs/ubifs/super.c ubifs_assert(!atomic_read(&inode->i_count));
atomic_read 349 fs/xfs/linux-2.6/xfs_aops.c ASSERT(atomic_read(&bio->bi_cnt) >= 1);
atomic_read 841 fs/xfs/linux-2.6/xfs_buf.c ASSERT(atomic_read(&bp->b_hold) > 0);
atomic_read 909 fs/xfs/linux-2.6/xfs_buf.c if (atomic_read(&bp->b_io_remaining))
atomic_read 964 fs/xfs/linux-2.6/xfs_buf.c return atomic_read(&bp->b_pin_count);
atomic_read 973 fs/xfs/linux-2.6/xfs_buf.c if (atomic_read(&bp->b_pin_count) == 0)
atomic_read 979 fs/xfs/linux-2.6/xfs_buf.c if (atomic_read(&bp->b_pin_count) == 0)
atomic_read 981 fs/xfs/linux-2.6/xfs_buf.c if (atomic_read(&bp->b_io_remaining))
atomic_read 1273 fs/xfs/linux-2.6/xfs_buf.c if (atomic_read(&bp->b_io_remaining))
atomic_read 58 fs/xfs/linux-2.6/xfs_vnode.c wait_event(*wq, (atomic_read(&ip->i_iocount) == 0));
atomic_read 67 fs/xfs/linux-2.6/xfs_vnode.h return atomic_read(&vp->i_count);
atomic_read 72 fs/xfs/linux-2.6/xfs_vnode.h ASSERT(atomic_read(&VFS_I(ip)->i_count) > 0) ; \
atomic_read 2180 fs/xfs/quota/xfs_qm.c ndqused = atomic_read(&xfs_Gqm->qm_totaldquots) - nfree;
atomic_read 2333 fs/xfs/quota/xfs_qm.c if (atomic_read(&xfs_Gqm->qm_totaldquots) >= ndquot) {
atomic_read 66 fs/xfs/quota/xfs_qm_stats.c xfs_Gqm? atomic_read(&xfs_Gqm->qm_totaldquots) : 0,
atomic_read 206 fs/xfs/support/ktrace.c index = atomic_read(&ktp->kt_index) & ktp->kt_index_mask;
atomic_read 232 fs/xfs/support/ktrace.c index = atomic_read(&ktp->kt_index) & ktp->kt_index_mask;
atomic_read 160 fs/xfs/xfs_buf_item.c ASSERT(atomic_read(&bip->bli_refcount) > 0);
atomic_read 234 fs/xfs/xfs_buf_item.c ASSERT(atomic_read(&bip->bli_refcount) > 0);
atomic_read 355 fs/xfs/xfs_buf_item.c ASSERT(atomic_read(&bip->bli_refcount) > 0);
atomic_read 385 fs/xfs/xfs_buf_item.c ASSERT(atomic_read(&bip->bli_refcount) > 0);
atomic_read 440 fs/xfs/xfs_buf_item.c if ((atomic_read(&bip->bli_refcount) == 1) &&
atomic_read 1164 fs/xfs/xfs_buf_item.c (void *)((unsigned long)atomic_read(&bip->bli_refcount)),
atomic_read 87 fs/xfs/xfs_filestream.h return atomic_read(&mp->m_perag[agno].pagf_fstrms);
atomic_read 2701 fs/xfs/xfs_inode.c ASSERT(atomic_read(&ip->i_pincount) > 0);
atomic_read 2721 fs/xfs/xfs_inode.c if (atomic_read(&ip->i_pincount) == 0)
atomic_read 2728 fs/xfs/xfs_inode.c wait_event(ip->i_ipin_wait, (atomic_read(&ip->i_pincount) == 0));
atomic_read 561 fs/xfs/xfs_inode.h #define xfs_ipincount(ip) ((unsigned int) atomic_read(&ip->i_pincount))
atomic_read 1460 fs/xfs/xfs_log.c ASSERT(atomic_read(&iclog->ic_refcnt) == 0);
atomic_read 2336 fs/xfs/xfs_log.c ASSERT(atomic_read(&iclog->ic_refcnt) == 0);
atomic_read 2841 fs/xfs/xfs_log.c ASSERT(atomic_read(&iclog->ic_refcnt) > 0);
atomic_read 2972 fs/xfs/xfs_log.c (atomic_read(&iclog->ic_refcnt) == 0
atomic_read 2981 fs/xfs/xfs_log.c if (atomic_read(&iclog->ic_refcnt) == 0) {
atomic_read 117 fs/xfs/xfs_trans_buf.c ASSERT(atomic_read(&bip->bli_refcount) > 0);
atomic_read 211 fs/xfs/xfs_trans_buf.c ASSERT(atomic_read(&bip->bli_refcount) > 0);
atomic_read 387 fs/xfs/xfs_trans_buf.c ASSERT(atomic_read(&bip->bli_refcount) > 0);
atomic_read 544 fs/xfs/xfs_trans_buf.c ASSERT(atomic_read(&bip->bli_refcount) > 0);
atomic_read 616 fs/xfs/xfs_trans_buf.c ASSERT(atomic_read(&bip->bli_refcount) == 0);
atomic_read 703 fs/xfs/xfs_trans_buf.c ASSERT(atomic_read(&bip->bli_refcount) > 0);
atomic_read 725 fs/xfs/xfs_trans_buf.c ASSERT(atomic_read(&bip->bli_refcount) > 0);
atomic_read 770 fs/xfs/xfs_trans_buf.c ASSERT(atomic_read(&bip->bli_refcount) > 0);
atomic_read 833 fs/xfs/xfs_trans_buf.c ASSERT(atomic_read(&bip->bli_refcount) > 0);
atomic_read 909 fs/xfs/xfs_trans_buf.c ASSERT(atomic_read(&bip->bli_refcount) > 0);
atomic_read 935 fs/xfs/xfs_trans_buf.c ASSERT(atomic_read(&bip->bli_refcount) > 0);
atomic_read 965 fs/xfs/xfs_trans_buf.c ASSERT(atomic_read(&bip->bli_refcount) > 0);
atomic_read 998 fs/xfs/xfs_trans_buf.c ASSERT(atomic_read(&bip->bli_refcount) > 0);
atomic_read 99 fs/xfs/xfs_vfsops.c while (atomic_read(&mp->m_active_trans) > 0)
atomic_read 105 fs/xfs/xfs_vfsops.c ASSERT_ALWAYS(atomic_read(&mp->m_active_trans) == 0);
atomic_read 187 include/asm-frv/atomic.h c = atomic_read(v);
atomic_read 148 include/asm-generic/atomic.h return (long)atomic_read(v);
atomic_read 259 include/asm-m32r/atomic.h c = atomic_read(v);
atomic_read 68 include/asm-m68k/atomic.h : "g" (i), "2" (atomic_read(v)));
atomic_read 82 include/asm-m68k/atomic.h : "g" (i), "2" (atomic_read(v)));
atomic_read 97 include/asm-m68k/atomic.h t = atomic_read(v);
atomic_read 111 include/asm-m68k/atomic.h t = atomic_read(v);
atomic_read 125 include/asm-m68k/atomic.h prev = atomic_read(v);
atomic_read 138 include/asm-m68k/atomic.h prev = atomic_read(v);
atomic_read 176 include/asm-m68k/atomic.h c = atomic_read(v);
atomic_read 136 include/asm-mn10300/atomic.h c = atomic_read(v); \
atomic_read 214 include/asm-parisc/atomic.h c = atomic_read(v);
atomic_read 24 include/asm-parisc/mmu_context.h BUG_ON(atomic_read(&mm->mm_users) != 1);
atomic_read 192 include/asm-x86/atomic_32.h __i = atomic_read(v);
atomic_read 226 include/asm-x86/atomic_32.h c = atomic_read(v);
atomic_read 390 include/asm-x86/atomic_64.h c = atomic_read(v);
atomic_read 34 include/asm-x86/es7000/wakecpu.h while (!atomic_read(deassert))
atomic_read 18 include/asm-x86/mach-default/mach_wakecpu.h while (!atomic_read(deassert))
atomic_read 333 include/asm-x86/spinlock.h if (atomic_read(count) >= 0)
atomic_read 240 include/asm-xtensa/atomic.h c = atomic_read(v);
atomic_read 447 include/linux/atmdev.h return (size + atomic_read(&sk_atm(vcc)->sk_wmem_alloc)) <
atomic_read 319 include/linux/buffer_head.h if (buffer_locked(bh) || atomic_read(&bh->b_count) == 0)
atomic_read 326 include/linux/dcache.h BUG_ON(!atomic_read(&dentry->d_count));
atomic_read 1195 include/linux/fs.h #define has_fs_excl() atomic_read(&current->fs_excl)
atomic_read 241 include/linux/mm.h VM_BUG_ON(atomic_read(&page->_count) == 0);
atomic_read 285 include/linux/mm.h return atomic_read(&compound_head(page)->_count);
atomic_read 291 include/linux/mm.h VM_BUG_ON(atomic_read(&page->_count) == 0);
atomic_read 669 include/linux/mm.h return atomic_read(&(page)->_mapcount) + 1;
atomic_read 677 include/linux/mm.h return atomic_read(&(page)->_mapcount) >= 0;
atomic_read 117 include/linux/mutex.h return atomic_read(&lock->count) != 1;
atomic_read 1266 include/linux/reiserfs_fs.h #define get_generation(s) atomic_read (&fs_generation(s))
atomic_read 537 include/linux/skbuff.h (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1;
atomic_read 554 include/linux/skbuff.h dataref = atomic_read(&skb_shinfo(skb)->dataref);
atomic_read 583 include/linux/skbuff.h return atomic_read(&skb->users) != 1;
atomic_read 149 include/linux/sunrpc/cache.h if (atomic_read(&h->ref.refcount) <= 2 &&
atomic_read 204 include/net/dn_nsp.h return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1);
atomic_read 201 include/net/dst.h if (!atomic_read(&dst->__refcnt)) {
atomic_read 677 include/net/ip_vs.h if (atomic_read(&ctl_cp->n_control) == 0) {
atomic_read 266 include/net/llc_c_ev.h return atomic_read(&sk->sk_rmem_alloc) + skb->truesize <
atomic_read 110 include/net/net_namespace.h return net && atomic_read(&net->count);
atomic_read 358 include/net/sock.h WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
atomic_read 627 include/net/sock.h sk->sk_prot->name, sk, atomic_read(&sk->sk_prot->socks));
atomic_read 632 include/net/sock.h if (atomic_read(&sk->sk_refcnt) != 1)
atomic_read 634 include/net/sock.h sk->sk_prot->name, sk, atomic_read(&sk->sk_refcnt));
atomic_read 1160 include/net/sock.h if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
atomic_read 1188 include/net/sock.h amt = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc);
atomic_read 1231 include/net/sock.h return atomic_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf >> 1);
atomic_read 265 include/net/tcp.h atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2]);
atomic_read 521 include/net/tcp.h atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
atomic_read 953 include/net/tcp.h atomic_read(&sk->sk_rmem_alloc));
atomic_read 1207 include/net/xfrm.h return atomic_read(&x->tunnel_users);
atomic_read 96 ipc/msg.c nb_ns = atomic_read(&nr_ipc_ns);
atomic_read 508 ipc/msg.c msginfo.msgmap = atomic_read(&ns->msg_hdrs);
atomic_read 509 ipc/msg.c msginfo.msgtql = atomic_read(&ns->msg_bytes);
atomic_read 254 kernel/audit.c atomic_read(&audit_lost),
atomic_read 698 kernel/audit.c status_set.lost = atomic_read(&audit_lost);
atomic_read 1731 kernel/cgroup.c count += atomic_read(&link->cg->ref.refcount);
atomic_read 2396 kernel/cgroup.c if (css && atomic_read(&css->refcnt))
atomic_read 2413 kernel/cgroup.c if (atomic_read(&cgrp->count) != 0) {
atomic_read 2998 kernel/cgroup.c if (cgroup_is_releasable(cgrp) && !atomic_read(&cgrp->count)
atomic_read 36 kernel/cgroup_debug.c return atomic_read(&cont->count);
atomic_read 60 kernel/cgroup_debug.c count = atomic_read(&current->cgroups->ref.refcount);
atomic_read 146 kernel/exec_domain.c if (atomic_read(&current->fs->count) != 1) {
atomic_read 87 kernel/exit.c BUG_ON(!atomic_read(&sig->count));
atomic_read 100 kernel/exit.c if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count)
atomic_read 586 kernel/exit.c if (atomic_read(&mm->mm_users) <= 1)
atomic_read 1001 kernel/exit.c WARN_ON(atomic_read(&tsk->fs_excl));
atomic_read 145 kernel/fork.c WARN_ON(atomic_read(&tsk->usage));
atomic_read 538 kernel/fork.c && atomic_read(&mm->mm_users) > 1) {
atomic_read 944 kernel/fork.c if (atomic_read(&p->user->processes) >=
atomic_read 1472 kernel/fork.c (atomic_read(&current->signal->count) > 1))
atomic_read 1501 kernel/fork.c (fs && atomic_read(&fs->count) > 1)) {
atomic_read 1517 kernel/fork.c if ((unshare_flags & CLONE_SIGHAND) && atomic_read(&sigh->count) > 1)
atomic_read 1531 kernel/fork.c (mm && atomic_read(&mm->mm_users) > 1)) {
atomic_read 1547 kernel/fork.c (fd && atomic_read(&fd->count) > 1)) {
atomic_read 535 kernel/futex.c WARN_ON(!atomic_read(&pi_state->refcount));
atomic_read 543 kernel/kgdb.c tid = -atomic_read(&kgdb_active) - 2;
atomic_read 581 kernel/kgdb.c while (atomic_read(&passive_cpu_wait[cpu]))
atomic_read 818 kernel/kgdb.c if (atomic_read(&kgdb_setting_breakpoint))
atomic_read 1346 kernel/kgdb.c if (atomic_read(&kgdb_active) != raw_smp_processor_id())
atomic_read 1432 kernel/kgdb.c if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
atomic_read 1433 kernel/kgdb.c atomic_read(&kgdb_cpu_doing_single_step) != cpu) {
atomic_read 1488 kernel/kgdb.c while (!atomic_read(&cpu_in_kgdb[i]))
atomic_read 1521 kernel/kgdb.c while (atomic_read(&cpu_in_kgdb[i]))
atomic_read 1539 kernel/kgdb.c if (!atomic_read(&cpu_in_kgdb[cpu]) &&
atomic_read 1540 kernel/kgdb.c atomic_read(&kgdb_active) != cpu &&
atomic_read 1541 kernel/kgdb.c atomic_read(&cpu_in_kgdb[atomic_read(&kgdb_active)])) {
atomic_read 1556 kernel/kgdb.c if (!kgdb_connected || atomic_read(&kgdb_active) != -1)
atomic_read 101 kernel/kmod.c if (atomic_read(&kmod_concurrent) > max_modprobes) {
atomic_read 309 kernel/kmod.c atomic_read(&running_helpers) == 0,
atomic_read 432 kernel/lockdep.c # define debug_atomic_read(ptr) atomic_read(ptr)
atomic_read 92 kernel/lockdep_internals.h # define debug_atomic_read(ptr) atomic_read(ptr)
atomic_read 59 kernel/ns_cgroup.c if (atomic_read(&new_cgroup->count) != 0)
atomic_read 152 kernel/pid.c if (likely(atomic_read(&map->nr_free))) {
atomic_read 211 kernel/pid.c if ((atomic_read(&pid->count) == 1) ||
atomic_read 153 kernel/pm_qos_params.c if (atomic_read(&pm_qos_array[target]->target_value) != extreme_value) {
atomic_read 157 kernel/pm_qos_params.c atomic_read(&pm_qos_array[target]->target_value));
atomic_read 196 kernel/pm_qos_params.c return atomic_read(&pm_qos_array[pm_qos_class]->target_value);
atomic_read 493 kernel/posix-cpu-timers.c unsigned int nthreads = atomic_read(&p->signal->live);
atomic_read 1206 kernel/posix-cpu-timers.c const unsigned int nthreads = atomic_read(&sig->live);
atomic_read 152 kernel/rcupreempt_trace.c atomic_set(&sp->done_invoked, atomic_read(&cp->done_invoked));
atomic_read 155 kernel/rcupreempt_trace.c atomic_read(&cp->rcu_try_flip_1));
atomic_read 157 kernel/rcupreempt_trace.c atomic_read(&cp->rcu_try_flip_e1));
atomic_read 194 kernel/rcupreempt_trace.c trace.done_remove, atomic_read(&trace.done_invoked),
atomic_read 195 kernel/rcupreempt_trace.c atomic_read(&trace.rcu_try_flip_1),
atomic_read 196 kernel/rcupreempt_trace.c atomic_read(&trace.rcu_try_flip_e1),
atomic_read 771 kernel/rcutorture.c atomic_read(&n_rcu_torture_alloc),
atomic_read 772 kernel/rcutorture.c atomic_read(&n_rcu_torture_alloc_fail),
atomic_read 773 kernel/rcutorture.c atomic_read(&n_rcu_torture_free),
atomic_read 774 kernel/rcutorture.c atomic_read(&n_rcu_torture_mberror),
atomic_read 776 kernel/rcutorture.c if (atomic_read(&n_rcu_torture_mberror) != 0)
atomic_read 795 kernel/rcutorture.c atomic_read(&rcu_torture_wcount[i]));
atomic_read 1002 kernel/rcutorture.c if (atomic_read(&n_rcu_torture_error))
atomic_read 2707 kernel/sched.c sum += atomic_read(&cpu_rq(i)->nr_iowait);
atomic_read 3820 kernel/sched.c atomic_read(&nohz.load_balancer) == cpu) {
atomic_read 3828 kernel/sched.c if (atomic_read(&nohz.load_balancer) == cpu)
atomic_read 3833 kernel/sched.c if (atomic_read(&nohz.load_balancer) == -1) {
atomic_read 3837 kernel/sched.c } else if (atomic_read(&nohz.load_balancer) == cpu)
atomic_read 3845 kernel/sched.c if (atomic_read(&nohz.load_balancer) == cpu)
atomic_read 3953 kernel/sched.c atomic_read(&nohz.load_balancer) == this_cpu) {
atomic_read 3996 kernel/sched.c if (atomic_read(&nohz.load_balancer) == cpu) {
atomic_read 4001 kernel/sched.c if (atomic_read(&nohz.load_balancer) == -1) {
atomic_read 4021 kernel/sched.c if (rq->idle_at_tick && atomic_read(&nohz.load_balancer) == cpu &&
atomic_read 4031 kernel/sched.c if (rq->idle_at_tick && atomic_read(&nohz.load_balancer) != cpu &&
atomic_read 4156 kernel/sched.c else if (atomic_read(&rq->nr_iowait) > 0)
atomic_read 4188 kernel/sched.c if (atomic_read(&rq->nr_iowait) > 0)
atomic_read 337 kernel/sched_debug.c num_threads = atomic_read(&p->signal->count);
atomic_read 10 kernel/sched_rt.c return atomic_read(&rq->rd->rto_count);
atomic_read 193 kernel/signal.c atomic_read(&user->sigpending) <=
atomic_read 383 kernel/softirq.c if (!atomic_read(&t->count)) {
atomic_read 418 kernel/softirq.c if (!atomic_read(&t->count)) {
atomic_read 560 kernel/sys.c if (atomic_read(&new_user->processes) >=
atomic_read 303 kernel/time/timer_stats.c if (atomic_read(&overflow_count))
atomic_read 305 kernel/time/timer_stats.c atomic_read(&overflow_count));
atomic_read 742 kernel/trace/trace.c if (atomic_read(&trace_record_cmdline_disabled))
atomic_read 851 kernel/trace/trace.c if (likely(!atomic_read(&data->disabled)))
atomic_read 198 kernel/trace/trace_irqsoff.c if (unlikely(!data) || atomic_read(&data->disabled))
atomic_read 238 kernel/trace/trace_irqsoff.c !data->critical_start || atomic_read(&data->disabled))
atomic_read 58 kernel/trace/trace_sched_switch.c if (!atomic_read(&sched_ref))
atomic_read 278 kernel/trace/trace_sched_switch.c if (atomic_read(&sched_ref))
atomic_read 107 lib/fault-inject.c if (atomic_read(&attr->times) == 0)
atomic_read 110 lib/fault-inject.c if (atomic_read(&attr->space) > size) {
atomic_read 129 lib/fault-inject.c if (atomic_read(&attr->times) != -1)
atomic_read 188 lib/fault-inject.c *val = atomic_read((atomic_t *)data);
atomic_read 43 lib/kref.c WARN_ON(!atomic_read(&kref->refcount));
atomic_read 33 mm/internal.h VM_BUG_ON(atomic_read(&page->_count));
atomic_read 920 mm/memcontrol.c if (atomic_read(&mem->css.cgroup->count) > 0)
atomic_read 77 mm/memory_hotplug.c type = atomic_read(&page->_mapcount);
atomic_read 151 mm/mmu_notifier.c BUG_ON(atomic_read(&mm->mm_users) <= 0);
atomic_read 191 mm/mmu_notifier.c BUG_ON(atomic_read(&mm->mm_users) <= 0);
atomic_read 244 mm/mmu_notifier.c BUG_ON(atomic_read(&mm->mm_count) <= 0);
atomic_read 273 mm/mmu_notifier.c BUG_ON(atomic_read(&mm->mm_count) <= 0);
atomic_read 395 mm/nommu.c atomic_read(&vml->vma->vm_usage));
atomic_read 4234 mm/slab.c unsigned long allochit = atomic_read(&cachep->allochit);
atomic_read 4235 mm/slab.c unsigned long allocmiss = atomic_read(&cachep->allocmiss);
atomic_read 4236 mm/slab.c unsigned long freehit = atomic_read(&cachep->freehit);
atomic_read 4237 mm/slab.c unsigned long freemiss = atomic_read(&cachep->freemiss);
atomic_read 512 mm/sparse.c magic = atomic_read(&page->_mapcount);
atomic_read 785 mm/swapfile.c if (atomic_read(&start_mm->mm_users) == 1) {
atomic_read 207 net/appletalk/atalk_proc.c atomic_read(&s->sk_wmem_alloc),
atomic_read 208 net/appletalk/atalk_proc.c atomic_read(&s->sk_rmem_alloc),
atomic_read 165 net/appletalk/ddp.c if (atomic_read(&sk->sk_wmem_alloc) ||
atomic_read 166 net/appletalk/ddp.c atomic_read(&sk->sk_rmem_alloc)) {
atomic_read 178 net/appletalk/ddp.c if (atomic_read(&sk->sk_wmem_alloc) ||
atomic_read 179 net/appletalk/ddp.c atomic_read(&sk->sk_rmem_alloc)) {
atomic_read 1764 net/appletalk/ddp.c atomic_read(&sk->sk_wmem_alloc);
atomic_read 19 net/atm/atm_misc.c if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
atomic_read 34 net/atm/atm_misc.c if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) {
atomic_read 91 net/atm/atm_misc.c #define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
atomic_read 138 net/atm/clip.c if (atomic_read(&n->refcnt) > 1) {
atomic_read 142 net/atm/clip.c atomic_read(&n->refcnt));
atomic_read 837 net/atm/clip.c atomic_read(&entry->neigh->refcnt));
atomic_read 65 net/atm/common.c if (atomic_read(&sk->sk_wmem_alloc) && !atm_may_send(vcc, size)) {
atomic_read 67 net/atm/common.c atomic_read(&sk->sk_wmem_alloc), size,
atomic_read 72 net/atm/common.c pr_debug("AlTx %d += %d\n", atomic_read(&sk->sk_wmem_alloc),
atomic_read 85 net/atm/common.c if (atomic_read(&sk->sk_rmem_alloc))
atomic_read 86 net/atm/common.c printk(KERN_DEBUG "vcc_sock_destruct: rmem leakage (%d bytes) detected.\n", atomic_read(&sk->sk_rmem_alloc));
atomic_read 88 net/atm/common.c if (atomic_read(&sk->sk_wmem_alloc))
atomic_read 89 net/atm/common.c printk(KERN_DEBUG "vcc_sock_destruct: wmem leakage (%d bytes) detected.\n", atomic_read(&sk->sk_wmem_alloc));
atomic_read 105 net/atm/common.c atomic_read(&sk->sk_wmem_alloc)) <= sk->sk_sndbuf;
atomic_read 500 net/atm/common.c pr_debug("RcvM %d -= %d\n", atomic_read(&sk->sk_rmem_alloc), skb->truesize);
atomic_read 66 net/atm/ioctl.c atomic_read(&sk->sk_wmem_alloc),
atomic_read 46 net/atm/proc.c atomic_read(&stats->tx),atomic_read(&stats->tx_err),
atomic_read 47 net/atm/proc.c atomic_read(&stats->rx),atomic_read(&stats->rx_err),
atomic_read 48 net/atm/proc.c atomic_read(&stats->rx_drop));
atomic_read 62 net/atm/proc.c seq_printf(seq, "\t[%d]", atomic_read(&dev->refcnt));
atomic_read 207 net/atm/proc.c atomic_read(&sk->sk_wmem_alloc), sk->sk_sndbuf,
atomic_read 208 net/atm/proc.c atomic_read(&sk->sk_rmem_alloc), sk->sk_rcvbuf,
atomic_read 209 net/atm/proc.c atomic_read(&sk->sk_refcnt));
atomic_read 36 net/atm/raw.c atomic_read(&sk->sk_wmem_alloc), skb->truesize);
atomic_read 164 net/atm/resources.c #define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
atomic_read 259 net/ax25/af_ax25.c atomic_read(&s->sk->sk_rmem_alloc) <= s->sk->sk_rcvbuf) {
atomic_read 333 net/ax25/af_ax25.c if (atomic_read(&ax25->sk->sk_wmem_alloc) ||
atomic_read 334 net/ax25/af_ax25.c atomic_read(&ax25->sk->sk_rmem_alloc)) {
atomic_read 1696 net/ax25/af_ax25.c amount = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc);
atomic_read 1786 net/ax25/af_ax25.c ax25_info.rcv_q = atomic_read(&sk->sk_rmem_alloc);
atomic_read 1787 net/ax25/af_ax25.c ax25_info.snd_q = atomic_read(&sk->sk_wmem_alloc);
atomic_read 1927 net/ax25/af_ax25.c atomic_read(&ax25->sk->sk_wmem_alloc),
atomic_read 1928 net/ax25/af_ax25.c atomic_read(&ax25->sk->sk_rmem_alloc),
atomic_read 128 net/ax25/ax25_ds_timer.c if (atomic_read(&sk->sk_rmem_alloc) <
atomic_read 273 net/ax25/ax25_in.c if (atomic_read(&sk->sk_rmem_alloc) >=
atomic_read 64 net/ax25/ax25_std_timer.c if (atomic_read(&sk->sk_rmem_alloc) <
atomic_read 349 net/bluetooth/af_bluetooth.c amount = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc);
atomic_read 480 net/bluetooth/bnep/core.c while (!atomic_read(&s->killed)) {
atomic_read 294 net/bluetooth/cmtp/core.c while (!atomic_read(&session->terminate)) {
atomic_read 166 net/bluetooth/hci_conn.c if (atomic_read(&conn->refcnt))
atomic_read 1123 net/bluetooth/hci_core.c if (atomic_read(&hdev->promisc)) {
atomic_read 1514 net/bluetooth/hci_core.c if (atomic_read(&hdev->promisc)) {
atomic_read 1564 net/bluetooth/hci_core.c BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
atomic_read 1566 net/bluetooth/hci_core.c if (!atomic_read(&hdev->cmd_cnt) && time_after(jiffies, hdev->cmd_last_tx + HZ)) {
atomic_read 1572 net/bluetooth/hci_core.c if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
atomic_read 545 net/bluetooth/hidp/core.c while (!atomic_read(&session->terminate)) {
atomic_read 331 net/bluetooth/rfcomm/core.c BT_DBG("dlc %p refcnt %d session %p", d, atomic_read(&d->refcnt), s);
atomic_read 81 net/bluetooth/rfcomm/sock.c if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
atomic_read 220 net/bluetooth/rfcomm/sock.c BT_DBG("sk %p state %d refcnt %d", sk, sk->sk_state, atomic_read(&sk->sk_refcnt));
atomic_read 711 net/bluetooth/rfcomm/sock.c if (atomic_read(&sk->sk_rmem_alloc) <= (sk->sk_rcvbuf >> 2))
atomic_read 372 net/bluetooth/rfcomm/tty.c if (atomic_read(&dev->wmem_alloc) < rfcomm_room(dev->dlc)) {
atomic_read 814 net/bluetooth/rfcomm/tty.c room = rfcomm_room(dev->dlc) - atomic_read(&dev->wmem_alloc);
atomic_read 149 net/bridge/br_netfilter.c if (atomic_read(&nf_bridge->use) > 1) {
atomic_read 1321 net/core/dev.c if (atomic_read(&netstamp_needed))
atomic_read 1999 net/core/dev.c WARN_ON(atomic_read(&skb->users));
atomic_read 4156 net/core/dev.c while (atomic_read(&dev->refcnt) != 0) {
atomic_read 4185 net/core/dev.c dev->name, atomic_read(&dev->refcnt));
atomic_read 4243 net/core/dev.c BUG_ON(atomic_read(&dev->refcnt));
atomic_read 82 net/core/dst.c if (likely(atomic_read(&dst->__refcnt))) {
atomic_read 150 net/core/dst.c atomic_read(&dst_total), delayed, work_performed,
atomic_read 167 net/core/dst.c if (ops->gc && atomic_read(&ops->entries) > ops->gc_thresh) {
atomic_read 266 net/core/dst.c WARN_ON(atomic_read(&dst->__refcnt) < 1);
atomic_read 193 net/core/flow.c if (fle->genid == atomic_read(&flow_cache_genid)) {
atomic_read 231 net/core/flow.c fle->genid = atomic_read(&flow_cache_genid);
atomic_read(&flow_cache_genid); atomic_read 261 net/core/flow.c unsigned genid = atomic_read(&flow_cache_genid); atomic_read 147 net/core/neighbour.c if (atomic_read(&n->refcnt) == 1 && atomic_read 215 net/core/neighbour.c if (atomic_read(&n->refcnt) != 1) { atomic_read 440 net/core/neighbour.c if (atomic_read(&tbl->entries) > (tbl->hash_mask + 1)) atomic_read 736 net/core/neighbour.c if (atomic_read(&n->refcnt) == 1 && atomic_read 839 net/core/neighbour.c atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) { atomic_read 1491 net/core/neighbour.c if (atomic_read(&tbl->entries)) atomic_read 1694 net/core/neighbour.c NLA_PUT_U32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt)); atomic_read 1748 net/core/neighbour.c .ndtc_entries = atomic_read(&tbl->entries), atomic_read 2060 net/core/neighbour.c ci.ndm_refcnt = atomic_read(&neigh->refcnt) - 1; atomic_read 2063 net/core/neighbour.c NLA_PUT_U32(skb, NDA_PROBES, atomic_read(&neigh->probes)); atomic_read 2468 net/core/neighbour.c atomic_read(&tbl->entries), atomic_read 93 net/core/net_namespace.c if (unlikely(atomic_read(&net->use_count) != 0)) { atomic_read 95 net/core/net_namespace.c atomic_read(&net->use_count)); atomic_read 486 net/core/netpoll.c atomic_read(&trapped)) { atomic_read 546 net/core/netpoll.c if (atomic_read(&trapped)) { atomic_read 832 net/core/netpoll.c return atomic_read(&trapped); atomic_read 3472 net/core/pktgen.c if (atomic_read(&(pkt_dev->skb->users)) != 1) { atomic_read 3474 net/core/pktgen.c while (atomic_read(&(pkt_dev->skb->users)) != 1) { atomic_read 511 net/core/rtnetlink.c .rta_clntref = atomic_read(&(dst->__refcnt)), atomic_read 445 net/core/skbuff.c if (likely(atomic_read(&skb->users) == 1)) atomic_read 277 net/core/sock.c if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= atomic_read 974 net/core/sock.c if (atomic_read(&sk->sk_omem_alloc)) atomic_read 976 net/core/sock.c __func__, atomic_read(&sk->sk_omem_alloc)); atomic_read 1172 net/core/sock.c if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) { atomic_read 1188 net/core/sock.c if (force || atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) { atomic_read 1204 net/core/sock.c atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) { atomic_read 1242 net/core/sock.c if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) atomic_read 1283 net/core/sock.c if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) { atomic_read 1456 net/core/sock.c if (atomic_read(&sk->sk_rmem_alloc) < prot->sysctl_rmem[0]) atomic_read 1462 net/core/sock.c } else if (atomic_read(&sk->sk_wmem_alloc) < atomic_read 1469 net/core/sock.c prot->sysctl_mem[2] > atomic_read(prot->sockets_allocated) * atomic_read 1471 net/core/sock.c atomic_read(&sk->sk_rmem_alloc) + atomic_read 1509 net/core/sock.c (atomic_read(prot->memory_allocated) < prot->sysctl_mem[0])) atomic_read 1650 net/core/sock.c if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) { atomic_read 2176 net/core/sock.c proto->sockets_allocated != NULL ? atomic_read(proto->sockets_allocated) : -1, atomic_read 2177 net/core/sock.c proto->memory_allocated != NULL ? 
atomic_read(proto->memory_allocated) : -1, atomic_read 28 net/dccp/ccid.c while (atomic_read(&ccids_lockct) != 0) { atomic_read 373 net/dccp/proto.c if (atomic_read(&sk->sk_rmem_alloc) > 0) atomic_read 1241 net/decnet/af_decnet.c amount = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc); atomic_read 551 net/decnet/dn_neigh.c atomic_read(&dn->n.refcnt), atomic_read 583 net/decnet/dn_nsp_in.c if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= atomic_read 170 net/decnet/dn_route.c if (atomic_read(&rt->u.dst.__refcnt) || atomic_read 201 net/decnet/dn_route.c if (atomic_read(&rt->u.dst.__refcnt) || atomic_read 1717 net/decnet/dn_route.c atomic_read(&rt->u.dst.__refcnt), atomic_read 543 net/econet/af_econet.c if (!atomic_read(&sk->sk_wmem_alloc) && atomic_read 544 net/econet/af_econet.c !atomic_read(&sk->sk_rmem_alloc)) { atomic_read 582 net/econet/af_econet.c if (atomic_read(&sk->sk_rmem_alloc) || atomic_read 583 net/econet/af_econet.c atomic_read(&sk->sk_wmem_alloc)) { atomic_read 40 net/ieee80211/ieee80211_crypt.c if (atomic_read(&entry->refcnt) != 0 && !force) atomic_read 151 net/ipv4/af_inet.c WARN_ON(atomic_read(&sk->sk_rmem_alloc)); atomic_read 152 net/ipv4/af_inet.c WARN_ON(atomic_read(&sk->sk_wmem_alloc)); atomic_read 333 net/ipv4/arp.c int probes = atomic_read(&neigh->probes); atomic_read 444 net/ipv4/cipso_ipv4.c if (iter->doi == doi && atomic_read(&iter->refcount)) atomic_read 657 net/ipv4/cipso_ipv4.c if (atomic_read(&iter_doi->refcount) > 0) { atomic_read 161 net/ipv4/inet_diag.c minfo->idiag_rmem = atomic_read(&sk->sk_rmem_alloc); atomic_read 164 net/ipv4/inet_diag.c minfo->idiag_tmem = atomic_read(&sk->sk_wmem_alloc); atomic_read 166 net/ipv4/inet_fragment.c work = atomic_read(&nf->mem) - nf->low_thresh; atomic_read 125 net/ipv4/inet_hashtables.c if (atomic_read(&hashinfo->lhash_users)) { atomic_read 131 net/ipv4/inet_hashtables.c if (!atomic_read(&hashinfo->lhash_users)) atomic_read 44 net/ipv4/inet_timewait_sock.c if (atomic_read(&tw->tw_refcnt) != 1) { atomic_read 46 net/ipv4/inet_timewait_sock.c tw->tw_prot->name, tw, atomic_read(&tw->tw_refcnt)); atomic_read 283 net/ipv4/inetpeer.c if (atomic_read(&p->refcnt) == 1) { atomic_read 84 net/ipv4/ip_fragment.c return atomic_read(&net->ipv4.frags.mem); atomic_read 580 net/ipv4/ip_fragment.c if (atomic_read(&net->ipv4.frags.mem) > net->ipv4.frags.high_thresh) atomic_read 926 net/ipv4/ip_output.c if (atomic_read(&sk->sk_wmem_alloc) <= atomic_read 366 net/ipv4/ipmr.c if (atomic_read(&cache_resolve_queue_len) == 0) atomic_read 387 net/ipv4/ipmr.c if (atomic_read(&cache_resolve_queue_len)) atomic_read 688 net/ipv4/ipmr.c if (atomic_read(&cache_resolve_queue_len)>=10 || atomic_read 866 net/ipv4/ipmr.c if (atomic_read(&cache_resolve_queue_len) != 0) { atomic_read 149 net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c if (seq_printf(s, "use=%u\n", atomic_read(&ct->ct_general.use))) atomic_read 326 net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c unsigned int nr_conntracks = atomic_read(&net->ct.count); atomic_read 57 net/ipv4/proc.c atomic_read(&tcp_orphan_count), atomic_read 58 net/ipv4/proc.c tcp_death_row.tw_count, atomic_read(&tcp_sockets_allocated), atomic_read 59 net/ipv4/proc.c atomic_read(&tcp_memory_allocated)); atomic_read 62 net/ipv4/proc.c atomic_read(&udp_memory_allocated)); atomic_read 801 net/ipv4/raw.c int amount = atomic_read(&sk->sk_wmem_alloc); atomic_read 937 net/ipv4/raw.c atomic_read(&sp->sk_wmem_alloc), atomic_read 938 net/ipv4/raw.c atomic_read(&sp->sk_rmem_alloc), atomic_read 940 net/ipv4/raw.c 
atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops)); atomic_read 269 net/ipv4/route.c return atomic_read(&net->ipv4.rt_genid); atomic_read 383 net/ipv4/route.c r->rt_flags, atomic_read(&r->u.dst.__refcnt), atomic_read 391 net/ipv4/route.c r->u.dst.hh ? atomic_read(&r->u.dst.hh->hh_refcnt) : -1, atomic_read 469 net/ipv4/route.c atomic_read(&ipv4_dst_ops.entries), atomic_read 639 net/ipv4/route.c if (atomic_read(&rth->u.dst.__refcnt)) atomic_read 885 net/ipv4/route.c atomic_read(&ipv4_dst_ops.entries) < ip_rt_max_size) { atomic_read 891 net/ipv4/route.c goal = atomic_read(&ipv4_dst_ops.entries) - atomic_read 896 net/ipv4/route.c goal = atomic_read(&ipv4_dst_ops.entries) - equilibrium; atomic_read 899 net/ipv4/route.c goal = atomic_read(&ipv4_dst_ops.entries) - equilibrium; atomic_read 906 net/ipv4/route.c equilibrium = atomic_read(&ipv4_dst_ops.entries) - goal; atomic_read 963 net/ipv4/route.c atomic_read(&ipv4_dst_ops.entries), goal, i); atomic_read 966 net/ipv4/route.c if (atomic_read(&ipv4_dst_ops.entries) < ip_rt_max_size) atomic_read 970 net/ipv4/route.c if (atomic_read(&ipv4_dst_ops.entries) < ip_rt_max_size) atomic_read 980 net/ipv4/route.c atomic_read(&ipv4_dst_ops.entries) < ipv4_dst_ops.gc_thresh) atomic_read 984 net/ipv4/route.c atomic_read(&ipv4_dst_ops.entries), goal, rover); atomic_read 1038 net/ipv4/route.c if (!atomic_read(&rth->u.dst.__refcnt)) { atomic_read 1121 net/ipv4/tcp.c !atomic_read(&sk->sk_rmem_alloc))) atomic_read 1891 net/ipv4/tcp.c atomic_read(sk->sk_prot->orphan_count))) { atomic_read 392 net/ipv4/tcp_input.c atomic_read(&tcp_memory_allocated) < sysctl_tcp_mem[0]) { atomic_read 393 net/ipv4/tcp_input.c sk->sk_rcvbuf = min(atomic_read(&sk->sk_rmem_alloc), atomic_read 396 net/ipv4/tcp_input.c if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf) atomic_read 3962 net/ipv4/tcp_input.c if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf || atomic_read 4371 net/ipv4/tcp_input.c if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) atomic_read 4383 net/ipv4/tcp_input.c if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) atomic_read 4391 net/ipv4/tcp_input.c if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) atomic_read 4442 net/ipv4/tcp_input.c if (atomic_read(&tcp_memory_allocated) >= sysctl_tcp_mem[0]) atomic_read 4706 net/ipv4/tcp_input.c (atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1))) { atomic_read 2209 net/ipv4/tcp_ipv4.c atomic_read(&sk->sk_refcnt), atomic_read 2252 net/ipv4/tcp_ipv4.c atomic_read(&sk->sk_refcnt), sk, atomic_read 2280 net/ipv4/tcp_ipv4.c atomic_read(&tw->tw_refcnt), tw, len); atomic_read 1908 net/ipv4/tcp_output.c if (atomic_read(&sk->sk_wmem_alloc) > atomic_read 68 net/ipv4/tcp_timer.c int orphans = atomic_read(&tcp_orphan_count); atomic_read 781 net/ipv4/udp.c int amount = atomic_read(&sk->sk_wmem_alloc); atomic_read 1632 net/ipv4/udp.c atomic_read(&sp->sk_wmem_alloc), atomic_read 1633 net/ipv4/udp.c atomic_read(&sp->sk_rmem_alloc), atomic_read 1635 net/ipv4/udp.c atomic_read(&sp->sk_refcnt), sp, atomic_read 1636 net/ipv4/udp.c atomic_read(&sp->sk_drops), len); atomic_read 191 net/ipv4/xfrm4_policy.c return (atomic_read(&xfrm4_dst_ops.entries) > xfrm4_dst_ops.gc_thresh*2); atomic_read 155 net/ipv6/inet6_connection_sock.c rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid); atomic_read 170 net/ipv6/inet6_connection_sock.c if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) { atomic_read 1128 net/ipv6/ip6_fib.c if (atomic_read(&rt->rt6i_ref) != 1) { atomic_read 1144 net/ipv6/ip6_fib.c 
BUG_ON(atomic_read(&rt->rt6i_ref) != 1); atomic_read 1427 net/ipv6/ip6_fib.c if (atomic_read(&rt->u.dst.__refcnt) == 0 && atomic_read 132 net/ipv6/ip6_flowlabel.c if (atomic_read(&fl->users) == 0) { atomic_read 149 net/ipv6/ip6_flowlabel.c if (!sched && atomic_read(&fl_size)) atomic_read 166 net/ipv6/ip6_flowlabel.c if (fl->fl_net == net && atomic_read(&fl->users) == 0) { atomic_read 409 net/ipv6/ip6_flowlabel.c int room = FL_MAX_SIZE - atomic_read(&fl_size); atomic_read 703 net/ipv6/ip6_flowlabel.c atomic_read(&fl->users), atomic_read 1278 net/ipv6/ip6_output.c if (atomic_read(&sk->sk_wmem_alloc) <= atomic_read 556 net/ipv6/ip6mr.c if (atomic_read(&cache_resolve_queue_len)) atomic_read 567 net/ipv6/ip6mr.c if (atomic_read(&cache_resolve_queue_len)) atomic_read 855 net/ipv6/ip6mr.c if (atomic_read(&cache_resolve_queue_len) >= 10 || atomic_read 1117 net/ipv6/ip6mr.c if (atomic_read(&cache_resolve_queue_len) != 0) { atomic_read 643 net/ipv6/ndisc.c int probes = atomic_read(&neigh->probes); atomic_read 607 net/ipv6/netfilter/nf_conntrack_reasm.c if (atomic_read(&nf_init_frags.mem) > nf_init_frags.high_thresh) atomic_read 1132 net/ipv6/raw.c int amount = atomic_read(&sk->sk_wmem_alloc); atomic_read 1238 net/ipv6/raw.c atomic_read(&sp->sk_wmem_alloc), atomic_read 1239 net/ipv6/raw.c atomic_read(&sp->sk_rmem_alloc), atomic_read 1243 net/ipv6/raw.c atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops)); atomic_read 92 net/ipv6/reassembly.c return atomic_read(&net->ipv6.frags.mem); atomic_read 605 net/ipv6/reassembly.c if (atomic_read(&net->ipv6.frags.mem) > net->ipv6.frags.high_thresh) atomic_read 992 net/ipv6/route.c if (!atomic_read(&dst->__refcnt)) { atomic_read 1036 net/ipv6/route.c atomic_read(&ops->entries) <= rt_max_size) atomic_read 1042 net/ipv6/route.c if (atomic_read(&ops->entries) < ops->gc_thresh) atomic_read 1046 net/ipv6/route.c return (atomic_read(&ops->entries) > rt_max_size); atomic_read 2428 net/ipv6/route.c rt->rt6i_metric, atomic_read(&rt->u.dst.__refcnt), atomic_read 2463 net/ipv6/route.c atomic_read(&net->ipv6.ip6_dst_ops->entries), atomic_read 1930 net/ipv6/tcp_ipv6.c atomic_read(&sp->sk_refcnt), sp, atomic_read 1964 net/ipv6/tcp_ipv6.c atomic_read(&tw->tw_refcnt), tw); atomic_read 985 net/ipv6/udp.c atomic_read(&sp->sk_wmem_alloc), atomic_read 986 net/ipv6/udp.c atomic_read(&sp->sk_rmem_alloc), atomic_read 990 net/ipv6/udp.c atomic_read(&sp->sk_refcnt), sp, atomic_read 991 net/ipv6/udp.c atomic_read(&sp->sk_drops)); atomic_read 65 net/ipv6/xfrm6_input.c if (!skb->sp || atomic_read(&skb->sp->refcnt) != 1) { atomic_read 221 net/ipv6/xfrm6_policy.c return (atomic_read(&xfrm6_dst_ops.entries) > xfrm6_dst_ops.gc_thresh*2); atomic_read 1838 net/ipx/af_ipx.c amount = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc); atomic_read 85 net/ipx/ipx_proc.c seq_printf(seq, "%6d", atomic_read(&i->refcnt)); atomic_read 283 net/ipx/ipx_proc.c atomic_read(&s->sk_wmem_alloc), atomic_read 284 net/ipx/ipx_proc.c atomic_read(&s->sk_rmem_alloc), atomic_read 1367 net/irda/af_irda.c if ((atomic_read(&sk->sk_rmem_alloc) << 2) <= sk->sk_rcvbuf) { atomic_read 1486 net/irda/af_irda.c if ((atomic_read(&sk->sk_rmem_alloc) << 2) <= sk->sk_rcvbuf) { atomic_read 1765 net/irda/af_irda.c amount = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc); atomic_read 1085 net/iucv/af_iucv.c len = atomic_read(&sk->sk_rmem_alloc); atomic_read 72 net/key/af_key.c if (3 * atomic_read(&sk->sk_rmem_alloc) <= 2 * sk->sk_rcvbuf) atomic_read 100 net/key/af_key.c WARN_ON(atomic_read(&sk->sk_rmem_alloc)); atomic_read 101 
net/key/af_key.c WARN_ON(atomic_read(&sk->sk_wmem_alloc)); atomic_read 110 net/key/af_key.c if (atomic_read(&pfkey_table_users)) { atomic_read 116 net/key/af_key.c if (atomic_read(&pfkey_table_users) == 0) atomic_read 231 net/key/af_key.c if (atomic_read(&skb->users) != 1) { atomic_read 239 net/key/af_key.c if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) { atomic_read 2121 net/key/af_key.c hdr->sadb_msg_reserved = atomic_read(&xp->refcnt); atomic_read 2945 net/key/af_key.c if (atomic_read(&pfkey_socks_nr) == 0) atomic_read 3587 net/key/af_key.c 3 * atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) atomic_read 3637 net/key/af_key.c atomic_read(&s->sk_refcnt), atomic_read 3638 net/key/af_key.c atomic_read(&s->sk_rmem_alloc), atomic_read 3639 net/key/af_key.c atomic_read(&s->sk_wmem_alloc), atomic_read 877 net/llc/llc_conn.c __func__, atomic_read(&llc_sock_nr)); atomic_read 905 net/llc/llc_conn.c if (atomic_read(&sk->sk_refcnt) != 1) { atomic_read 907 net/llc/llc_conn.c sk, __func__, atomic_read(&sk->sk_refcnt)); atomic_read 909 net/llc/llc_conn.c atomic_read(&llc_sock_nr)); atomic_read 913 net/llc/llc_conn.c __func__, atomic_read(&llc_sock_nr)); atomic_read 138 net/llc/llc_proc.c atomic_read(&sk->sk_wmem_alloc), atomic_read 139 net/llc/llc_proc.c atomic_read(&sk->sk_rmem_alloc) - llc->copied_seq, atomic_read 99 net/mac80211/debugfs_netdev.c return scnprintf(buf, buflen, "%d\n", atomic_read(&sdata->field));\ atomic_read 55 net/mac80211/main.c if (atomic_read(&local->iff_promiscs)) atomic_read 58 net/mac80211/main.c if (atomic_read(&local->iff_allmultis)) atomic_read 366 net/mac80211/mesh.c if (atomic_read(&tbl->entries) atomic_read 377 net/mac80211/mesh.c atomic_set(&newtbl->entries, atomic_read(&tbl->entries)); atomic_read 282 net/mac80211/mesh.h atomic_read(&sdata->u.mesh.mshstats.estab_plinks); atomic_read 298 net/mac80211/tx.c if (!atomic_read(&tx->sdata->bss->num_sta_ps)) atomic_read 1789 net/mac80211/tx.c if (atomic_read(&bss->num_sta_ps) > 0) atomic_read 541 net/netfilter/ipvs/ip_vs_app.c atomic_read(&inc->usecnt), atomic_read 235 net/netfilter/ipvs/ip_vs_conn.c if (!cp && atomic_read(&ip_vs_conn_no_cport_cnt)) atomic_read 422 net/netfilter/ipvs/ip_vs_conn.c return atomic_read(&dest->activeconns) atomic_read 423 net/netfilter/ipvs/ip_vs_conn.c + atomic_read(&dest->inactconns); atomic_read 446 net/netfilter/ipvs/ip_vs_conn.c cp->flags |= atomic_read(&dest->conn_flags) & atomic_read 449 net/netfilter/ipvs/ip_vs_conn.c cp->flags |= atomic_read(&dest->conn_flags); atomic_read 460 net/netfilter/ipvs/ip_vs_conn.c cp->flags, atomic_read(&cp->refcnt), atomic_read 461 net/netfilter/ipvs/ip_vs_conn.c atomic_read(&dest->refcnt)); atomic_read 523 net/netfilter/ipvs/ip_vs_conn.c cp->flags, atomic_read(&cp->refcnt), atomic_read 524 net/netfilter/ipvs/ip_vs_conn.c atomic_read(&dest->refcnt)); atomic_read 576 net/netfilter/ipvs/ip_vs_conn.c (atomic_read(&dest->weight) == 0))) { atomic_read 624 net/netfilter/ipvs/ip_vs_conn.c if (atomic_read(&cp->n_control)) atomic_read 636 net/netfilter/ipvs/ip_vs_conn.c if (likely(atomic_read(&cp->refcnt) == 1)) { atomic_read 661 net/netfilter/ipvs/ip_vs_conn.c atomic_read(&cp->refcnt)-1, atomic_read 662 net/netfilter/ipvs/ip_vs_conn.c atomic_read(&cp->n_control)); atomic_read 735 net/netfilter/ipvs/ip_vs_conn.c if (unlikely(pp && atomic_read(&pp->appcnt))) atomic_read 956 net/netfilter/ipvs/ip_vs_conn.c i = atomic_read(&cp->in_pkts); atomic_read 1048 net/netfilter/ipvs/ip_vs_conn.c if (atomic_read(&ip_vs_conn_count) != 0) { atomic_read 421 
net/netfilter/ipvs/ip_vs_core.c cp->flags, atomic_read(&cp->refcnt)); atomic_read 1351 net/netfilter/ipvs/ip_vs_core.c (atomic_read(&cp->in_pkts) % sysctl_ip_vs_sync_threshold[1] atomic_read 253 net/netfilter/ipvs/ip_vs_ctl.c if (atomic_read(&ip_vs_dropentry)) atomic_read 469 net/netfilter/ipvs/ip_vs_ctl.c && atomic_read(&ip_vs_ftpsvc_counter) atomic_read 479 net/netfilter/ipvs/ip_vs_ctl.c && atomic_read(&ip_vs_nullsvc_counter)) { atomic_read 687 net/netfilter/ipvs/ip_vs_ctl.c atomic_read(&dest->refcnt)); atomic_read 703 net/netfilter/ipvs/ip_vs_ctl.c if (atomic_read(&dest->refcnt) == 1) { atomic_read 920 net/netfilter/ipvs/ip_vs_ctl.c atomic_read(&dest->refcnt), atomic_read 939 net/netfilter/ipvs/ip_vs_ctl.c IP_VS_WAIT_WHILE(atomic_read(&svc->usecnt) > 1); atomic_read 970 net/netfilter/ipvs/ip_vs_ctl.c IP_VS_WAIT_WHILE(atomic_read(&svc->usecnt) > 1); atomic_read 1027 net/netfilter/ipvs/ip_vs_ctl.c IP_VS_WAIT_WHILE(atomic_read(&svc->usecnt) > 1); atomic_read 1074 net/netfilter/ipvs/ip_vs_ctl.c atomic_read(&dest->refcnt)); atomic_read 1127 net/netfilter/ipvs/ip_vs_ctl.c IP_VS_WAIT_WHILE(atomic_read(&svc->usecnt) > 1); atomic_read 1292 net/netfilter/ipvs/ip_vs_ctl.c IP_VS_WAIT_WHILE(atomic_read(&svc->usecnt) > 1); atomic_read 1391 net/netfilter/ipvs/ip_vs_ctl.c if (atomic_read(&svc->refcnt) == 0) atomic_read 1416 net/netfilter/ipvs/ip_vs_ctl.c IP_VS_WAIT_WHILE(atomic_read(&svc->usecnt) > 1); atomic_read 1444 net/netfilter/ipvs/ip_vs_ctl.c IP_VS_WAIT_WHILE(atomic_read(&svc->usecnt) > 0); atomic_read 1461 net/netfilter/ipvs/ip_vs_ctl.c IP_VS_WAIT_WHILE(atomic_read(&svc->usecnt) > 0); atomic_read 1902 net/netfilter/ipvs/ip_vs_ctl.c ip_vs_fwd_name(atomic_read(&dest->conn_flags)), atomic_read 1903 net/netfilter/ipvs/ip_vs_ctl.c atomic_read(&dest->weight), atomic_read 1904 net/netfilter/ipvs/ip_vs_ctl.c atomic_read(&dest->activeconns), atomic_read 1905 net/netfilter/ipvs/ip_vs_ctl.c atomic_read(&dest->inactconns)); atomic_read 1913 net/netfilter/ipvs/ip_vs_ctl.c ip_vs_fwd_name(atomic_read(&dest->conn_flags)), atomic_read 1914 net/netfilter/ipvs/ip_vs_ctl.c atomic_read(&dest->weight), atomic_read 1915 net/netfilter/ipvs/ip_vs_ctl.c atomic_read(&dest->activeconns), atomic_read 1916 net/netfilter/ipvs/ip_vs_ctl.c atomic_read(&dest->inactconns)); atomic_read 2306 net/netfilter/ipvs/ip_vs_ctl.c entry.conn_flags = atomic_read(&dest->conn_flags); atomic_read 2307 net/netfilter/ipvs/ip_vs_ctl.c entry.weight = atomic_read(&dest->weight); atomic_read 2310 net/netfilter/ipvs/ip_vs_ctl.c entry.activeconns = atomic_read(&dest->activeconns); atomic_read 2311 net/netfilter/ipvs/ip_vs_ctl.c entry.inactconns = atomic_read(&dest->inactconns); atomic_read 2312 net/netfilter/ipvs/ip_vs_ctl.c entry.persistconns = atomic_read(&dest->persistconns); atomic_read 2814 net/netfilter/ipvs/ip_vs_ctl.c atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK); atomic_read 2815 net/netfilter/ipvs/ip_vs_ctl.c NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight)); atomic_read 2819 net/netfilter/ipvs/ip_vs_ctl.c atomic_read(&dest->activeconns)); atomic_read 2821 net/netfilter/ipvs/ip_vs_ctl.c atomic_read(&dest->inactconns)); atomic_read 2823 net/netfilter/ipvs/ip_vs_ctl.c atomic_read(&dest->persistconns)); atomic_read 213 net/netfilter/ipvs/ip_vs_dh.c || atomic_read(&dest->weight) <= 0 atomic_read 282 net/netfilter/ipvs/ip_vs_lblc.c if (atomic_read(&tbl->entries) <= tbl->max_size) { atomic_read 287 net/netfilter/ipvs/ip_vs_lblc.c goal = (atomic_read(&tbl->entries) - tbl->max_size)*4/3; atomic_read 397 
net/netfilter/ipvs/ip_vs_lblc.c if (atomic_read(&dest->weight) > 0) { atomic_read 399 net/netfilter/ipvs/ip_vs_lblc.c loh = atomic_read(&least->activeconns) * 50 atomic_read 400 net/netfilter/ipvs/ip_vs_lblc.c + atomic_read(&least->inactconns); atomic_read 414 net/netfilter/ipvs/ip_vs_lblc.c doh = atomic_read(&dest->activeconns) * 50 atomic_read 415 net/netfilter/ipvs/ip_vs_lblc.c + atomic_read(&dest->inactconns); atomic_read 416 net/netfilter/ipvs/ip_vs_lblc.c if (loh * atomic_read(&dest->weight) > atomic_read 417 net/netfilter/ipvs/ip_vs_lblc.c doh * atomic_read(&least->weight)) { atomic_read 426 net/netfilter/ipvs/ip_vs_lblc.c atomic_read(&least->activeconns), atomic_read 427 net/netfilter/ipvs/ip_vs_lblc.c atomic_read(&least->refcnt), atomic_read 428 net/netfilter/ipvs/ip_vs_lblc.c atomic_read(&least->weight), loh); atomic_read 441 net/netfilter/ipvs/ip_vs_lblc.c if (atomic_read(&dest->activeconns) > atomic_read(&dest->weight)) { atomic_read 445 net/netfilter/ipvs/ip_vs_lblc.c if (atomic_read(&d->activeconns)*2 atomic_read 446 net/netfilter/ipvs/ip_vs_lblc.c < atomic_read(&d->weight)) { atomic_read 490 net/netfilter/ipvs/ip_vs_lblc.c if (dest && atomic_read(&dest->weight) > 0 && !is_overloaded(dest, svc)) atomic_read 179 net/netfilter/ipvs/ip_vs_lblcr.c if ((atomic_read(&least->weight) > 0) atomic_read 181 net/netfilter/ipvs/ip_vs_lblcr.c loh = atomic_read(&least->activeconns) * 50 atomic_read 182 net/netfilter/ipvs/ip_vs_lblcr.c + atomic_read(&least->inactconns); atomic_read 195 net/netfilter/ipvs/ip_vs_lblcr.c doh = atomic_read(&dest->activeconns) * 50 atomic_read 196 net/netfilter/ipvs/ip_vs_lblcr.c + atomic_read(&dest->inactconns); atomic_read 197 net/netfilter/ipvs/ip_vs_lblcr.c if ((loh * atomic_read(&dest->weight) > atomic_read 198 net/netfilter/ipvs/ip_vs_lblcr.c doh * atomic_read(&least->weight)) atomic_read 208 net/netfilter/ipvs/ip_vs_lblcr.c atomic_read(&least->activeconns), atomic_read 209 net/netfilter/ipvs/ip_vs_lblcr.c atomic_read(&least->refcnt), atomic_read 210 net/netfilter/ipvs/ip_vs_lblcr.c atomic_read(&least->weight), loh); atomic_read 228 net/netfilter/ipvs/ip_vs_lblcr.c if (atomic_read(&most->weight) > 0) { atomic_read 229 net/netfilter/ipvs/ip_vs_lblcr.c moh = atomic_read(&most->activeconns) * 50 atomic_read 230 net/netfilter/ipvs/ip_vs_lblcr.c + atomic_read(&most->inactconns); atomic_read 240 net/netfilter/ipvs/ip_vs_lblcr.c doh = atomic_read(&dest->activeconns) * 50 atomic_read 241 net/netfilter/ipvs/ip_vs_lblcr.c + atomic_read(&dest->inactconns); atomic_read 243 net/netfilter/ipvs/ip_vs_lblcr.c if ((moh * atomic_read(&dest->weight) < atomic_read 244 net/netfilter/ipvs/ip_vs_lblcr.c doh * atomic_read(&most->weight)) atomic_read 245 net/netfilter/ipvs/ip_vs_lblcr.c && (atomic_read(&dest->weight) > 0)) { atomic_read 254 net/netfilter/ipvs/ip_vs_lblcr.c atomic_read(&most->activeconns), atomic_read 255 net/netfilter/ipvs/ip_vs_lblcr.c atomic_read(&most->refcnt), atomic_read 256 net/netfilter/ipvs/ip_vs_lblcr.c atomic_read(&most->weight), moh); atomic_read 458 net/netfilter/ipvs/ip_vs_lblcr.c if (atomic_read(&tbl->entries) <= tbl->max_size) { atomic_read 463 net/netfilter/ipvs/ip_vs_lblcr.c goal = (atomic_read(&tbl->entries) - tbl->max_size)*4/3; atomic_read 573 net/netfilter/ipvs/ip_vs_lblcr.c if (atomic_read(&dest->weight) > 0) { atomic_read 575 net/netfilter/ipvs/ip_vs_lblcr.c loh = atomic_read(&least->activeconns) * 50 atomic_read 576 net/netfilter/ipvs/ip_vs_lblcr.c + atomic_read(&least->inactconns); atomic_read 590 net/netfilter/ipvs/ip_vs_lblcr.c doh = 
atomic_read(&dest->activeconns) * 50 atomic_read 591 net/netfilter/ipvs/ip_vs_lblcr.c + atomic_read(&dest->inactconns); atomic_read 592 net/netfilter/ipvs/ip_vs_lblcr.c if (loh * atomic_read(&dest->weight) > atomic_read 593 net/netfilter/ipvs/ip_vs_lblcr.c doh * atomic_read(&least->weight)) { atomic_read 602 net/netfilter/ipvs/ip_vs_lblcr.c atomic_read(&least->activeconns), atomic_read 603 net/netfilter/ipvs/ip_vs_lblcr.c atomic_read(&least->refcnt), atomic_read 604 net/netfilter/ipvs/ip_vs_lblcr.c atomic_read(&least->weight), loh); atomic_read 617 net/netfilter/ipvs/ip_vs_lblcr.c if (atomic_read(&dest->activeconns) > atomic_read(&dest->weight)) { atomic_read 621 net/netfilter/ipvs/ip_vs_lblcr.c if (atomic_read(&d->activeconns)*2 atomic_read 622 net/netfilter/ipvs/ip_vs_lblcr.c < atomic_read(&d->weight)) { atomic_read 657 net/netfilter/ipvs/ip_vs_lblcr.c if (atomic_read(&en->set.size) > 1 && atomic_read 33 net/netfilter/ipvs/ip_vs_lc.c return (atomic_read(&dest->activeconns) << 8) + atomic_read 34 net/netfilter/ipvs/ip_vs_lc.c atomic_read(&dest->inactconns); atomic_read 60 net/netfilter/ipvs/ip_vs_lc.c atomic_read(&dest->weight) == 0) atomic_read 72 net/netfilter/ipvs/ip_vs_lc.c atomic_read(&least->activeconns), atomic_read 73 net/netfilter/ipvs/ip_vs_lc.c atomic_read(&least->inactconns)); atomic_read 47 net/netfilter/ipvs/ip_vs_nq.c return atomic_read(&dest->activeconns) + 1; atomic_read 78 net/netfilter/ipvs/ip_vs_nq.c !atomic_read(&dest->weight)) atomic_read 84 net/netfilter/ipvs/ip_vs_nq.c if (atomic_read(&dest->activeconns) == 0) { atomic_read 91 net/netfilter/ipvs/ip_vs_nq.c (loh * atomic_read(&dest->weight) > atomic_read 92 net/netfilter/ipvs/ip_vs_nq.c doh * atomic_read(&least->weight))) { atomic_read 105 net/netfilter/ipvs/ip_vs_nq.c atomic_read(&least->activeconns), atomic_read 106 net/netfilter/ipvs/ip_vs_nq.c atomic_read(&least->refcnt), atomic_read 107 net/netfilter/ipvs/ip_vs_nq.c atomic_read(&least->weight), loh); atomic_read 542 net/netfilter/ipvs/ip_vs_proto_tcp.c atomic_read(&cp->refcnt)); atomic_read 66 net/netfilter/ipvs/ip_vs_rr.c atomic_read(&dest->weight) > 0) atomic_read 80 net/netfilter/ipvs/ip_vs_rr.c atomic_read(&dest->activeconns), atomic_read 81 net/netfilter/ipvs/ip_vs_rr.c atomic_read(&dest->refcnt), atomic_read(&dest->weight)); atomic_read 51 net/netfilter/ipvs/ip_vs_sed.c return atomic_read(&dest->activeconns) + 1; atomic_read 81 net/netfilter/ipvs/ip_vs_sed.c atomic_read(&dest->weight) > 0) { atomic_read 97 net/netfilter/ipvs/ip_vs_sed.c if (loh * atomic_read(&dest->weight) > atomic_read 98 net/netfilter/ipvs/ip_vs_sed.c doh * atomic_read(&least->weight)) { atomic_read 107 net/netfilter/ipvs/ip_vs_sed.c atomic_read(&least->activeconns), atomic_read 108 net/netfilter/ipvs/ip_vs_sed.c atomic_read(&least->refcnt), atomic_read 109 net/netfilter/ipvs/ip_vs_sed.c atomic_read(&least->weight), loh); atomic_read 210 net/netfilter/ipvs/ip_vs_sh.c || atomic_read(&dest->weight) <= 0 atomic_read 38 net/netfilter/ipvs/ip_vs_wlc.c return (atomic_read(&dest->activeconns) << 8) + atomic_read 39 net/netfilter/ipvs/ip_vs_wlc.c atomic_read(&dest->inactconns); atomic_read 69 net/netfilter/ipvs/ip_vs_wlc.c atomic_read(&dest->weight) > 0) { atomic_read 85 net/netfilter/ipvs/ip_vs_wlc.c if (loh * atomic_read(&dest->weight) > atomic_read 86 net/netfilter/ipvs/ip_vs_wlc.c doh * atomic_read(&least->weight)) { atomic_read 95 net/netfilter/ipvs/ip_vs_wlc.c atomic_read(&least->activeconns), atomic_read 96 net/netfilter/ipvs/ip_vs_wlc.c atomic_read(&least->refcnt), atomic_read 97 
net/netfilter/ipvs/ip_vs_wlc.c atomic_read(&least->weight), loh); atomic_read 59 net/netfilter/ipvs/ip_vs_wrr.c weight = atomic_read(&dest->weight); atomic_read 80 net/netfilter/ipvs/ip_vs_wrr.c if (atomic_read(&dest->weight) > weight) atomic_read 81 net/netfilter/ipvs/ip_vs_wrr.c weight = atomic_read(&dest->weight); atomic_read 184 net/netfilter/ipvs/ip_vs_wrr.c atomic_read(&dest->weight) >= mark->cw) { atomic_read 201 net/netfilter/ipvs/ip_vs_wrr.c atomic_read(&dest->activeconns), atomic_read 202 net/netfilter/ipvs/ip_vs_wrr.c atomic_read(&dest->refcnt), atomic_read 203 net/netfilter/ipvs/ip_vs_wrr.c atomic_read(&dest->weight)); atomic_read 93 net/netfilter/ipvs/ip_vs_xmit.c atomic_read(&rt->u.dst.__refcnt), rtos); atomic_read 152 net/netfilter/ipvs/ip_vs_xmit.c atomic_read(&rt->u.dst.__refcnt)); atomic_read 174 net/netfilter/nf_conntrack_core.c NF_CT_ASSERT(atomic_read(&nfct->use) == 0); atomic_read 477 net/netfilter/nf_conntrack_core.c unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) { atomic_read 1026 net/netfilter/nf_conntrack_core.c if (atomic_read(&net->ct.count) != 0) { atomic_read 1031 net/netfilter/nf_conntrack_core.c while (atomic_read(&nf_conntrack_untracked.ct_general.use) > 1) atomic_read 351 net/netfilter/nf_conntrack_netlink.c NLA_PUT_BE32(skb, CTA_USE, htonl(atomic_read(&ct->ct_general.use))); atomic_read 168 net/netfilter/nf_conntrack_standalone.c if (seq_printf(s, "use=%u\n", atomic_read(&ct->ct_general.use))) atomic_read 235 net/netfilter/nf_conntrack_standalone.c unsigned int nr_conntracks = atomic_read(&net->ct.count); atomic_read 905 net/netfilter/nfnetlink_log.c inst->flushtimeout, atomic_read(&inst->use)); atomic_read 413 net/netlabel/netlabel_kapi.c return (atomic_read(&netlabel_mgmt_protocount) > 0); atomic_read 162 net/netlink/af_netlink.c WARN_ON(atomic_read(&sk->sk_rmem_alloc)); atomic_read 163 net/netlink/af_netlink.c WARN_ON(atomic_read(&sk->sk_wmem_alloc)); atomic_read 178 net/netlink/af_netlink.c if (atomic_read(&nl_table_users)) { atomic_read 184 net/netlink/af_netlink.c if (atomic_read(&nl_table_users) == 0) atomic_read 770 net/netlink/af_netlink.c if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf || atomic_read 784 net/netlink/af_netlink.c if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf || atomic_read 930 net/netlink/af_netlink.c if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf && atomic_read 935 net/netlink/af_netlink.c return atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf; atomic_read 1338 net/netlink/af_netlink.c if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) atomic_read 1847 net/netlink/af_netlink.c atomic_read(&s->sk_rmem_alloc), atomic_read 1848 net/netlink/af_netlink.c atomic_read(&s->sk_wmem_alloc), atomic_read 1850 net/netlink/af_netlink.c atomic_read(&s->sk_refcnt) atomic_read 289 net/netrom/af_netrom.c if (atomic_read(&sk->sk_wmem_alloc) || atomic_read 290 net/netrom/af_netrom.c atomic_read(&sk->sk_rmem_alloc)) { atomic_read 1203 net/netrom/af_netrom.c amount = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc); atomic_read 1339 net/netrom/af_netrom.c atomic_read(&s->sk_wmem_alloc), atomic_read 1340 net/netrom/af_netrom.c atomic_read(&s->sk_rmem_alloc), atomic_read 141 net/netrom/nr_timer.c if (atomic_read(&sk->sk_rmem_alloc) < (sk->sk_rcvbuf / 2) && atomic_read 263 net/packet/af_packet.c WARN_ON(atomic_read(&sk->sk_rmem_alloc)); atomic_read 264 net/packet/af_packet.c WARN_ON(atomic_read(&sk->sk_wmem_alloc)); atomic_read 522 net/packet/af_packet.c if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= 
atomic_read 651 net/packet/af_packet.c atomic_read(&sk->sk_rmem_alloc) + skb->truesize < atomic_read 1645 net/packet/af_packet.c int amount = atomic_read(&sk->sk_wmem_alloc); atomic_read 1866 net/packet/af_packet.c if (closing || atomic_read(&po->mapped) == 0) { atomic_read 1884 net/packet/af_packet.c if (atomic_read(&po->mapped)) atomic_read 1885 net/packet/af_packet.c printk(KERN_DEBUG "packet_mmap: vma is busy: %d\n", atomic_read(&po->mapped)); atomic_read 2048 net/packet/af_packet.c atomic_read(&s->sk_refcnt), atomic_read 2053 net/packet/af_packet.c atomic_read(&s->sk_rmem_alloc), atomic_read 359 net/rose/af_rose.c if (atomic_read(&sk->sk_wmem_alloc) || atomic_read 360 net/rose/af_rose.c atomic_read(&sk->sk_rmem_alloc)) { atomic_read 1309 net/rose/af_rose.c amount = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc); atomic_read 1480 net/rose/af_rose.c atomic_read(&s->sk_wmem_alloc), atomic_read 1481 net/rose/af_rose.c atomic_read(&s->sk_rmem_alloc), atomic_read 184 net/rose/rose_in.c if (atomic_read(&sk->sk_rmem_alloc) > atomic_read 151 net/rose/rose_timer.c if (atomic_read(&sk->sk_rmem_alloc) < (sk->sk_rcvbuf / 2) && atomic_read 55 net/rxrpc/af_rxrpc.c return atomic_read(&sk->sk_wmem_alloc) < (size_t) sk->sk_sndbuf; atomic_read 334 net/rxrpc/af_rxrpc.c _enter("%d{%d}", call->debug_id, atomic_read(&call->usage)); atomic_read 663 net/rxrpc/af_rxrpc.c WARN_ON(atomic_read(&sk->sk_wmem_alloc)); atomic_read 680 net/rxrpc/af_rxrpc.c _enter("%p{%d,%d}", sk, sk->sk_state, atomic_read(&sk->sk_refcnt)); atomic_read 872 net/rxrpc/af_rxrpc.c ASSERTCMP(atomic_read(&rxrpc_n_skbs), ==, 0); atomic_read 151 net/rxrpc/ar-accept.c ASSERTCMP(atomic_read(&call->usage), >=, 3); atomic_read 214 net/rxrpc/ar-accept.c if (atomic_read(&local->usage) > 0) atomic_read 177 net/rxrpc/ar-ack.c atomic_read(&call->sequence), atomic_read 404 net/rxrpc/ar-ack.c rxrpc_rotate_tx_window(call, atomic_read(&call->sequence)); atomic_read 630 net/rxrpc/ar-ack.c tx = atomic_read(&call->sequence); atomic_read 1162 net/rxrpc/ar-ack.c ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) - atomic_read 229 net/rxrpc/ar-call.c _leave(" = %p [extant %d]", call, atomic_read(&call->usage)); atomic_read 237 net/rxrpc/ar-call.c _leave(" = %p [second %d]", call, atomic_read(&call->usage)); atomic_read 411 net/rxrpc/ar-call.c _leave(" = %p [%d]", call, atomic_read(&call->usage)); atomic_read 424 net/rxrpc/ar-call.c call->debug_id, atomic_read(&call->usage), atomic_read 425 net/rxrpc/ar-call.c atomic_read(&call->ackr_not_idle), atomic_read 620 net/rxrpc/ar-call.c _enter("%p{u=%d}", call, atomic_read(&call->usage)); atomic_read 622 net/rxrpc/ar-call.c ASSERTCMP(atomic_read(&call->usage), >, 0); atomic_read 705 net/rxrpc/ar-call.c call, atomic_read(&call->usage), call->channel, call->conn); atomic_read 734 net/rxrpc/ar-call.c switch (atomic_read(&call->usage)) { atomic_read 747 net/rxrpc/ar-call.c call, atomic_read(&call->usage), atomic_read 748 net/rxrpc/ar-call.c atomic_read(&call->ackr_not_idle), atomic_read 150 net/rxrpc/ar-connection.c _leave(" = %p [extant %d]", bundle, atomic_read(&bundle->usage)); atomic_read 163 net/rxrpc/ar-connection.c _leave(" = %p [second %d]", bundle, atomic_read(&bundle->usage)); atomic_read 173 net/rxrpc/ar-connection.c _enter("%p,%p{%d}",trans, bundle, atomic_read(&bundle->usage)); atomic_read 702 net/rxrpc/ar-connection.c _leave(" = %p {u=%d}", conn, atomic_read(&conn->usage)); atomic_read 790 net/rxrpc/ar-connection.c conn, atomic_read(&conn->usage), conn->debug_id); atomic_read 792 
net/rxrpc/ar-connection.c ASSERTCMP(atomic_read(&conn->usage), >, 0); atomic_read 808 net/rxrpc/ar-connection.c _enter("%p{%d}", conn, atomic_read(&conn->usage)); atomic_read 810 net/rxrpc/ar-connection.c ASSERTCMP(atomic_read(&conn->usage), ==, 0); atomic_read 844 net/rxrpc/ar-connection.c conn->debug_id, atomic_read(&conn->usage), atomic_read 847 net/rxrpc/ar-connection.c if (likely(atomic_read(&conn->usage) > 0)) atomic_read 854 net/rxrpc/ar-connection.c if (atomic_read(&conn->usage) > 0) { atomic_read 891 net/rxrpc/ar-connection.c ASSERTCMP(atomic_read(&conn->usage), ==, 0); atomic_read 342 net/rxrpc/ar-input.c hi_serial = atomic_read(&call->conn->hi_serial); atomic_read 618 net/rxrpc/ar-input.c if (atomic_read(&call->usage) == 0) atomic_read 685 net/rxrpc/ar-input.c if (local && atomic_read(&local->usage) > 0) atomic_read 16 net/rxrpc/ar-internal.h BUG_ON(atomic_read((X)) >> (sizeof(atomic_t) - 2) == \ atomic_read 222 net/rxrpc/ar-local.c _enter("%p{u=%d}", local, atomic_read(&local->usage)); atomic_read 224 net/rxrpc/ar-local.c ASSERTCMP(atomic_read(&local->usage), >, 0); atomic_read 245 net/rxrpc/ar-local.c _enter("%p{%d}", local, atomic_read(&local->usage)); atomic_read 250 net/rxrpc/ar-local.c if (atomic_read(&local->usage) > 0) { atomic_read 137 net/rxrpc/ar-peer.c atomic_read(&peer->usage), atomic_read 141 net/rxrpc/ar-peer.c if (atomic_read(&peer->usage) > 0 && atomic_read 162 net/rxrpc/ar-peer.c if (atomic_read(&peer->usage) > 0 && atomic_read 188 net/rxrpc/ar-peer.c _leave(" = %p {u=%d}", peer, atomic_read(&peer->usage)); atomic_read 222 net/rxrpc/ar-peer.c if (atomic_read(&peer->usage) > 0 && atomic_read 256 net/rxrpc/ar-peer.c _enter("%p{u=%d}", peer, atomic_read(&peer->usage)); atomic_read 258 net/rxrpc/ar-peer.c ASSERTCMP(atomic_read(&peer->usage), >, 0); atomic_read 277 net/rxrpc/ar-peer.c _enter("%p{%d}", peer, atomic_read(&peer->usage)); atomic_read 81 net/rxrpc/ar-proc.c atomic_read(&call->usage), atomic_read 164 net/rxrpc/ar-proc.c atomic_read(&conn->usage), atomic_read 167 net/rxrpc/ar-proc.c atomic_read(&conn->serial), atomic_read 168 net/rxrpc/ar-proc.c atomic_read(&conn->hi_serial)); atomic_read 127 net/rxrpc/ar-transport.c _leave(" = %p {u=%d}", trans, atomic_read(&trans->usage)); atomic_read 182 net/rxrpc/ar-transport.c _enter("%p{u=%d}", trans, atomic_read(&trans->usage)); atomic_read 184 net/rxrpc/ar-transport.c ASSERTCMP(atomic_read(&trans->usage), >, 0); atomic_read 230 net/rxrpc/ar-transport.c trans->debug_id, atomic_read(&trans->usage), atomic_read 233 net/rxrpc/ar-transport.c if (likely(atomic_read(&trans->usage) > 0)) atomic_read 257 net/rxrpc/ar-transport.c ASSERTCMP(atomic_read(&trans->usage), ==, 0); atomic_read 320 net/sched/em_meta.c dst->value = atomic_read(&skb->sk->sk_refcnt); atomic_read 350 net/sched/em_meta.c dst->value = atomic_read(&skb->sk->sk_rmem_alloc); atomic_read 356 net/sched/em_meta.c dst->value = atomic_read(&skb->sk->sk_wmem_alloc); atomic_read 362 net/sched/em_meta.c dst->value = atomic_read(&skb->sk->sk_omem_alloc); atomic_read 631 net/sched/sch_api.c if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1) atomic_read 1163 net/sched/sch_api.c tcm->tcm_info = atomic_read(&q->refcnt); atomic_read 468 net/sctp/associola.c WARN_ON(atomic_read(&asoc->rmem_alloc)); atomic_read 90 net/sctp/objcnt.c atomic_read(sctp_dbg_objcnt[i].counter), &len); atomic_read 991 net/sctp/outqueue.c atomic_read(&chunk->skb->users) : -1); atomic_read 327 net/sctp/proc.c atomic_read(&assoc->rmem_alloc), atomic_read 133 net/sctp/socket.c amt = 
atomic_read(&asoc->base.sk->sk_wmem_alloc); atomic_read 6404 net/sctp/socket.c amt = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc); atomic_read 6631 net/sctp/socket.c if (atomic_read(&sctp_sk(oldsk)->pd_mode)) { atomic_read 697 net/sctp/ulpevent.c rx_count = atomic_read(&asoc->rmem_alloc); atomic_read 699 net/sctp/ulpevent.c rx_count = atomic_read(&asoc->base.sk->sk_rmem_alloc); atomic_read 225 net/sctp/ulpqueue.c if (atomic_read(&sctp_sk(sk)->pd_mode) == 0) { atomic_read 492 net/sctp/ulpqueue.c atomic_read(&sctp_sk(asoc->base.sk)->pd_mode)) atomic_read 1020 net/sctp/ulpqueue.c if (sp->frag_interleave || atomic_read(&sp->pd_mode) == 0) { atomic_read 135 net/sunrpc/auth.c if (atomic_read(&cred->cr_count) == 0) atomic_read 240 net/sunrpc/auth.c if (atomic_read(&cred->cr_count) != 0) atomic_read 248 net/sunrpc/auth.c if (atomic_read(&cred->cr_count) == 0) { atomic_read 383 net/sunrpc/cache.c if (cd->entries || atomic_read(&cd->inuse)) { atomic_read 466 net/sunrpc/cache.c if (atomic_read(&ch->ref.refcount) == 1) atomic_read 1046 net/sunrpc/cache.c if (atomic_read(&detail->readers) == 0 && atomic_read 1231 net/sunrpc/cache.c cp->expiry_time, atomic_read(&cp->ref.refcount), cp->flags); atomic_read 399 net/sunrpc/svc_xprt.c xprt, atomic_read(&xprt->xpt_ref.refcount)); atomic_read 708 net/sunrpc/svc_xprt.c atomic_read(&xprt->xpt_ref.refcount)); atomic_read 808 net/sunrpc/svc_xprt.c if (atomic_read(&xprt->xpt_ref.refcount) > 1 atomic_read 855 net/sunrpc/svc_xprt.c BUG_ON(atomic_read(&xprt->xpt_ref.refcount) < 2); atomic_read 572 net/sunrpc/svcsock.c required = atomic_read(&svsk->sk_xprt.xpt_reserved) + serv->sv_max_mesg; atomic_read 996 net/sunrpc/svcsock.c required = atomic_read(&svsk->sk_xprt.xpt_reserved) + serv->sv_max_mesg; atomic_read 96 net/sunrpc/xprtrdma/svc_rdma.c int len = snprintf(str_buf, 32, "%d\n", atomic_read(stat)); atomic_read 251 net/sunrpc/xprtrdma/svc_rdma_transport.c if (atomic_read(&xprt->sc_xprt.xpt_ref.refcount)==0) atomic_read 415 net/sunrpc/xprtrdma/svc_rdma_transport.c if (atomic_read(&xprt->sc_xprt.xpt_ref.refcount)==0) atomic_read 1120 net/sunrpc/xprtrdma/svc_rdma_transport.c BUG_ON(atomic_read(&rdma->sc_xprt.xpt_ref.refcount) != 0); atomic_read 1148 net/sunrpc/xprtrdma/svc_rdma_transport.c WARN_ON(atomic_read(&rdma->sc_ctxt_used) != 0); atomic_read 1149 net/sunrpc/xprtrdma/svc_rdma_transport.c WARN_ON(atomic_read(&rdma->sc_dma_used) != 0); atomic_read 1193 net/sunrpc/xprtrdma/svc_rdma_transport.c if ((rdma->sc_sq_depth - atomic_read(&rdma->sc_sq_count) < 3)) atomic_read 1259 net/sunrpc/xprtrdma/svc_rdma_transport.c if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) { atomic_read 1268 net/sunrpc/xprtrdma/svc_rdma_transport.c atomic_read(&xprt->sc_sq_count) < atomic_read 1288 net/sunrpc/xprtrdma/svc_rdma_transport.c ret, atomic_read(&xprt->sc_sq_count), atomic_read 501 net/sunrpc/xprtrdma/transport.c int credits = atomic_read(&r_xprt->rx_buf.rb_credits); atomic_read 777 net/tipc/name_table.c if ((type < TIPC_RESERVED_TYPES) && !atomic_read(&rsv_publ_ok)) { atomic_read 1249 net/tipc/socket.c recv_q_len = (u32)atomic_read(&tipc_queue_size); atomic_read 344 net/tipc/subscr.c if (atomic_read(&topsrv.subscription_count) >= tipc_max_subscriptions) { atomic_read 312 net/unix/af_unix.c return (atomic_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf; atomic_read 353 net/unix/af_unix.c WARN_ON(atomic_read(&sk->sk_wmem_alloc)); atomic_read 366 net/unix/af_unix.c printk(KERN_DEBUG "UNIX %p is destroyed, %d are still alive.\n", sk, atomic_read(&unix_nr_socks)); 
atomic_read 588 net/unix/af_unix.c if (atomic_read(&unix_nr_socks) > 2 * get_max_files()) atomic_read 1926 net/unix/af_unix.c amount = atomic_read(&sk->sk_wmem_alloc); atomic_read 2136 net/unix/af_unix.c atomic_read(&s->sk_refcnt), atomic_read 377 net/x25/af_x25.c if (atomic_read(&sk->sk_wmem_alloc) || atomic_read 378 net/x25/af_x25.c atomic_read(&sk->sk_rmem_alloc)) { atomic_read 1258 net/x25/af_x25.c atomic_read(&sk->sk_wmem_alloc); atomic_read 249 net/x25/x25_in.c if (atomic_read(&sk->sk_rmem_alloc) > atomic_read 166 net/x25/x25_proc.c atomic_read(&s->sk_wmem_alloc), atomic_read 167 net/x25/x25_proc.c atomic_read(&s->sk_rmem_alloc), atomic_read 362 net/x25/x25_subr.c if (atomic_read(&sk->sk_rmem_alloc) < (sk->sk_rcvbuf >> 1) && atomic_read 125 net/xfrm/xfrm_input.c if (!skb->sp || atomic_read(&skb->sp->refcnt) != 1) { atomic_read 279 net/xfrm/xfrm_policy.c if (atomic_read(&policy->refcnt) > 1) atomic_read 1564 net/xfrm/xfrm_policy.c genid = atomic_read(&flow_cache_genid); atomic_read 1706 net/xfrm/xfrm_policy.c genid != atomic_read(&flow_cache_genid)) { atomic_read 2175 net/xfrm/xfrm_policy.c return !atomic_read(&dst->__refcnt); atomic_read 1975 net/xfrm/xfrm_state.c if (atomic_read(&t->tunnel_users) == 2) atomic_read 83 samples/markers/probe-example.c atomic_read(&eventb_count)); atomic_read 543 security/keys/key.c if (atomic_read(&key->usage) == 0) atomic_read 639 security/keys/key.c if (atomic_read(&key->usage) == 0 || atomic_read 181 security/keys/proc.c atomic_read(&key->usage), atomic_read 252 security/keys/proc.c atomic_read(&user->usage), atomic_read 253 security/keys/proc.c atomic_read(&user->nkeys), atomic_read 254 security/keys/proc.c atomic_read(&user->nikeys), atomic_read 271 security/selinux/avc.c atomic_read(&avc_cache.active_nodes), atomic_read 385 security/selinux/avc.c if (atomic_read(&ret->ae.used) != 1) atomic_read 155 security/selinux/hooks.c return (atomic_read(&selinux_secmark_refcount) > 0); atomic_read 5336 security/selinux/hooks.c if (atomic_read(&p->mm->mm_users) != 1) { atomic_read 40 security/selinux/include/xfrm.h return (atomic_read(&selinux_xfrm_refcount) > 0); atomic_read 716 sound/core/oss/pcm_oss.c if (atomic_read(&substream->mmap_count)) { atomic_read 854 sound/core/oss/pcm_oss.c if (atomic_read(&substream->mmap_count)) atomic_read 863 sound/core/oss/pcm_oss.c if (atomic_read(&substream->mmap_count)) atomic_read 985 sound/core/oss/pcm_oss.c if (atomic_read(&substream->mmap_count) || atomic_read 994 sound/core/oss/pcm_oss.c if (atomic_read(&substream->mmap_count) || atomic_read 1346 sound/core/oss/pcm_oss.c if (atomic_read(&substream->mmap_count)) atomic_read 1446 sound/core/oss/pcm_oss.c if (atomic_read(&substream->mmap_count)) atomic_read 1584 sound/core/oss/pcm_oss.c if (atomic_read(&substream->mmap_count)) atomic_read 1760 sound/core/oss/pcm_oss.c if (atomic_read(&substream->mmap_count)) atomic_read 1974 sound/core/oss/pcm_oss.c if (atomic_read(&psubstream->mmap_count)) atomic_read 2092 sound/core/oss/pcm_oss.c if (atomic_read(&substream->mmap_count)) { atomic_read 2657 sound/core/oss/pcm_oss.c if (atomic_read(&substream->mmap_count)) atomic_read 2666 sound/core/oss/pcm_oss.c if (atomic_read(&substream->mmap_count)) atomic_read 389 sound/core/pcm_native.c if (atomic_read(&substream->mmap_count)) atomic_read 505 sound/core/pcm_native.c if (atomic_read(&substream->mmap_count)) atomic_read 413 sound/core/seq/seq_clientmgr.c if (atomic_read(&fifo->overflow) > 0) { atomic_read 32 sound/core/seq/seq_lock.c if (atomic_read(lockp) < 0) { atomic_read 33 
sound/core/seq/seq_lock.c printk(KERN_WARNING "seq_lock: lock trouble [counter = %d] in %s:%d\n", atomic_read(lockp), file, line); atomic_read 36 sound/core/seq/seq_lock.c while (atomic_read(lockp) > 0) { atomic_read 38 sound/core/seq/seq_lock.c snd_printk(KERN_WARNING "seq_lock: timeout [%d left] in %s:%d\n", atomic_read(lockp), file, line); atomic_read 36 sound/core/seq/seq_memory.c return pool->total_elements - atomic_read(&pool->counter); atomic_read 266 sound/core/seq/seq_memory.c used = atomic_read(&pool->counter); atomic_read 431 sound/core/seq/seq_memory.c while (atomic_read(&pool->counter) > 0) { atomic_read 433 sound/core/seq/seq_memory.c snd_printk(KERN_WARNING "snd_seq_pool_done timeout: %d cells remain\n", atomic_read(&pool->counter)); atomic_read 516 sound/core/seq/seq_memory.c snd_iprintf(buffer, "%sCells in use : %d\n", space, atomic_read(&pool->counter)); atomic_read 72 sound/core/seq/seq_memory.h return pool ? pool->total_elements - atomic_read(&pool->counter) : 0; atomic_read 52 sound/drivers/pcsp/pcsp_input.c if (atomic_read(&pcsp_chip.timer_active) || !pcsp_chip.pcspkr) atomic_read 35 sound/drivers/pcsp/pcsp_lib.c if (!atomic_read(&chip->timer_active)) atomic_read 64 sound/drivers/pcsp/pcsp_lib.c if (!atomic_read(&chip->timer_active)) atomic_read 115 sound/drivers/pcsp/pcsp_lib.c if (!atomic_read(&chip->timer_active)) atomic_read 136 sound/drivers/pcsp/pcsp_lib.c if (atomic_read(&chip->timer_active)) { atomic_read 156 sound/drivers/pcsp/pcsp_lib.c if (!atomic_read(&chip->timer_active)) atomic_read 173 sound/drivers/pcsp/pcsp_lib.c if (atomic_read(&chip->timer_active)) { atomic_read 276 sound/drivers/pcsp/pcsp_lib.c if (atomic_read(&chip->timer_active)) { atomic_read 702 sound/isa/gus/gus_pcm.c if (!wait_event_timeout(pcmp->sleep, (atomic_read(&pcmp->dma_count) <= 0), 2*HZ)) atomic_read 1527 sound/oss/vwsnd.c # define IN_USE (atomic_read(&vwsnd_use_count) != 0) atomic_read 326 sound/pci/echoaudio/echoaudio.c if (atomic_read(&chip->opencount) > 1 && chip->rate_set) atomic_read 329 sound/pci/echoaudio/echoaudio.c chip->can_set_rate, atomic_read(&chip->opencount), atomic_read 362 sound/pci/echoaudio/echoaudio.c if (atomic_read(&chip->opencount) > 1 && chip->rate_set) atomic_read 365 sound/pci/echoaudio/echoaudio.c chip->can_set_rate, atomic_read(&chip->opencount), atomic_read 404 sound/pci/echoaudio/echoaudio.c if (atomic_read(&chip->opencount) > 1 && chip->rate_set) atomic_read 447 sound/pci/echoaudio/echoaudio.c if (atomic_read(&chip->opencount) > 1 && chip->rate_set) atomic_read 471 sound/pci/echoaudio/echoaudio.c oc = atomic_read(&chip->opencount); atomic_read 1424 sound/pci/echoaudio/echoaudio.c if (atomic_read(&chip->opencount)) { atomic_read 308 sound/pci/echoaudio/mona_dsp.c if (atomic_read(&chip->opencount)) atomic_read 456 sound/pci/mixart/mixart.c while (atomic_read(&mgr->msg_processed) > 0) { atomic_read 205 sound/pci/ymfpci/ymfpci_main.c if (atomic_read(&chip->interrupt_sleep_count)) { atomic_read 818 sound/pci/ymfpci/ymfpci_main.c if (atomic_read(&chip->interrupt_sleep_count)) { atomic_read 110 sound/usb/usx2y/us122l.c snd_printdd(KERN_DEBUG "%i\n", atomic_read(&us122l->mmap_count)); atomic_read 153 sound/usb/usx2y/us122l.c snd_printdd(KERN_DEBUG "%i\n", atomic_read(&us122l->mmap_count)); atomic_read 579 sound/usb/usx2y/us122l.c while (atomic_read(&us122l->mmap_count)) atomic_read 149 sound/usb/usx2y/usbusx2yaudio.c if (atomic_read(&subs->state) >= state_PRERUNNING) atomic_read 217 sound/usb/usx2y/usbusx2yaudio.c state = atomic_read(&playbacksubs->state); 
atomic_read 244 sound/usb/usx2y/usbusx2yaudio.c state = atomic_read(&capsubs->state); atomic_read 266 sound/usb/usx2y/usbusx2yaudio.c snd_printdd("%i %p state=%i\n", s, subs, atomic_read(&subs->state)); atomic_read 273 sound/usb/usx2y/usbusx2yaudio.c if (atomic_read(&subs->state) >= state_PRERUNNING) { atomic_read 313 sound/usb/usx2y/usbusx2yaudio.c if (unlikely(atomic_read(&subs->state) < state_PREPARED)) { atomic_read 334 sound/usb/usx2y/usbusx2yaudio.c atomic_read(&capsubs->state) >= state_PREPARED && atomic_read 336 sound/usb/usx2y/usbusx2yaudio.c atomic_read(&playbacksubs->state) < state_PREPARED)) { atomic_read 490 sound/usb/usx2y/usbusx2yaudio.c if (subs != NULL && atomic_read(&subs->state) >= state_PREPARED) atomic_read 524 sound/usb/usx2y/usbusx2yaudio.c if (atomic_read(&subs->state) != state_PREPARED) atomic_read 553 sound/usb/usx2y/usbusx2yaudio.c if (atomic_read(&subs->state) == state_PREPARED && atomic_read 554 sound/usb/usx2y/usbusx2yaudio.c atomic_read(&subs->usX2Y->subs[SNDRV_PCM_STREAM_CAPTURE]->state) >= state_PREPARED) { atomic_read 563 sound/usb/usx2y/usbusx2yaudio.c if (atomic_read(&subs->state) >= state_PRERUNNING) atomic_read 823 sound/usb/usx2y/usbusx2yaudio.c if (atomic_read(&playback_subs->state) < state_PREPARED) { atomic_read 849 sound/usb/usx2y/usbusx2yaudio.c if (atomic_read(&capsubs->state) < state_PREPARED) { atomic_read 861 sound/usb/usx2y/usbusx2yaudio.c if (subs != capsubs && atomic_read(&subs->state) < state_PREPARED) atomic_read 136 sound/usb/usx2y/usx2yhwdeppcm.c if (atomic_read(&subs->state) != state_RUNNING) atomic_read 179 sound/usb/usx2y/usx2yhwdeppcm.c state = atomic_read(&playbacksubs->state); atomic_read 206 sound/usb/usx2y/usx2yhwdeppcm.c state = atomic_read(&capsubs->state); atomic_read 235 sound/usb/usx2y/usx2yhwdeppcm.c if (unlikely(atomic_read(&subs->state) < state_PREPARED)) { atomic_read 256 sound/usb/usx2y/usx2yhwdeppcm.c if (capsubs->completed_urb && atomic_read(&capsubs->state) >= state_PREPARED && atomic_read 258 sound/usb/usx2y/usx2yhwdeppcm.c (playbacksubs->completed_urb || atomic_read(&playbacksubs->state) < state_PREPARED)) { atomic_read 385 sound/usb/usx2y/usx2yhwdeppcm.c if (atomic_read(&playback_subs->state) < state_PREPARED) { atomic_read 429 sound/usb/usx2y/usx2yhwdeppcm.c if (subs != NULL && atomic_read(&subs->state) >= state_PREPARED) atomic_read 470 sound/usb/usx2y/usx2yhwdeppcm.c if (atomic_read(&subs->state) != state_PREPARED) atomic_read 505 sound/usb/usx2y/usx2yhwdeppcm.c if (atomic_read(&capsubs->state) < state_PREPARED) { atomic_read 520 sound/usb/usx2y/usx2yhwdeppcm.c if (atomic_read(&subs->state) < state_PREPARED) { atomic_read 97 virt/kvm/kvm_trace.c *val = atomic_read(&kt->lost_records);
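
The call sites catalogued above cluster into a handful of recurring idioms: snapshotting a reference count for a teardown-time sanity check (the BUG_ON()/WARN_ON() sites), comparing an accounting counter such as sk_wmem_alloc against a limit, and polling a counter in a wait loop until other contexts drain it. The sketch below is a minimal illustration of those three patterns only; it is not taken from any file listed above. The struct, field, and function names are hypothetical, and the include lines assume a kernel where <linux/atomic.h> provides atomic_t and atomic_read() (older trees used <asm/atomic.h>).

#include <linux/atomic.h>	/* assumption: atomic_t, atomic_read() live here */
#include <linux/bug.h>		/* BUG_ON() */
#include <linux/slab.h>		/* kfree() */
#include <linux/types.h>	/* bool */
#include <asm/processor.h>	/* assumption: cpu_relax() comes from here */

/* Hypothetical object with an atomic reference count and a pending-work
 * counter; both names are illustrative only. */
struct foo {
	atomic_t refcnt;
	atomic_t pending;
};

/* Idiom 1: snapshot a refcount for a teardown-time sanity check, as the
 * BUG_ON()/WARN_ON() sites above do.  atomic_read() returns the value at
 * one instant; it does not prevent a concurrent change. */
static void foo_destroy(struct foo *f)
{
	BUG_ON(atomic_read(&f->refcnt) != 0);
	kfree(f);
}

/* Idiom 2: compare an accounting counter against a limit, in the style of
 * the sk_wmem_alloc vs. sk_sndbuf comparisons.  The test can race with a
 * concurrent update; such sites tolerate the staleness. */
static bool foo_may_queue(struct foo *f, int limit)
{
	return atomic_read(&f->pending) < limit;
}

/* Idiom 3: poll a counter until other CPUs drain it, as the
 * while (atomic_read(...)) loops above do; the re-read each iteration is
 * what makes the stale snapshot acceptable. */
static void foo_wait_idle(struct foo *f)
{
	while (atomic_read(&f->pending) > 0)
		cpu_relax();
}

In each case the read is only a snapshot: the surrounding code either holds a lock that stabilises the value, tolerates a stale answer, or re-reads in a loop.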
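
A further large share of the sites above are pure diagnostics: seq_printf()/printk() of an atomic counter in a /proc show routine (the net/*/proc.c and *_proc.c entries). A minimal sketch of that reporting pattern follows, assuming the seq_file API as used by the kernels quoted above; the counter name and show function are hypothetical.

#include <linux/atomic.h>
#include <linux/seq_file.h>

/* Hypothetical global statistics counter, updated elsewhere. */
static atomic_t foo_drops = ATOMIC_INIT(0);

/* seq_file show routine: print a one-shot snapshot of the counter.
 * Exactness is not required for statistics output, which is why a bare
 * atomic_read() with no locking suffices here. */
static int foo_seq_show(struct seq_file *m, void *v)
{
	seq_printf(m, "drops: %d\n", atomic_read(&foo_drops));
	return 0;
}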