rb_next  374 block/as-iosched.c        struct rb_node *rbnext = rb_next(&last->rb_node);
rb_next  434 block/cfq-iosched.c       struct rb_node *rbnext = rb_next(&last->rb_node);
rb_next   68 block/deadline-iosched.c  struct rb_node *node = rb_next(&rq->rb_node);
rb_next 1231 block/elevator.c          struct rb_node *rbnext = rb_next(&rq->rb_node);
rb_next  433 fs/eventpoll.c            for (rbp = rb_first(&ep->rbr); rbp; rbp = rb_next(rbp)) {
rb_next  245 fs/ext2/balloc.c          n = rb_next(n);
rb_next  820 fs/ext2/balloc.c          next = rb_next(&rsv->rsv_node);
rb_next 1058 fs/ext2/balloc.c          next = rb_next(&my_rsv->rsv_node);
rb_next  232 fs/ext3/balloc.c          n = rb_next(n);
rb_next 1013 fs/ext3/balloc.c          next = rb_next(&rsv->rsv_node);
rb_next 1253 fs/ext3/balloc.c          next = rb_next(&my_rsv->rsv_node);
rb_next  488 fs/ext3/dir.c             info->curr_node = rb_next(info->curr_node);
rb_next  464 fs/ext4/dir.c             info->curr_node = rb_next(info->curr_node);
rb_next  505 fs/ext4/dir.c             info->curr_node = rb_next(info->curr_node);
rb_next  343 fs/jffs2/nodelist.h       #define frag_next(frag) rb_entry(rb_next(&(frag)->rb), struct jffs2_node_frag, rb)
rb_next  350 fs/jffs2/nodelist.h       #define tn_next(tn) rb_entry(rb_next(&(tn)->rb), struct jffs2_tmp_dnode_info, rb)
rb_next  369 fs/jffs2/nodelist.h       struct rb_node *rb_next(struct rb_node *);
rb_next  103 fs/nfs/nfs4state.c        for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
rb_next  171 fs/nfs/nfs4state.c        parent = rb_next(parent);
rb_next  911 fs/nfs/nfs4state.c        for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
rb_next  985 fs/nfs/nfs4state.c        for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
rb_next   97 fs/proc/nommu.c           for (_rb = rb_first(&nommu_vma_tree); _rb; _rb = rb_next(_rb)) {
rb_next  116 fs/proc/nommu.c           return rb_next((struct rb_node *) v);
rb_next  611 fs/ubifs/debug.c          for (rb = rb_first(&c->buds); rb; rb = rb_next(rb)) {
rb_next 1869 fs/ubifs/debug.c          this = rb_next(this);
rb_next  192 fs/ubifs/log.c            p = rb_next(p);
rb_next  348 fs/ubifs/log.c            p = rb_next(p);
rb_next 1501 fs/ubifs/recovery.c       this = rb_next(this);
rb_next 1514 fs/ubifs/recovery.c       this = rb_next(this);
rb_next  306 fs/ubifs/replay.c         this = rb_next(this);
rb_next  143 include/linux/rbtree.h    extern struct rb_node *rb_next(struct rb_node *);
rb_next  910 kernel/hrtimer.c          base->first = rb_next(&timer->node);
rb_next  277 kernel/sched_fair.c       next_node = rb_next(&se->run_node);
rb_next   92 kernel/time/timer_list.c  curr = rb_next(curr);
rb_next 1797 mm/mempolicy.c            struct rb_node *next = rb_next(&n->nd);
rb_next 1902 mm/mempolicy.c            next = rb_next(&n->nd);
rb_next  309 mm/mmap.c                 for (nd = rb_first(root); nd; nd = rb_next(nd)) {
rb_next  930 mm/nommu.c                for (rb = rb_first(&nommu_vma_tree); rb; rb = rb_next(rb)) {
rb_next  389 net/802/garp.c            next = node ? rb_next(node) : NULL, node != NULL;
rb_next  595 net/rxrpc/ar-call.c       for (p = rb_first(&rx->calls); p; p = rb_next(p)) {
rb_next  283 net/rxrpc/ar-connection.c parent = rb_next(parent);
rb_next   38 net/rxrpc/ar-connevent.c  for (p = rb_first(&conn->calls); p; p = rb_next(p)) {
rb_next  238 net/sched/sch_hfsc.c      for (n = rb_first(&q->eligible); n != NULL; n = rb_next(n)) {
rb_next  302 net/sched/sch_hfsc.c      for (n = rb_first(&cl->vt_tree); n != NULL; n = rb_next(n)) {
rb_next  314 net/sched/sch_htb.c       *n = rb_next(*n);
rb_next  199 security/keys/key.c       parent = rb_next(parent);
rb_next  540 security/keys/key.c       for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
rb_next  950 security/keys/key.c       for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
rb_next  966 security/keys/key.c       for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
rb_next  110 security/keys/proc.c      _p = rb_next(_p);
rb_next  120 security/keys/proc.c      return rb_next((struct rb_node *) v);
rb_next  222 security/keys/proc.c      _p = rb_next(_p);
rb_next  232 security/keys/proc.c      return rb_next((struct rb_node *) v);
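rb_next() (declared in include/linux/rbtree.h, line 143 above) returns the in-order successor of a node, or NULL at the end of the tree. The call sites listed fall into two recurring patterns: a read-only in-order walk driven by rb_first()/rb_next(), and a destructive walk that fetches the successor before erasing the current node. The sketch below illustrates both under assumed names; struct example_node, example_walk and example_drain are hypothetical and not taken from any of the files above.

    #include <linux/rbtree.h>
    #include <linux/kernel.h>

    /* Hypothetical node type, for illustration only. */
    struct example_node {
    	struct rb_node rb;	/* links this node into an rb_root */
    	int key;
    };

    /*
     * Read-only in-order walk: the rb_first()/rb_next() loop used by most
     * call sites above (e.g. mm/mmap.c, security/keys/key.c).
     */
    static void example_walk(struct rb_root *root)
    {
    	struct rb_node *nd;

    	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
    		struct example_node *n = rb_entry(nd, struct example_node, rb);

    		pr_info("key=%d\n", n->key);
    	}
    }

    /*
     * Destructive walk: fetch the successor with rb_next() before erasing
     * the current node, as the balloc.c and mempolicy.c call sites do.
     */
    static void example_drain(struct rb_root *root)
    {
    	struct rb_node *nd = rb_first(root);

    	while (nd) {
    		struct rb_node *next = rb_next(nd);

    		rb_erase(nd, root);
    		nd = next;
    	}
    }

The second pattern matters because rb_erase() rebalances the tree, so the successor must be captured while the current node is still linked; calling rb_next() on an already-erased node is unsafe.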