rq                148 block/as-iosched.c #define RQ_IOC(rq)	((struct io_context *) (rq)->elevator_private)
rq                149 block/as-iosched.c #define RQ_STATE(rq)	((enum arq_state)(rq)->elevator_private2)
rq                150 block/as-iosched.c #define RQ_SET_STATE(rq, state)	((rq)->elevator_private2 = (void *) state)
rq                244 block/as-iosched.c 	if (unlikely(!RQ_IOC(rq)))
rq                247 block/as-iosched.c 	aic = RQ_IOC(rq)->aic;
rq                249 block/as-iosched.c 	if (rq_is_sync(rq) && aic) {
rq                258 block/as-iosched.c 	put_io_context(RQ_IOC(rq));
rq                264 block/as-iosched.c #define RQ_RB_ROOT(ad, rq)	(&(ad)->sort_list[rq_is_sync((rq))])
rq                270 block/as-iosched.c 	while ((unlikely(alias = elv_rb_add(RQ_RB_ROOT(ad, rq), rq)))) {
rq                278 block/as-iosched.c 	elv_rb_del(RQ_RB_ROOT(ad, rq), rq);
rq                550 block/as-iosched.c 	int data_dir = rq_is_sync(rq);
rq                572 block/as-iosched.c 			if (aic->last_request_pos < rq->sector)
rq                573 block/as-iosched.c 				seek_dist = rq->sector - aic->last_request_pos;
rq                575 block/as-iosched.c 				seek_dist = aic->last_request_pos - rq->sector;
rq                578 block/as-iosched.c 		aic->last_request_pos = rq->sector + rq->nr_sectors;
rq                593 block/as-iosched.c 	sector_t next = rq->sector;
rq                659 block/as-iosched.c 	if (rq && ioc == RQ_IOC(rq)) {
rq                692 block/as-iosched.c 	if (rq && rq_is_sync(rq) && as_close_req(ad, aic, rq)) {
rq                708 block/as-iosched.c 		as_update_iohist(ad, aic, rq);
rq                768 block/as-iosched.c 	if (as_can_break_anticipation(ad, rq))
rq                792 block/as-iosched.c 	const int data_dir = rq_is_sync(rq);
rq                795 block/as-iosched.c 	ad->next_rq[data_dir] = as_choose_req(ad, rq, ad->next_rq[data_dir]);
rq                804 block/as-iosched.c 		if (as_can_break_anticipation(ad, rq))
rq                845 block/as-iosched.c 	WARN_ON(!list_empty(&rq->queuelist));
rq                847 block/as-iosched.c 	if (RQ_STATE(rq) != AS_RQ_REMOVED) {
rq                848 block/as-iosched.c 		WARN(1, "rq->state %d\n", RQ_STATE(rq));
rq                869 block/as-iosched.c 	if (ad->new_batch && ad->batch_data_dir == rq_is_sync(rq)) {
rq                876 block/as-iosched.c 	if (ad->io_context == RQ_IOC(rq) && ad->io_context) {
rq                888 block/as-iosched.c 	as_put_io_context(rq);
rq                890 block/as-iosched.c 	RQ_SET_STATE(rq, AS_RQ_POSTSCHED);
rq                902 block/as-iosched.c 	const int data_dir = rq_is_sync(rq);
rq                906 block/as-iosched.c 	WARN_ON(RQ_STATE(rq) != AS_RQ_QUEUED);
rq                908 block/as-iosched.c 	ioc = RQ_IOC(rq);
rq                918 block/as-iosched.c 	if (ad->next_rq[data_dir] == rq)
rq                919 block/as-iosched.c 		ad->next_rq[data_dir] = as_find_next_rq(ad, rq);
rq                921 block/as-iosched.c 	rq_fifo_clear(rq);
rq                922 block/as-iosched.c 	as_del_rq_rb(ad, rq);
rq                935 block/as-iosched.c 	struct request *rq;
rq                949 block/as-iosched.c 	rq = rq_entry_fifo(ad->fifo_list[adir].next);
rq                951 block/as-iosched.c 	return time_after(jiffies, rq_fifo_time(rq));
rq                976 block/as-iosched.c 	const int data_dir = rq_is_sync(rq);
rq                978 block/as-iosched.c 	BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
rq                987 block/as-iosched.c 	ad->last_sector[data_dir] = rq->sector + rq->nr_sectors;
rq                990 block/as-iosched.c 		struct io_context *ioc = RQ_IOC(rq);
rq               1004 block/as-iosched.c 	ad->next_rq[data_dir] = as_find_next_rq(ad, rq);
rq               1009 block/as-iosched.c 	as_remove_queued_request(ad->q, rq);
rq               1010 block/as-iosched.c 	WARN_ON(RQ_STATE(rq) != AS_RQ_QUEUED);
rq               1012 block/as-iosched.c 	elv_dispatch_sort(ad->q, rq);
rq               1014 block/as-iosched.c 	RQ_SET_STATE(rq, AS_RQ_DISPATCHED);
rq               1015 block/as-iosched.c 	if (RQ_IOC(rq) && RQ_IOC(rq)->aic)
rq               1016 block/as-iosched.c 		atomic_inc(&RQ_IOC(rq)->aic->nr_dispatched);
rq               1030 block/as-iosched.c 	struct request *rq;
rq               1077 block/as-iosched.c 		rq = ad->next_rq[ad->batch_data_dir];
rq               1083 block/as-iosched.c 			if (as_can_anticipate(ad, rq)) {
rq               1089 block/as-iosched.c 		if (rq) {
rq               1117 block/as-iosched.c 		rq = rq_entry_fifo(ad->fifo_list[REQ_SYNC].next);
rq               1143 block/as-iosched.c 		rq = rq_entry_fifo(ad->fifo_list[REQ_ASYNC].next);
rq               1158 block/as-iosched.c 		rq = rq_entry_fifo(ad->fifo_list[ad->batch_data_dir].next);
rq               1179 block/as-iosched.c 	as_move_to_dispatch(ad, rq);
rq               1192 block/as-iosched.c 	RQ_SET_STATE(rq, AS_RQ_NEW);
rq               1194 block/as-iosched.c 	data_dir = rq_is_sync(rq);
rq               1196 block/as-iosched.c 	rq->elevator_private = as_get_io_context(q->node);
rq               1198 block/as-iosched.c 	if (RQ_IOC(rq)) {
rq               1199 block/as-iosched.c 		as_update_iohist(ad, RQ_IOC(rq)->aic, rq);
rq               1200 block/as-iosched.c 		atomic_inc(&RQ_IOC(rq)->aic->nr_queued);
rq               1203 block/as-iosched.c 	as_add_rq_rb(ad, rq);
rq               1208 block/as-iosched.c 	rq_set_fifo_time(rq, jiffies + ad->fifo_expire[data_dir]);
rq               1209 block/as-iosched.c 	list_add_tail(&rq->queuelist, &ad->fifo_list[data_dir]);
rq               1211 block/as-iosched.c 	as_update_rq(ad, rq); /* keep state machine up to date */
rq               1212 block/as-iosched.c 	RQ_SET_STATE(rq, AS_RQ_QUEUED);
rq               1217 block/as-iosched.c 	WARN_ON(RQ_STATE(rq) != AS_RQ_DISPATCHED);
rq               1218 block/as-iosched.c 	RQ_SET_STATE(rq, AS_RQ_REMOVED);
rq               1219 block/as-iosched.c 	if (RQ_IOC(rq) && RQ_IOC(rq)->aic)
rq               1220 block/as-iosched.c 		atomic_dec(&RQ_IOC(rq)->aic->nr_dispatched);
rq               1225 block/as-iosched.c 	WARN_ON(RQ_STATE(rq) != AS_RQ_REMOVED);
rq               1226 block/as-iosched.c 	RQ_SET_STATE(rq, AS_RQ_DISPATCHED);
rq               1227 block/as-iosched.c 	if (RQ_IOC(rq) && RQ_IOC(rq)->aic)
rq               1228 block/as-iosched.c 		atomic_inc(&RQ_IOC(rq)->aic->nr_dispatched);
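
The RQ_IOC()/RQ_STATE()/RQ_SET_STATE() hits above show how as-iosched.c stashes its per-request bookkeeping in the request's two opaque elevator_private pointers. A minimal sketch of the dispatch/removal transitions, assembled from the lines shown (the function names here are illustrative, not from the file):

	/* Sketch only: mirrors the nr_dispatched accounting visible above. */
	static void example_as_mark_dispatched(struct request *rq)
	{
		RQ_SET_STATE(rq, AS_RQ_DISPATCHED);
		if (RQ_IOC(rq) && RQ_IOC(rq)->aic)
			atomic_inc(&RQ_IOC(rq)->aic->nr_dispatched);
	}

	static void example_as_mark_removed(struct request *rq)
	{
		WARN_ON(RQ_STATE(rq) != AS_RQ_DISPATCHED);
		RQ_SET_STATE(rq, AS_RQ_REMOVED);
		if (RQ_IOC(rq) && RQ_IOC(rq)->aic)
			atomic_dec(&RQ_IOC(rq)->aic->nr_dispatched);
	}
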
rq                 64 block/blk-barrier.c 	struct request_queue *q = rq->q;
rq                 68 block/blk-barrier.c 	if (rq == &q->pre_flush_rq)
rq                 70 block/blk-barrier.c 	if (rq == &q->bar_rq)
rq                 72 block/blk-barrier.c 	if (rq == &q->post_flush_rq)
rq                 81 block/blk-barrier.c 	if (!blk_fs_request(rq))
rq                 84 block/blk-barrier.c 	if ((rq->cmd_flags & REQ_ORDERED_COLOR) ==
rq                 93 block/blk-barrier.c 	struct request *rq;
rq                108 block/blk-barrier.c 	rq = q->orig_bar_rq;
rq                110 block/blk-barrier.c 	if (__blk_end_request(rq, q->orderr, blk_rq_bytes(rq)))
rq                116 block/blk-barrier.c 	elv_completed_request(rq->q, rq);
rq                117 block/blk-barrier.c 	blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_PREFLUSH, error);
rq                122 block/blk-barrier.c 	elv_completed_request(rq->q, rq);
rq                123 block/blk-barrier.c 	blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_BAR, error);
rq                128 block/blk-barrier.c 	elv_completed_request(rq->q, rq);
rq                129 block/blk-barrier.c 	blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_POSTFLUSH, error);
rq                134 block/blk-barrier.c 	struct request *rq;
rq                138 block/blk-barrier.c 		rq = &q->pre_flush_rq;
rq                141 block/blk-barrier.c 		rq = &q->post_flush_rq;
rq                145 block/blk-barrier.c 	blk_rq_init(q, rq);
rq                146 block/blk-barrier.c 	rq->cmd_flags = REQ_HARDBARRIER;
rq                147 block/blk-barrier.c 	rq->rq_disk = q->bar_rq.rq_disk;
rq                148 block/blk-barrier.c 	rq->end_io = end_io;
rq                149 block/blk-barrier.c 	q->prepare_flush_fn(q, rq);
rq                151 block/blk-barrier.c 	elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
rq                164 block/blk-barrier.c 	blkdev_dequeue_request(rq);
rq                165 block/blk-barrier.c 	q->orig_bar_rq = rq;
rq                166 block/blk-barrier.c 	rq = &q->bar_rq;
rq                167 block/blk-barrier.c 	blk_rq_init(q, rq);
rq                169 block/blk-barrier.c 		rq->cmd_flags |= REQ_RW;
rq                171 block/blk-barrier.c 		rq->cmd_flags |= REQ_FUA;
rq                172 block/blk-barrier.c 	init_request_from_bio(rq, q->orig_bar_rq->bio);
rq                173 block/blk-barrier.c 	rq->end_io = bar_end_io;
rq                184 block/blk-barrier.c 	if ((q->ordered & QUEUE_ORDERED_POSTFLUSH) && !blk_empty_barrier(rq))
rq                189 block/blk-barrier.c 	elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
rq                193 block/blk-barrier.c 		rq = &q->pre_flush_rq;
rq                200 block/blk-barrier.c 		rq = NULL;
rq                202 block/blk-barrier.c 	return rq;
rq                207 block/blk-barrier.c 	struct request *rq = *rqp;
rq                208 block/blk-barrier.c 	const int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq);
rq                215 block/blk-barrier.c 			*rqp = start_ordered(q, rq);
rq                222 block/blk-barrier.c 			blkdev_dequeue_request(rq);
rq                223 block/blk-barrier.c 			if (__blk_end_request(rq, -EOPNOTSUPP,
rq                224 block/blk-barrier.c 					      blk_rq_bytes(rq)))
rq                236 block/blk-barrier.c 	if (!blk_fs_request(rq) &&
rq                237 block/blk-barrier.c 	    rq != &q->pre_flush_rq && rq != &q->post_flush_rq)
rq                242 block/blk-barrier.c 		if (is_barrier && rq != &q->bar_rq)
rq                246 block/blk-barrier.c 		WARN_ON(blk_ordered_req_seq(rq) < blk_ordered_cur_seq(q));
rq                247 block/blk-barrier.c 		if (blk_ordered_req_seq(rq) > blk_ordered_cur_seq(q))
rq                 54 block/blk-core.c 	int rw = rq_data_dir(rq);
rq                 57 block/blk-core.c 	if (!blk_fs_request(rq) || !rq->rq_disk)
rq                 61 block/blk-core.c 	part = disk_map_sector_rcu(rq->rq_disk, rq->sector);
rq                110 block/blk-core.c 	memset(rq, 0, sizeof(*rq));
rq                112 block/blk-core.c 	INIT_LIST_HEAD(&rq->queuelist);
rq                113 block/blk-core.c 	INIT_LIST_HEAD(&rq->timeout_list);
rq                114 block/blk-core.c 	rq->cpu = -1;
rq                115 block/blk-core.c 	rq->q = q;
rq                116 block/blk-core.c 	rq->sector = rq->hard_sector = (sector_t) -1;
rq                117 block/blk-core.c 	INIT_HLIST_NODE(&rq->hash);
rq                118 block/blk-core.c 	RB_CLEAR_NODE(&rq->rb_node);
rq                119 block/blk-core.c 	rq->cmd = rq->__cmd;
rq                120 block/blk-core.c 	rq->tag = -1;
rq                121 block/blk-core.c 	rq->ref_count = 1;
rq                128 block/blk-core.c 	struct request_queue *q = rq->q;
rq                130 block/blk-core.c 	if (&q->bar_rq != rq) {
rq                166 block/blk-core.c 		rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type,
rq                167 block/blk-core.c 		rq->cmd_flags);
rq                170 block/blk-core.c 						(unsigned long long)rq->sector,
rq                171 block/blk-core.c 						rq->nr_sectors,
rq                172 block/blk-core.c 						rq->current_nr_sectors);
rq                174 block/blk-core.c 						rq->bio, rq->biotail,
rq                175 block/blk-core.c 						rq->buffer, rq->data,
rq                176 block/blk-core.c 						rq->data_len);
rq                178 block/blk-core.c 	if (blk_pc_request(rq)) {
rq                181 block/blk-core.c 			printk("%02x ", rq->cmd[bit]);
rq                297 block/blk-core.c 				q->rq.count[READ] + q->rq.count[WRITE]);
rq                307 block/blk-core.c 				q->rq.count[READ] + q->rq.count[WRITE]);
rq                319 block/blk-core.c 					q->rq.count[READ] + q->rq.count[WRITE]);
rq                460 block/blk-core.c 	struct request_list *rl = &q->rq;
rq                618 block/blk-core.c 	if (rq->cmd_flags & REQ_ELVPRIV)
rq                619 block/blk-core.c 		elv_put_request(q, rq);
rq                620 block/blk-core.c 	mempool_free(rq, q->rq.rq_pool);
rq                626 block/blk-core.c 	struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
rq                628 block/blk-core.c 	if (!rq)
rq                631 block/blk-core.c 	blk_rq_init(q, rq);
rq                633 block/blk-core.c 	rq->cmd_flags = rw | REQ_ALLOCED;
rq                636 block/blk-core.c 		if (unlikely(elv_set_request(q, rq, gfp_mask))) {
rq                637 block/blk-core.c 			mempool_free(rq, q->rq.rq_pool);
rq                640 block/blk-core.c 		rq->cmd_flags |= REQ_ELVPRIV;
rq                643 block/blk-core.c 	return rq;
rq                682 block/blk-core.c 	struct request_list *rl = &q->rq;
rq                701 block/blk-core.c 	struct request_list *rl = &q->rq;
rq                722 block/blk-core.c 	struct request *rq = NULL;
rq                723 block/blk-core.c 	struct request_list *rl = &q->rq;
rq                776 block/blk-core.c 	rq = blk_alloc_request(q, rw_flags, priv, gfp_mask);
rq                777 block/blk-core.c 	if (unlikely(!rq)) {
rq                813 block/blk-core.c 	return rq;
rq                826 block/blk-core.c 	struct request *rq;
rq                828 block/blk-core.c 	rq = get_request(q, rw_flags, bio, GFP_NOIO);
rq                829 block/blk-core.c 	while (!rq) {
rq                832 block/blk-core.c 		struct request_list *rl = &q->rq;
rq                855 block/blk-core.c 		rq = get_request(q, rw_flags, bio, GFP_NOIO);
rq                858 block/blk-core.c 	return rq;
rq                863 block/blk-core.c 	struct request *rq;
rq                869 block/blk-core.c 		rq = get_request_wait(q, rw, NULL);
rq                871 block/blk-core.c 		rq = get_request(q, rw, NULL, gfp_mask);
rq                872 block/blk-core.c 		if (!rq)
rq                877 block/blk-core.c 	return rq;
rq                914 block/blk-core.c 	blk_delete_timer(rq);
rq                915 block/blk-core.c 	blk_clear_rq_complete(rq);
rq                916 block/blk-core.c 	blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
rq                918 block/blk-core.c 	if (blk_rq_tagged(rq))
rq                919 block/blk-core.c 		blk_queue_end_tag(q, rq);
rq                921 block/blk-core.c 	elv_requeue_request(q, rq);
rq                955 block/blk-core.c 	rq->cmd_type = REQ_TYPE_SPECIAL;
rq                956 block/blk-core.c 	rq->cmd_flags |= REQ_SOFTBARRIER;
rq                958 block/blk-core.c 	rq->special = data;
rq                965 block/blk-core.c 	if (blk_rq_tagged(rq))
rq                966 block/blk-core.c 		blk_queue_end_tag(q, rq);
rq                968 block/blk-core.c 	drive_stat_acct(rq, 1);
rq                969 block/blk-core.c 	__elv_add_request(q, rq, where, 0);
rq               1558 block/blk-core.c 	if (rq->nr_sectors > q->max_sectors ||
rq               1559 block/blk-core.c 	    rq->data_len > q->max_hw_sectors << 9) {
rq               1570 block/blk-core.c 	blk_recalc_rq_segments(rq);
rq               1571 block/blk-core.c 	if (rq->nr_phys_segments > q->max_phys_segments ||
rq               1572 block/blk-core.c 	    rq->nr_phys_segments > q->max_hw_segments) {
rq               1590 block/blk-core.c 	if (blk_rq_check_limits(q, rq))
rq               1594 block/blk-core.c 	if (rq->rq_disk && rq->rq_disk->part0.make_it_fail &&
rq               1595 block/blk-core.c 	    should_fail(&fail_make_request, blk_rq_bytes(rq)))
rq               1605 block/blk-core.c 	BUG_ON(blk_queued_rq(rq));
rq               1607 block/blk-core.c 	drive_stat_acct(rq, 1);
rq               1608 block/blk-core.c 	__elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 0);
rq               1799 block/blk-core.c 	if (blk_fs_request(rq))
rq               1800 block/blk-core.c 		return rq->hard_nr_sectors << 9;
rq               1802 block/blk-core.c 	return rq->data_len;
rq               1812 block/blk-core.c 	if (blk_fs_request(rq))
rq               1813 block/blk-core.c 		return rq->current_nr_sectors << 9;
rq               1815 block/blk-core.c 	if (rq->bio)
rq               1816 block/blk-core.c 		return rq->bio->bi_size;
rq               1818 block/blk-core.c 	return rq->data_len;
rq               1852 block/blk-core.c 	if (rq->bio) {
rq               1853 block/blk-core.c 		if (__end_that_request_first(rq, error, nr_bytes))
rq               1857 block/blk-core.c 		if (blk_bidi_rq(rq) &&
rq               1858 block/blk-core.c 		    __end_that_request_first(rq->next_rq, error, bidi_bytes))
rq               1888 block/blk-core.c 	struct request_queue *q = rq->q;
rq               1891 block/blk-core.c 	if (end_that_request_data(rq, error, nr_bytes, bidi_bytes))
rq               1895 block/blk-core.c 	if (drv_callback && drv_callback(rq))
rq               1898 block/blk-core.c 	add_disk_randomness(rq->rq_disk);
rq               1901 block/blk-core.c 	end_that_request_last(rq, error);
rq               1923 block/blk-core.c 	return blk_end_io(rq, error, nr_bytes, 0, NULL);
rq               1942 block/blk-core.c 	if (rq->bio && __end_that_request_first(rq, error, nr_bytes))
rq               1945 block/blk-core.c 	add_disk_randomness(rq->rq_disk);
rq               1947 block/blk-core.c 	end_that_request_last(rq, error);
rq               1970 block/blk-core.c 	return blk_end_io(rq, error, nr_bytes, bidi_bytes, NULL);
rq               1991 block/blk-core.c 	if (!end_that_request_data(rq, error, nr_bytes, 0)) {
rq               1998 block/blk-core.c 		rq->nr_sectors = rq->hard_nr_sectors = 0;
rq               1999 block/blk-core.c 		rq->current_nr_sectors = rq->hard_cur_sectors = 0;
rq               2033 block/blk-core.c 	return blk_end_io(rq, error, nr_bytes, 0, drv_callback);
rq               2042 block/blk-core.c 	rq->cmd_flags |= (bio->bi_rw & 3);
rq               2045 block/blk-core.c 		rq->nr_phys_segments = bio_phys_segments(q, bio);
rq               2046 block/blk-core.c 		rq->buffer = bio_data(bio);
rq               2048 block/blk-core.c 	rq->current_nr_sectors = bio_cur_sectors(bio);
rq               2049 block/blk-core.c 	rq->hard_cur_sectors = rq->current_nr_sectors;
rq               2050 block/blk-core.c 	rq->hard_nr_sectors = rq->nr_sectors = bio_sectors(bio);
rq               2051 block/blk-core.c 	rq->data_len = bio->bi_size;
rq               2053 block/blk-core.c 	rq->bio = rq->biotail = bio;
rq               2056 block/blk-core.c 		rq->rq_disk = bio->bi_bdev->bd_disk;
rq                 23 block/blk-exec.c 	struct completion *waiting = rq->end_io_data;
rq                 25 block/blk-exec.c 	rq->end_io_data = NULL;
rq                 26 block/blk-exec.c 	__blk_put_request(rq->q, rq);
rq                 53 block/blk-exec.c 	rq->rq_disk = bd_disk;
rq                 54 block/blk-exec.c 	rq->cmd_flags |= REQ_NOMERGE;
rq                 55 block/blk-exec.c 	rq->end_io = done;
rq                 58 block/blk-exec.c 	__elv_add_request(q, rq, where, 1);
rq                 61 block/blk-exec.c 	if (blk_pm_resume_request(rq))
rq                 89 block/blk-exec.c 	rq->ref_count++;
rq                 91 block/blk-exec.c 	if (!rq->sense) {
rq                 93 block/blk-exec.c 		rq->sense = sense;
rq                 94 block/blk-exec.c 		rq->sense_len = 0;
rq                 97 block/blk-exec.c 	rq->end_io_data = &wait;
rq                 98 block/blk-exec.c 	blk_execute_rq_nowait(q, bd_disk, rq, at_head, blk_end_sync_rq);
rq                101 block/blk-exec.c 	if (rq->errors)
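
The blk_execute_rq()/blk_execute_rq_nowait() hits above are the generic way to push a prebuilt request through a queue and wait for completion. A hedged sketch of a typical synchronous caller, modeled on the __blk_send_generic() lines from block/scsi_ioctl.c further down (the function name and command bytes are placeholders):

	static int example_send_scsi_cmd(struct request_queue *q,
					 struct gendisk *bd_disk, int cmd, int data)
	{
		struct request *rq;
		int err;

		rq = blk_get_request(q, WRITE, __GFP_WAIT);
		rq->cmd_type = REQ_TYPE_BLOCK_PC;
		rq->data = NULL;
		rq->data_len = 0;
		rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
		rq->cmd[0] = cmd;
		rq->cmd[4] = data;
		rq->cmd_len = 6;

		err = blk_execute_rq(q, bd_disk, rq, 0);	/* sleeps until done */
		blk_put_request(rq);

		return err;
	}
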
rq                 48 block/blk-integrity.c 	rq_for_each_integrity_segment(iv, rq, iter) {
rq                 80 block/blk-integrity.c 	rq_for_each_integrity_segment(iv, rq, iter) {
rq                 15 block/blk-map.c 	if (!rq->bio)
rq                 16 block/blk-map.c 		blk_rq_bio_prep(q, rq, bio);
rq                 17 block/blk-map.c 	else if (!ll_back_merge_fn(q, rq, bio))
rq                 20 block/blk-map.c 		rq->biotail->bi_next = bio;
rq                 21 block/blk-map.c 		rq->biotail = bio;
rq                 23 block/blk-map.c 		rq->data_len += bio->bi_size;
rq                 51 block/blk-map.c 	reading = rq_data_dir(rq) == READ;
rq                 78 block/blk-map.c 	ret = blk_rq_append_bio(q, rq, bio);
rq                124 block/blk-map.c 		if (!map_data || rq_data_dir(rq) != READ)
rq                145 block/blk-map.c 		ret = __blk_rq_map_user(q, rq, map_data, ubuf, map_len,
rq                150 block/blk-map.c 			bio = rq->bio;
rq                156 block/blk-map.c 		rq->cmd_flags |= REQ_COPY_USER;
rq                158 block/blk-map.c 	rq->buffer = rq->data = NULL;
rq                162 block/blk-map.c 	rq->bio = NULL;
rq                195 block/blk-map.c 	int i, read = rq_data_dir(rq) == READ;
rq                226 block/blk-map.c 		rq->cmd_flags |= REQ_COPY_USER;
rq                230 block/blk-map.c 	blk_rq_bio_prep(q, rq, bio);
rq                231 block/blk-map.c 	rq->buffer = rq->data = NULL;
rq                283 block/blk-map.c 	int reading = rq_data_dir(rq) == READ;
rq                301 block/blk-map.c 	if (rq_data_dir(rq) == WRITE)
rq                305 block/blk-map.c 		rq->cmd_flags |= REQ_COPY_USER;
rq                307 block/blk-map.c 	blk_rq_bio_prep(q, rq, bio);
rq                308 block/blk-map.c 	blk_queue_bounce(q, &rq->bio);
rq                309 block/blk-map.c 	rq->buffer = rq->data = NULL;
rq                 14 block/blk-merge.c 	if (blk_fs_request(rq) || blk_discard_rq(rq)) {
rq                 15 block/blk-merge.c 		rq->hard_sector += nsect;
rq                 16 block/blk-merge.c 		rq->hard_nr_sectors -= nsect;
rq                 21 block/blk-merge.c 		if ((rq->nr_sectors >= rq->hard_nr_sectors) &&
rq                 22 block/blk-merge.c 		    (rq->sector <= rq->hard_sector)) {
rq                 23 block/blk-merge.c 			rq->sector = rq->hard_sector;
rq                 24 block/blk-merge.c 			rq->nr_sectors = rq->hard_nr_sectors;
rq                 25 block/blk-merge.c 			rq->hard_cur_sectors = bio_cur_sectors(rq->bio);
rq                 26 block/blk-merge.c 			rq->current_nr_sectors = rq->hard_cur_sectors;
rq                 27 block/blk-merge.c 			rq->buffer = bio_data(rq->bio);
rq                 34 block/blk-merge.c 		if (rq->nr_sectors < rq->current_nr_sectors) {
rq                 36 block/blk-merge.c 			rq->nr_sectors = rq->current_nr_sectors;
rq                 50 block/blk-merge.c 	struct request_queue *q = rq->q;
rq                 52 block/blk-merge.c 	if (!rq->bio)
rq                 58 block/blk-merge.c 	rq_for_each_segment(bv, rq, iter) {
rq                 86 block/blk-merge.c 	rq->nr_phys_segments = nr_phys_segs;
rq                 91 block/blk-merge.c 	struct request rq;
rq                 93 block/blk-merge.c 	rq.q = q;
rq                 94 block/blk-merge.c 	rq.bio = rq.biotail = bio;
rq                 96 block/blk-merge.c 	blk_recalc_rq_segments(&rq);
rq                 98 block/blk-merge.c 	bio->bi_phys_segments = rq.nr_phys_segments;
rq                148 block/blk-merge.c 	rq_for_each_segment(bvec, rq, iter) {
rq                187 block/blk-merge.c 	if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&
rq                188 block/blk-merge.c 	    (rq->data_len & q->dma_pad_mask)) {
rq                189 block/blk-merge.c 		unsigned int pad_len = (q->dma_pad_mask & ~rq->data_len) + 1;
rq                192 block/blk-merge.c 		rq->extra_len += pad_len;
rq                195 block/blk-merge.c 	if (q->dma_drain_size && q->dma_drain_needed(rq)) {
rq                196 block/blk-merge.c 		if (rq->cmd_flags & REQ_RW)
rq                206 block/blk-merge.c 		rq->extra_len += q->dma_drain_size;
rq                412 block/blk-merge.c 	struct request *next = elv_latter_request(q, rq);
rq                415 block/blk-merge.c 		return attempt_merge(q, rq, next);
rq                422 block/blk-merge.c 	struct request *prev = elv_former_request(q, rq);
rq                425 block/blk-merge.c 		return attempt_merge(q, prev, rq);
rq                 30 block/blk-softirq.c 		struct request *rq;
rq                 32 block/blk-softirq.c 		rq = list_entry(local_list.next, struct request, csd.list);
rq                 33 block/blk-softirq.c 		list_del_init(&rq->csd.list);
rq                 34 block/blk-softirq.c 		rq->q->softirq_done_fn(rq);
rq                 41 block/blk-softirq.c 	struct request *rq = data;
rq                 47 block/blk-softirq.c 	list_add_tail(&rq->csd.list, list);
rq                 49 block/blk-softirq.c 	if (list->next == &rq->csd.list)
rq                 61 block/blk-softirq.c 		struct call_single_data *data = &rq->csd;
rq                 64 block/blk-softirq.c 		data->info = rq;
rq                 41 block/blk-sysfs.c 	struct request_list *rl = &q->rq;
rq                305 block/blk-sysfs.c 	struct request_list *rl = &q->rq;
rq                285 block/blk-tag.c 	int tag = rq->tag;
rq                296 block/blk-tag.c 	list_del_init(&rq->queuelist);
rq                297 block/blk-tag.c 	rq->cmd_flags &= ~REQ_QUEUED;
rq                298 block/blk-tag.c 	rq->tag = -1;
rq                343 block/blk-tag.c 	if (unlikely((rq->cmd_flags & REQ_QUEUED))) {
rq                346 block/blk-tag.c 		       __func__, rq,
rq                347 block/blk-tag.c 		       rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag);
rq                359 block/blk-tag.c 	if (rq_is_sync(rq))
rq                375 block/blk-tag.c 	rq->cmd_flags |= REQ_QUEUED;
rq                376 block/blk-tag.c 	rq->tag = tag;
rq                377 block/blk-tag.c 	bqt->tag_index[tag] = rq;
rq                378 block/blk-tag.c 	blkdev_dequeue_request(rq);
rq                379 block/blk-tag.c 	list_add(&rq->queuelist, &q->tag_busy_list);
rq                122 block/blk-timeout.c 	struct request *rq, *tmp;
rq                126 block/blk-timeout.c 	list_for_each_entry_safe(rq, tmp, &q->timeout_list, timeout_list) {
rq                127 block/blk-timeout.c 		if (time_after_eq(jiffies, rq->deadline)) {
rq                128 block/blk-timeout.c 			list_del_init(&rq->timeout_list);
rq                133 block/blk-timeout.c 			if (blk_mark_rq_complete(rq))
rq                135 block/blk-timeout.c 			blk_rq_timed_out(rq);
rq                138 block/blk-timeout.c 			next = rq->deadline;
rq                140 block/blk-timeout.c 		} else if (time_after(next, rq->deadline))
rq                141 block/blk-timeout.c 			next = rq->deadline;
rq                226 block/blk-timeout.c 	struct request *rq, *tmp;
rq                232 block/blk-timeout.c 	list_for_each_entry_safe(rq, tmp, &q->timeout_list, timeout_list)
rq                233 block/blk-timeout.c 		blk_abort_request(rq);
rq                 37 block/blk.h    	return test_and_set_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
rq                 42 block/blk.h    	clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
rq                 82 block/bsg.c    	struct request *rq;
rq                179 block/bsg.c    		rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
rq                180 block/bsg.c    		if (!rq->cmd)
rq                184 block/bsg.c    	if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
rq                189 block/bsg.c    		if (blk_verify_command(&q->cmd_filter, rq->cmd, has_write_perm))
rq                197 block/bsg.c    	rq->cmd_len = hdr->request_len;
rq                198 block/bsg.c    	rq->cmd_type = REQ_TYPE_BLOCK_PC;
rq                200 block/bsg.c    	rq->timeout = (hdr->timeout * HZ) / 1000;
rq                201 block/bsg.c    	if (!rq->timeout)
rq                202 block/bsg.c    		rq->timeout = q->sg_timeout;
rq                203 block/bsg.c    	if (!rq->timeout)
rq                204 block/bsg.c    		rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
rq                248 block/bsg.c    	struct request *rq, *next_rq = NULL;
rq                264 block/bsg.c    	rq = blk_get_request(q, rw, GFP_KERNEL);
rq                265 block/bsg.c    	if (!rq)
rq                267 block/bsg.c    	ret = blk_fill_sgv4_hdr_rq(q, rq, hdr, bd, has_write_perm);
rq                282 block/bsg.c    		rq->next_rq = next_rq;
rq                283 block/bsg.c    		next_rq->cmd_type = rq->cmd_type;
rq                302 block/bsg.c    		ret = blk_rq_map_user(q, rq, NULL, dxferp, dxfer_len,
rq                307 block/bsg.c    	return rq;
rq                309 block/bsg.c    	if (rq->cmd != rq->__cmd)
rq                310 block/bsg.c    		kfree(rq->cmd);
rq                311 block/bsg.c    	blk_put_request(rq);
rq                325 block/bsg.c    	struct bsg_command *bc = rq->end_io_data;
rq                330 block/bsg.c    		bd->name, rq, bc, bc->bio, uptodate);
rq                349 block/bsg.c    	rq->sense = bc->sense;
rq                350 block/bsg.c    	rq->sense_len = 0;
rq                355 block/bsg.c    	bc->rq = rq;
rq                356 block/bsg.c    	bc->bio = rq->bio;
rq                357 block/bsg.c    	if (rq->next_rq)
rq                358 block/bsg.c    		bc->bidi_bio = rq->next_rq->bio;
rq                364 block/bsg.c    	dprintk("%s: queueing rq %p, bc %p\n", bd->name, rq, bc);
rq                366 block/bsg.c    	rq->end_io_data = bc;
rq                367 block/bsg.c    	blk_execute_rq_nowait(q, NULL, rq, 1, bsg_rq_end_io);
rq                420 block/bsg.c    	dprintk("rq %p bio %p %u\n", rq, bio, rq->errors);
rq                424 block/bsg.c    	hdr->device_status = status_byte(rq->errors);
rq                425 block/bsg.c    	hdr->transport_status = host_byte(rq->errors);
rq                426 block/bsg.c    	hdr->driver_status = driver_byte(rq->errors);
rq                432 block/bsg.c    	if (rq->sense_len && hdr->response) {
rq                434 block/bsg.c    					rq->sense_len);
rq                437 block/bsg.c    				   rq->sense, len);
rq                444 block/bsg.c    	if (rq->next_rq) {
rq                445 block/bsg.c    		hdr->dout_resid = rq->data_len;
rq                446 block/bsg.c    		hdr->din_resid = rq->next_rq->data_len;
rq                448 block/bsg.c    		blk_put_request(rq->next_rq);
rq                449 block/bsg.c    	} else if (rq_data_dir(rq) == READ)
rq                450 block/bsg.c    		hdr->din_resid = rq->data_len;
rq                452 block/bsg.c    		hdr->dout_resid = rq->data_len;
rq                460 block/bsg.c    	if (!ret && rq->errors < 0)
rq                461 block/bsg.c    		ret = rq->errors;
rq                464 block/bsg.c    	if (rq->cmd != rq->__cmd)
rq                465 block/bsg.c    		kfree(rq->cmd);
rq                466 block/bsg.c    	blk_put_request(rq);
rq                509 block/bsg.c    		tret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio,
rq                544 block/bsg.c    		ret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio,
rq                607 block/bsg.c    	struct request *rq;
rq                614 block/bsg.c    	rq = NULL;
rq                635 block/bsg.c    		rq = bsg_map_hdr(bd, &bc->hdr, has_write_perm);
rq                636 block/bsg.c    		if (IS_ERR(rq)) {
rq                637 block/bsg.c    			ret = PTR_ERR(rq);
rq                638 block/bsg.c    			rq = NULL;
rq                642 block/bsg.c    		bsg_add_command(bd, q, bc, rq);
rq                644 block/bsg.c    		rq = NULL;
rq                763 block/bsg.c    	ret = blk_get_queue(rq);
rq                769 block/bsg.c    		blk_put_queue(rq);
rq                773 block/bsg.c    	bd->queue = rq;
rq                781 block/bsg.c    	strncpy(bd->name, rq->bsg_dev.class_dev->bus_id, sizeof(bd->name) - 1);
rq                919 block/bsg.c    		struct request *rq;
rq                926 block/bsg.c    		rq = bsg_map_hdr(bd, &hdr, file->f_mode & FMODE_WRITE);
rq                927 block/bsg.c    		if (IS_ERR(rq))
rq                928 block/bsg.c    			return PTR_ERR(rq);
rq                930 block/bsg.c    		bio = rq->bio;
rq                931 block/bsg.c    		if (rq->next_rq)
rq                932 block/bsg.c    			bidi_bio = rq->next_rq->bio;
rq                933 block/bsg.c    		blk_execute_rq(bd->queue, NULL, rq, 0);
rq                934 block/bsg.c    		ret = blk_complete_sgv4_hdr_rq(rq, &hdr, bio, bidi_bio);
rq                 45 block/cfq-iosched.c 	((struct cfq_io_context *) (rq)->elevator_private)
rq                 46 block/cfq-iosched.c #define RQ_CFQQ(rq)		(struct cfq_queue *) ((rq)->elevator_private2)
rq                591 block/cfq-iosched.c 	struct cfq_queue *cfqq = RQ_CFQQ(rq);
rq                593 block/cfq-iosched.c 	const int sync = rq_is_sync(rq);
rq                598 block/cfq-iosched.c 	elv_rb_del(&cfqq->sort_list, rq);
rq                606 block/cfq-iosched.c 	struct cfq_queue *cfqq = RQ_CFQQ(rq);
rq                610 block/cfq-iosched.c 	cfqq->queued[rq_is_sync(rq)]++;
rq                616 block/cfq-iosched.c 	while ((__alias = elv_rb_add(&cfqq->sort_list, rq)) != NULL)
rq                625 block/cfq-iosched.c 	cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq);
rq                631 block/cfq-iosched.c 	elv_rb_del(&cfqq->sort_list, rq);
rq                632 block/cfq-iosched.c 	cfqq->queued[rq_is_sync(rq)]--;
rq                633 block/cfq-iosched.c 	cfq_add_rq_rb(rq);
rq                662 block/cfq-iosched.c 	cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
rq                665 block/cfq-iosched.c 	cfqd->last_position = rq->hard_sector + rq->hard_nr_sectors;
rq                674 block/cfq-iosched.c 	cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d",
rq                680 block/cfq-iosched.c 	struct cfq_queue *cfqq = RQ_CFQQ(rq);
rq                682 block/cfq-iosched.c 	if (cfqq->next_rq == rq)
rq                683 block/cfq-iosched.c 		cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq);
rq                685 block/cfq-iosched.c 	list_del_init(&rq->queuelist);
rq                686 block/cfq-iosched.c 	cfq_del_rq_rb(rq);
rq                689 block/cfq-iosched.c 	if (rq_is_meta(rq)) {
rq                727 block/cfq-iosched.c 	if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
rq                728 block/cfq-iosched.c 	    time_before(next->start_time, rq->start_time))
rq                729 block/cfq-iosched.c 		list_move(&rq->queuelist, &next->queuelist);
rq                744 block/cfq-iosched.c 	if (cfq_bio_sync(bio) && !rq_is_sync(rq))
rq                756 block/cfq-iosched.c 	if (cfqq == RQ_CFQQ(rq))
rq                846 block/cfq-iosched.c 	if (rq->sector >= cfqd->last_position)
rq                847 block/cfq-iosched.c 		return rq->sector - cfqd->last_position;
rq                849 block/cfq-iosched.c 		return cfqd->last_position - rq->sector;
rq                859 block/cfq-iosched.c 	return cfq_dist_from_last(cfqd, rq) <= cic->seek_mean;
rq                940 block/cfq-iosched.c 	struct cfq_queue *cfqq = RQ_CFQQ(rq);
rq                944 block/cfq-iosched.c 	cfq_remove_request(rq);
rq                946 block/cfq-iosched.c 	elv_dispatch_sort(q, rq);
rq                958 block/cfq-iosched.c 	struct request *rq;
rq                970 block/cfq-iosched.c 	rq = rq_entry_fifo(cfqq->fifo.next);
rq                972 block/cfq-iosched.c 	if (time_before(jiffies, rq->start_time + cfqd->cfq_fifo_expire[fifo]))
rq                973 block/cfq-iosched.c 		rq = NULL;
rq                975 block/cfq-iosched.c 	cfq_log_cfqq(cfqd, cfqq, "fifo=%p", rq);
rq                976 block/cfq-iosched.c 	return rq;
rq               1046 block/cfq-iosched.c 		struct request *rq;
rq               1051 block/cfq-iosched.c 		rq = cfq_check_fifo(cfqq);
rq               1052 block/cfq-iosched.c 		if (rq == NULL)
rq               1053 block/cfq-iosched.c 			rq = cfqq->next_rq;
rq               1058 block/cfq-iosched.c 		cfq_dispatch_insert(cfqd->queue, rq);
rq               1063 block/cfq-iosched.c 			atomic_inc(&RQ_CIC(rq)->ioc->refcount);
rq               1064 block/cfq-iosched.c 			cfqd->active_cic = RQ_CIC(rq);
rq               1705 block/cfq-iosched.c 	if (cic->last_request_pos < rq->sector)
rq               1706 block/cfq-iosched.c 		sdist = rq->sector - cic->last_request_pos;
rq               1708 block/cfq-iosched.c 		sdist = cic->last_request_pos - rq->sector;
rq               1790 block/cfq-iosched.c 	if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
rq               1797 block/cfq-iosched.c 	if (rq_is_meta(rq) && !cfqq->meta_pending)
rq               1807 block/cfq-iosched.c 	if (cfq_rq_close(cfqd, rq))
rq               1842 block/cfq-iosched.c 	struct cfq_io_context *cic = RQ_CIC(rq);
rq               1845 block/cfq-iosched.c 	if (rq_is_meta(rq))
rq               1849 block/cfq-iosched.c 	cfq_update_io_seektime(cfqd, cic, rq);
rq               1852 block/cfq-iosched.c 	cic->last_request_pos = rq->sector + rq->nr_sectors;
rq               1865 block/cfq-iosched.c 	} else if (cfq_should_preempt(cfqd, cfqq, rq)) {
rq               1880 block/cfq-iosched.c 	struct cfq_queue *cfqq = RQ_CFQQ(rq);
rq               1883 block/cfq-iosched.c 	cfq_init_prio_data(cfqq, RQ_CIC(rq)->ioc);
rq               1885 block/cfq-iosched.c 	cfq_add_rq_rb(rq);
rq               1887 block/cfq-iosched.c 	list_add_tail(&rq->queuelist, &cfqq->fifo);
rq               1889 block/cfq-iosched.c 	cfq_rq_enqueued(cfqd, cfqq, rq);
rq               1919 block/cfq-iosched.c 	struct cfq_queue *cfqq = RQ_CFQQ(rq);
rq               1921 block/cfq-iosched.c 	const int sync = rq_is_sync(rq);
rq               1941 block/cfq-iosched.c 		RQ_CIC(rq)->last_end_request = now;
rq               2032 block/cfq-iosched.c 	struct cfq_queue *cfqq = RQ_CFQQ(rq);
rq               2035 block/cfq-iosched.c 		const int rw = rq_data_dir(rq);
rq               2040 block/cfq-iosched.c 		put_io_context(RQ_CIC(rq)->ioc);
rq               2042 block/cfq-iosched.c 		rq->elevator_private = NULL;
rq               2043 block/cfq-iosched.c 		rq->elevator_private2 = NULL;
rq               2057 block/cfq-iosched.c 	const int rw = rq_data_dir(rq);
rq               2058 block/cfq-iosched.c 	const int is_sync = rq_is_sync(rq);
rq               2087 block/cfq-iosched.c 	rq->elevator_private = cic;
rq               2088 block/cfq-iosched.c 	rq->elevator_private2 = cfqq;
rq                 59 block/deadline-iosched.c 	return &dd->sort_list[rq_data_dir(rq)];
rq                 68 block/deadline-iosched.c 	struct rb_node *node = rb_next(&rq->rb_node);
rq                 79 block/deadline-iosched.c 	struct rb_root *root = deadline_rb_root(dd, rq);
rq                 82 block/deadline-iosched.c 	while (unlikely(__alias = elv_rb_add(root, rq)))
rq                 89 block/deadline-iosched.c 	const int data_dir = rq_data_dir(rq);
rq                 91 block/deadline-iosched.c 	if (dd->next_rq[data_dir] == rq)
rq                 92 block/deadline-iosched.c 		dd->next_rq[data_dir] = deadline_latter_request(rq);
rq                 94 block/deadline-iosched.c 	elv_rb_del(deadline_rb_root(dd, rq), rq);
rq                104 block/deadline-iosched.c 	const int data_dir = rq_data_dir(rq);
rq                106 block/deadline-iosched.c 	deadline_add_rq_rb(dd, rq);
rq                111 block/deadline-iosched.c 	rq_set_fifo_time(rq, jiffies + dd->fifo_expire[data_dir]);
rq                112 block/deadline-iosched.c 	list_add_tail(&rq->queuelist, &dd->fifo_list[data_dir]);
rq                122 block/deadline-iosched.c 	rq_fifo_clear(rq);
rq                123 block/deadline-iosched.c 	deadline_del_rq_rb(dd, rq);
rq                197 block/deadline-iosched.c 	struct request_queue *q = rq->q;
rq                199 block/deadline-iosched.c 	deadline_remove_request(q, rq);
rq                200 block/deadline-iosched.c 	elv_dispatch_add_tail(q, rq);
rq                209 block/deadline-iosched.c 	const int data_dir = rq_data_dir(rq);
rq                213 block/deadline-iosched.c 	dd->next_rq[data_dir] = deadline_latter_request(rq);
rq                215 block/deadline-iosched.c 	dd->last_sector = rq_end_sector(rq);
rq                221 block/deadline-iosched.c 	deadline_move_to_dispatch(dd, rq);
rq                230 block/deadline-iosched.c 	struct request *rq = rq_entry_fifo(dd->fifo_list[ddir].next);
rq                235 block/deadline-iosched.c 	if (time_after(jiffies, rq_fifo_time(rq)))
rq                250 block/deadline-iosched.c 	struct request *rq;
rq                257 block/deadline-iosched.c 		rq = dd->next_rq[WRITE];
rq                259 block/deadline-iosched.c 		rq = dd->next_rq[READ];
rq                261 block/deadline-iosched.c 	if (rq && dd->batching < dd->fifo_batch)
rq                308 block/deadline-iosched.c 		rq = rq_entry_fifo(dd->fifo_list[data_dir].next);
rq                314 block/deadline-iosched.c 		rq = dd->next_rq[data_dir];
rq                324 block/deadline-iosched.c 	deadline_move_request(dd, rq);
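
The deadline hits pair rq_set_fifo_time() at insert time with a time_after() check on the head of the per-direction fifo. A small sketch of that aging scheme, assembled from the lines above (helper names are illustrative):

	static void example_deadline_stamp(struct deadline_data *dd, struct request *rq)
	{
		const int data_dir = rq_data_dir(rq);

		/* expire this request fifo_expire[dir] jiffies from now */
		rq_set_fifo_time(rq, jiffies + dd->fifo_expire[data_dir]);
		list_add_tail(&rq->queuelist, &dd->fifo_list[data_dir]);
	}

	static int example_deadline_fifo_expired(struct deadline_data *dd, int ddir)
	{
		/* head of the fifo is the oldest request in that direction */
		struct request *rq = rq_entry_fifo(dd->fifo_list[ddir].next);

		return time_after(jiffies, rq_fifo_time(rq));
	}
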
rq                 52 block/elevator.c #define rq_hash_key(rq)		((rq)->sector + (rq)->nr_sectors)
rq                 53 block/elevator.c #define ELV_ON_HASH(rq)		(!hlist_unhashed(&(rq)->hash))
rq                 61 block/elevator.c 	struct request_queue *q = rq->q;
rq                 65 block/elevator.c 		return e->ops->elevator_allow_merge_fn(q, rq, bio);
rq                 75 block/elevator.c 	if (!rq_mergeable(rq))
rq                 81 block/elevator.c 	if (bio_discard(bio) != bio_discard(rq->bio))
rq                 87 block/elevator.c 	if (bio_data_dir(bio) != rq_data_dir(rq))
rq                 93 block/elevator.c 	if (rq->rq_disk != bio->bi_bdev->bd_disk || rq->special)
rq                 99 block/elevator.c 	if (bio_integrity(bio) != blk_integrity_rq(rq))
rq                102 block/elevator.c 	if (!elv_iosched_allow_merge(rq, bio))
rq                311 block/elevator.c 		e->ops->elevator_activate_req_fn(q, rq);
rq                319 block/elevator.c 		e->ops->elevator_deactivate_req_fn(q, rq);
rq                324 block/elevator.c 	hlist_del_init(&rq->hash);
rq                329 block/elevator.c 	if (ELV_ON_HASH(rq))
rq                330 block/elevator.c 		__elv_rqhash_del(rq);
rq                337 block/elevator.c 	BUG_ON(ELV_ON_HASH(rq));
rq                338 block/elevator.c 	hlist_add_head(&rq->hash, &e->hash[ELV_HASH_FN(rq_hash_key(rq))]);
rq                343 block/elevator.c 	__elv_rqhash_del(rq);
rq                344 block/elevator.c 	elv_rqhash_add(q, rq);
rq                352 block/elevator.c 	struct request *rq;
rq                354 block/elevator.c 	hlist_for_each_entry_safe(rq, entry, next, hash_list, hash) {
rq                355 block/elevator.c 		BUG_ON(!ELV_ON_HASH(rq));
rq                357 block/elevator.c 		if (unlikely(!rq_mergeable(rq))) {
rq                358 block/elevator.c 			__elv_rqhash_del(rq);
rq                362 block/elevator.c 		if (rq_hash_key(rq) == offset)
rq                363 block/elevator.c 			return rq;
rq                383 block/elevator.c 		if (rq->sector < __rq->sector)
rq                385 block/elevator.c 		else if (rq->sector > __rq->sector)
rq                391 block/elevator.c 	rb_link_node(&rq->rb_node, parent, p);
rq                392 block/elevator.c 	rb_insert_color(&rq->rb_node, root);
rq                399 block/elevator.c 	BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
rq                400 block/elevator.c 	rb_erase(&rq->rb_node, root);
rq                401 block/elevator.c 	RB_CLEAR_NODE(&rq->rb_node);
rq                408 block/elevator.c 	struct request *rq;
rq                411 block/elevator.c 		rq = rb_entry(n, struct request, rb_node);
rq                413 block/elevator.c 		if (sector < rq->sector)
rq                415 block/elevator.c 		else if (sector > rq->sector)
rq                418 block/elevator.c 			return rq;
rq                436 block/elevator.c 	if (q->last_merge == rq)
rq                439 block/elevator.c 	elv_rqhash_del(q, rq);
rq                448 block/elevator.c 		if (blk_discard_rq(rq) != blk_discard_rq(pos))
rq                450 block/elevator.c 		if (rq_data_dir(rq) != rq_data_dir(pos))
rq                454 block/elevator.c 		if (rq->sector >= boundary) {
rq                461 block/elevator.c 		if (rq->sector >= pos->sector)
rq                465 block/elevator.c 	list_add(&rq->queuelist, entry);
rq                476 block/elevator.c 	if (q->last_merge == rq)
rq                479 block/elevator.c 	elv_rqhash_del(q, rq);
rq                483 block/elevator.c 	q->end_sector = rq_end_sector(rq);
rq                484 block/elevator.c 	q->boundary_rq = rq;
rq                485 block/elevator.c 	list_add_tail(&rq->queuelist, &q->queue_head);
rq                529 block/elevator.c 		e->ops->elevator_merged_fn(q, rq, type);
rq                532 block/elevator.c 		elv_rqhash_reposition(q, rq);
rq                534 block/elevator.c 	q->last_merge = rq;
rq                543 block/elevator.c 		e->ops->elevator_merge_req_fn(q, rq, next);
rq                545 block/elevator.c 	elv_rqhash_reposition(q, rq);
rq                549 block/elevator.c 	q->last_merge = rq;
rq                558 block/elevator.c 	if (blk_account_rq(rq)) {
rq                560 block/elevator.c 		if (blk_sorted_rq(rq))
rq                561 block/elevator.c 			elv_deactivate_rq(q, rq);
rq                564 block/elevator.c 	rq->cmd_flags &= ~REQ_STARTED;
rq                566 block/elevator.c 	elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE);
rq                589 block/elevator.c 	blk_add_trace_rq(q, rq, BLK_TA_INSERT);
rq                591 block/elevator.c 	rq->q = q;
rq                595 block/elevator.c 		rq->cmd_flags |= REQ_SOFTBARRIER;
rq                597 block/elevator.c 		list_add(&rq->queuelist, &q->queue_head);
rq                601 block/elevator.c 		rq->cmd_flags |= REQ_SOFTBARRIER;
rq                603 block/elevator.c 		list_add_tail(&rq->queuelist, &q->queue_head);
rq                619 block/elevator.c 		BUG_ON(!blk_fs_request(rq) && !blk_discard_rq(rq));
rq                620 block/elevator.c 		rq->cmd_flags |= REQ_SORTED;
rq                622 block/elevator.c 		if (rq_mergeable(rq)) {
rq                623 block/elevator.c 			elv_rqhash_add(q, rq);
rq                625 block/elevator.c 				q->last_merge = rq;
rq                633 block/elevator.c 		q->elevator->ops->elevator_add_req_fn(q, rq);
rq                642 block/elevator.c 		rq->cmd_flags |= REQ_SOFTBARRIER;
rq                651 block/elevator.c 			list_add(&rq->queuelist, &q->queue_head);
rq                655 block/elevator.c 		ordseq = blk_ordered_req_seq(rq);
rq                663 block/elevator.c 		list_add_tail(&rq->queuelist, pos);
rq                673 block/elevator.c 		int nrq = q->rq.count[READ] + q->rq.count[WRITE]
rq                685 block/elevator.c 		rq->cmd_flags |= REQ_ORDERED_COLOR;
rq                687 block/elevator.c 	if (rq->cmd_flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
rq                691 block/elevator.c 		if (blk_barrier_rq(rq))
rq                704 block/elevator.c 		if (blk_fs_request(rq) || blk_discard_rq(rq)) {
rq                705 block/elevator.c 			q->end_sector = rq_end_sector(rq);
rq                706 block/elevator.c 			q->boundary_rq = rq;
rq                708 block/elevator.c 	} else if (!(rq->cmd_flags & REQ_ELVPRIV) &&
rq                715 block/elevator.c 	elv_insert(q, rq, where);
rq                725 block/elevator.c 	__elv_add_request(q, rq, where, plug);
rq                732 block/elevator.c 	struct request *rq;
rq                736 block/elevator.c 			rq = list_entry_rq(q->queue_head.next);
rq                737 block/elevator.c 			if (blk_do_ordered(q, &rq))
rq                738 block/elevator.c 				return rq;
rq                748 block/elevator.c 	struct request *rq;
rq                751 block/elevator.c 	while ((rq = __elv_next_request(q)) != NULL) {
rq                756 block/elevator.c 		if (blk_empty_barrier(rq)) {
rq                757 block/elevator.c 			__blk_end_request(rq, 0, blk_rq_bytes(rq));
rq                760 block/elevator.c 		if (!(rq->cmd_flags & REQ_STARTED)) {
rq                766 block/elevator.c 			if (blk_sorted_rq(rq))
rq                767 block/elevator.c 				elv_activate_rq(q, rq);
rq                774 block/elevator.c 			rq->cmd_flags |= REQ_STARTED;
rq                775 block/elevator.c 			blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
rq                781 block/elevator.c 			blk_add_timer(rq);
rq                784 block/elevator.c 		if (!q->boundary_rq || q->boundary_rq == rq) {
rq                785 block/elevator.c 			q->end_sector = rq_end_sector(rq);
rq                789 block/elevator.c 		if (rq->cmd_flags & REQ_DONTPREP)
rq                792 block/elevator.c 		if (q->dma_drain_size && rq->data_len) {
rq                799 block/elevator.c 			rq->nr_phys_segments++;
rq                805 block/elevator.c 		ret = q->prep_rq_fn(q, rq);
rq                815 block/elevator.c 			if (q->dma_drain_size && rq->data_len &&
rq                816 block/elevator.c 			    !(rq->cmd_flags & REQ_DONTPREP)) {
rq                821 block/elevator.c 				--rq->nr_phys_segments;
rq                824 block/elevator.c 			rq = NULL;
rq                827 block/elevator.c 			rq->cmd_flags |= REQ_QUIET;
rq                828 block/elevator.c 			__blk_end_request(rq, -EIO, blk_rq_bytes(rq));
rq                835 block/elevator.c 	return rq;
rq                841 block/elevator.c 	BUG_ON(list_empty(&rq->queuelist));
rq                842 block/elevator.c 	BUG_ON(ELV_ON_HASH(rq));
rq                844 block/elevator.c 	list_del_init(&rq->queuelist);
rq                851 block/elevator.c 	if (blk_account_rq(rq))
rq                875 block/elevator.c 		return e->ops->elevator_latter_req_fn(q, rq);
rq                884 block/elevator.c 		return e->ops->elevator_former_req_fn(q, rq);
rq                893 block/elevator.c 		return e->ops->elevator_set_req_fn(q, rq, gfp_mask);
rq                895 block/elevator.c 	rq->elevator_private = NULL;
rq                904 block/elevator.c 		e->ops->elevator_put_req_fn(rq);
rq                919 block/elevator.c 	struct request *rq;
rq                922 block/elevator.c 		rq = list_entry_rq(q->queue_head.next);
rq                923 block/elevator.c 		rq->cmd_flags |= REQ_QUIET;
rq                924 block/elevator.c 		blk_add_trace_rq(q, rq, BLK_TA_ABORT);
rq                925 block/elevator.c 		__blk_end_request(rq, -EIO, blk_rq_bytes(rq));
rq                937 block/elevator.c 	if (blk_account_rq(rq)) {
rq                939 block/elevator.c 		if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn)
rq                940 block/elevator.c 			e->ops->elevator_completed_req_fn(q, rq);
rq               1111 block/elevator.c 	while (q->rq.elvpriv) {
rq               1219 block/elevator.c 	struct rb_node *rbprev = rb_prev(&rq->rb_node);
rq               1231 block/elevator.c 	struct rb_node *rbnext = rb_next(&rq->rb_node);
rq                 25 block/noop-iosched.c 		struct request *rq;
rq                 26 block/noop-iosched.c 		rq = list_entry(nd->queue.next, struct request, queuelist);
rq                 27 block/noop-iosched.c 		list_del_init(&rq->queuelist);
rq                 28 block/noop-iosched.c 		elv_dispatch_sort(q, rq);
rq                 38 block/noop-iosched.c 	list_add_tail(&rq->queuelist, &nd->queue);
rq                 53 block/noop-iosched.c 	if (rq->queuelist.prev == &nd->queue)
rq                 55 block/noop-iosched.c 	return list_entry(rq->queuelist.prev, struct request, queuelist);
rq                 63 block/noop-iosched.c 	if (rq->queuelist.next == &nd->queue)
rq                 65 block/noop-iosched.c 	return list_entry(rq->queuelist.next, struct request, queuelist);
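
The noop hits above cover nearly the whole scheduler; for readability, here is the add/dispatch pair reconstructed around those lines (structure as in block/noop-iosched.c of this era):

	struct noop_data {
		struct list_head queue;
	};

	static void noop_add_request(struct request_queue *q, struct request *rq)
	{
		struct noop_data *nd = q->elevator->elevator_data;

		list_add_tail(&rq->queuelist, &nd->queue);
	}

	static int noop_dispatch(struct request_queue *q, int force)
	{
		struct noop_data *nd = q->elevator->elevator_data;

		if (!list_empty(&nd->queue)) {
			struct request *rq;

			rq = list_entry(nd->queue.next, struct request, queuelist);
			list_del_init(&rq->queuelist);
			elv_dispatch_sort(q, rq);
			return 1;
		}
		return 0;
	}
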
rq                195 block/scsi_ioctl.c 	if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
rq                197 block/scsi_ioctl.c 	if (blk_verify_command(&q->cmd_filter, rq->cmd,
rq                204 block/scsi_ioctl.c 	rq->cmd_len = hdr->cmd_len;
rq                205 block/scsi_ioctl.c 	rq->cmd_type = REQ_TYPE_BLOCK_PC;
rq                207 block/scsi_ioctl.c 	rq->timeout = msecs_to_jiffies(hdr->timeout);
rq                208 block/scsi_ioctl.c 	if (!rq->timeout)
rq                209 block/scsi_ioctl.c 		rq->timeout = q->sg_timeout;
rq                210 block/scsi_ioctl.c 	if (!rq->timeout)
rq                211 block/scsi_ioctl.c 		rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
rq                222 block/scsi_ioctl.c 	blk_rq_unmap_user(rq->bio);
rq                223 block/scsi_ioctl.c 	blk_put_request(rq);
rq                235 block/scsi_ioctl.c 	hdr->status = rq->errors & 0xff;
rq                236 block/scsi_ioctl.c 	hdr->masked_status = status_byte(rq->errors);
rq                237 block/scsi_ioctl.c 	hdr->msg_status = msg_byte(rq->errors);
rq                238 block/scsi_ioctl.c 	hdr->host_status = host_byte(rq->errors);
rq                239 block/scsi_ioctl.c 	hdr->driver_status = driver_byte(rq->errors);
rq                243 block/scsi_ioctl.c 	hdr->resid = rq->data_len;
rq                246 block/scsi_ioctl.c 	if (rq->sense_len && hdr->sbp) {
rq                247 block/scsi_ioctl.c 		int len = min((unsigned int) hdr->mx_sb_len, rq->sense_len);
rq                249 block/scsi_ioctl.c 		if (!copy_to_user(hdr->sbp, rq->sense, len))
rq                255 block/scsi_ioctl.c 	rq->bio = bio;
rq                256 block/scsi_ioctl.c 	r = blk_unmap_sghdr_rq(rq, hdr);
rq                268 block/scsi_ioctl.c 	struct request *rq;
rq                292 block/scsi_ioctl.c 	rq = blk_get_request(q, writing ? WRITE : READ, GFP_KERNEL);
rq                293 block/scsi_ioctl.c 	if (!rq)
rq                296 block/scsi_ioctl.c 	if (blk_fill_sghdr_rq(q, rq, hdr, file)) {
rq                297 block/scsi_ioctl.c 		blk_put_request(rq);
rq                317 block/scsi_ioctl.c 		ret = blk_rq_map_user_iov(q, rq, NULL, iov, hdr->iovec_count,
rq                321 block/scsi_ioctl.c 		ret = blk_rq_map_user(q, rq, NULL, hdr->dxferp, hdr->dxfer_len,
rq                327 block/scsi_ioctl.c 	bio = rq->bio;
rq                329 block/scsi_ioctl.c 	rq->sense = sense;
rq                330 block/scsi_ioctl.c 	rq->sense_len = 0;
rq                331 block/scsi_ioctl.c 	rq->retries = 0;
rq                339 block/scsi_ioctl.c 	blk_execute_rq(q, bd_disk, rq, 0);
rq                343 block/scsi_ioctl.c 	return blk_complete_sghdr_rq(rq, hdr, bio);
rq                345 block/scsi_ioctl.c 	blk_put_request(rq);
rq                386 block/scsi_ioctl.c 	struct request *rq;
rq                414 block/scsi_ioctl.c 	rq = blk_get_request(q, in_len ? WRITE : READ, __GFP_WAIT);
rq                422 block/scsi_ioctl.c 	rq->cmd_len = cmdlen;
rq                423 block/scsi_ioctl.c 	if (copy_from_user(rq->cmd, sic->data, cmdlen))
rq                433 block/scsi_ioctl.c 	err = blk_verify_command(&q->cmd_filter, rq->cmd, write_perm);
rq                438 block/scsi_ioctl.c 	rq->retries = 5;
rq                443 block/scsi_ioctl.c 		rq->timeout = FORMAT_UNIT_TIMEOUT;
rq                444 block/scsi_ioctl.c 		rq->retries = 1;
rq                447 block/scsi_ioctl.c 		rq->timeout = START_STOP_TIMEOUT;
rq                450 block/scsi_ioctl.c 		rq->timeout = MOVE_MEDIUM_TIMEOUT;
rq                453 block/scsi_ioctl.c 		rq->timeout = READ_ELEMENT_STATUS_TIMEOUT;
rq                456 block/scsi_ioctl.c 		rq->timeout = READ_DEFECT_DATA_TIMEOUT;
rq                457 block/scsi_ioctl.c 		rq->retries = 1;
rq                460 block/scsi_ioctl.c 		rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
rq                464 block/scsi_ioctl.c 	if (bytes && blk_rq_map_kern(q, rq, buffer, bytes, __GFP_WAIT)) {
rq                470 block/scsi_ioctl.c 	rq->sense = sense;
rq                471 block/scsi_ioctl.c 	rq->sense_len = 0;
rq                472 block/scsi_ioctl.c 	rq->cmd_type = REQ_TYPE_BLOCK_PC;
rq                474 block/scsi_ioctl.c 	blk_execute_rq(q, disk, rq, 0);
rq                477 block/scsi_ioctl.c 	err = rq->errors & 0xff;	/* only 8 bit SCSI status */
rq                479 block/scsi_ioctl.c 		if (rq->sense_len && rq->sense) {
rq                480 block/scsi_ioctl.c 			bytes = (OMAX_SB_LEN > rq->sense_len) ?
rq                481 block/scsi_ioctl.c 				rq->sense_len : OMAX_SB_LEN;
rq                482 block/scsi_ioctl.c 			if (copy_to_user(sic->data, rq->sense, bytes))
rq                492 block/scsi_ioctl.c 	blk_put_request(rq);
rq                501 block/scsi_ioctl.c 	struct request *rq;
rq                504 block/scsi_ioctl.c 	rq = blk_get_request(q, WRITE, __GFP_WAIT);
rq                505 block/scsi_ioctl.c 	rq->cmd_type = REQ_TYPE_BLOCK_PC;
rq                506 block/scsi_ioctl.c 	rq->data = NULL;
rq                507 block/scsi_ioctl.c 	rq->data_len = 0;
rq                508 block/scsi_ioctl.c 	rq->extra_len = 0;
rq                509 block/scsi_ioctl.c 	rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
rq                510 block/scsi_ioctl.c 	rq->cmd[0] = cmd;
rq                511 block/scsi_ioctl.c 	rq->cmd[4] = data;
rq                512 block/scsi_ioctl.c 	rq->cmd_len = 6;
rq                513 block/scsi_ioctl.c 	err = blk_execute_rq(q, bd_disk, rq, 0);
rq                514 block/scsi_ioctl.c 	blk_put_request(rq);
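The block/scsi_ioctl.c lines above all follow one passthrough shape: allocate a request, mark it REQ_TYPE_BLOCK_PC, fill in the CDB and timeout (the larger call sites also map user data and a sense buffer), execute it synchronously, then drop the reference. A minimal sketch of that shape under the 2.6-era block API shown in the listing (demo_send_simple_cmd and its parameters are made up; the blk_* calls and request fields are the ones visible above):

	#include <linux/blkdev.h>

	/* Sketch only: issue a 6-byte packet command with no data transfer,
	 * mirroring the blk_get_request()/blk_execute_rq()/blk_put_request()
	 * sequence in the listing. */
	static int demo_send_simple_cmd(struct request_queue *q,
					struct gendisk *bd_disk, int cmd, int data)
	{
		struct request *rq;
		int err;

		rq = blk_get_request(q, WRITE, __GFP_WAIT);	/* sleeps rather than fail */
		rq->cmd_type = REQ_TYPE_BLOCK_PC;
		rq->data = NULL;
		rq->data_len = 0;
		rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
		rq->cmd[0] = cmd;
		rq->cmd[4] = data;
		rq->cmd_len = 6;

		err = blk_execute_rq(q, bd_disk, rq, 0);	/* waits for completion */
		blk_put_request(rq);
		return err;
	}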
rq                134 fs/dlm/lock.c  	__dlm_compat_matrix[(gr)->lkb_grmode + 1][(rq)->lkb_rqmode + 1]
rq               1798 fs/dlm/lock.c  	if ((gr->lkb_grmode == DLM_LOCK_PR && rq->lkb_rqmode == DLM_LOCK_CW) ||
rq               1799 fs/dlm/lock.c  	    (gr->lkb_grmode == DLM_LOCK_CW && rq->lkb_rqmode == DLM_LOCK_PR)) {
rq               1805 fs/dlm/lock.c  	if (gr->lkb_highbast < rq->lkb_rqmode && !modes_compat(gr, rq))
rq                215 fs/ncpfs/sock.c 	struct ncp_request_reply *rq;
rq                220 fs/ncpfs/sock.c 	rq = server->tx.creq;
rq                221 fs/ncpfs/sock.c 	if (!rq)
rq                225 fs/ncpfs/sock.c 	memcpy(iovc, rq->tx_ciov, rq->tx_iovlen * sizeof(iov[0]));
rq                226 fs/ncpfs/sock.c 	result = do_send(server->ncp_sock, iovc, rq->tx_iovlen,
rq                227 fs/ncpfs/sock.c 			 rq->tx_totallen, MSG_NOSIGNAL | MSG_DONTWAIT);
rq                234 fs/ncpfs/sock.c 		__ncp_abort_request(server, rq, result);
rq                237 fs/ncpfs/sock.c 	if (result >= rq->tx_totallen) {
rq                238 fs/ncpfs/sock.c 		server->rcv.creq = rq;
rq                242 fs/ncpfs/sock.c 	rq->tx_totallen -= result;
rq                243 fs/ncpfs/sock.c 	iov = rq->tx_ciov;
rq                247 fs/ncpfs/sock.c 		rq->tx_iovlen--;
rq                251 fs/ncpfs/sock.c 	rq->tx_ciov = iov;
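The fs/ncpfs/sock.c lines above handle a short send: when do_send() writes fewer bytes than tx_totallen, the remaining length is reduced and tx_ciov is advanced past the entries that were transmitted completely. A plain user-space C illustration of that bookkeeping (the function and its names are made up; only the arithmetic matches the listing):

	#include <stddef.h>
	#include <sys/uio.h>

	/* Skip iovec entries that were sent in full and trim the first
	 * partially-sent one, returning the new start of the vector. */
	static struct iovec *demo_advance_iov(struct iovec *iov, size_t *iovlen,
					      size_t sent)
	{
		while (*iovlen && sent >= iov->iov_len) {
			sent -= iov->iov_len;
			iov++;
			(*iovlen)--;
		}
		if (*iovlen && sent) {
			iov->iov_base = (char *)iov->iov_base + sent;
			iov->iov_len -= sent;
		}
		return iov;
	}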
rq                317 include/linux/blkdev.h 	struct request_list	rq;
rq                558 include/linux/blkdev.h #define blk_fs_request(rq)	((rq)->cmd_type == REQ_TYPE_FS)
rq                559 include/linux/blkdev.h #define blk_pc_request(rq)	((rq)->cmd_type == REQ_TYPE_BLOCK_PC)
rq                560 include/linux/blkdev.h #define blk_special_request(rq)	((rq)->cmd_type == REQ_TYPE_SPECIAL)
rq                561 include/linux/blkdev.h #define blk_sense_request(rq)	((rq)->cmd_type == REQ_TYPE_SENSE)
rq                563 include/linux/blkdev.h #define blk_noretry_request(rq)	((rq)->cmd_flags & REQ_FAILFAST)
rq                564 include/linux/blkdev.h #define blk_rq_started(rq)	((rq)->cmd_flags & REQ_STARTED)
rq                566 include/linux/blkdev.h #define blk_account_rq(rq)	(blk_rq_started(rq) && (blk_fs_request(rq) || blk_discard_rq(rq))) 
rq                568 include/linux/blkdev.h #define blk_pm_suspend_request(rq)	((rq)->cmd_type == REQ_TYPE_PM_SUSPEND)
rq                569 include/linux/blkdev.h #define blk_pm_resume_request(rq)	((rq)->cmd_type == REQ_TYPE_PM_RESUME)
rq                571 include/linux/blkdev.h 	(blk_pm_suspend_request(rq) || blk_pm_resume_request(rq))
rq                573 include/linux/blkdev.h #define blk_rq_cpu_valid(rq)	((rq)->cpu != -1)
rq                574 include/linux/blkdev.h #define blk_sorted_rq(rq)	((rq)->cmd_flags & REQ_SORTED)
rq                575 include/linux/blkdev.h #define blk_barrier_rq(rq)	((rq)->cmd_flags & REQ_HARDBARRIER)
rq                576 include/linux/blkdev.h #define blk_fua_rq(rq)		((rq)->cmd_flags & REQ_FUA)
rq                577 include/linux/blkdev.h #define blk_discard_rq(rq)	((rq)->cmd_flags & REQ_DISCARD)
rq                578 include/linux/blkdev.h #define blk_bidi_rq(rq)		((rq)->next_rq != NULL)
rq                579 include/linux/blkdev.h #define blk_empty_barrier(rq)	(blk_barrier_rq(rq) && blk_fs_request(rq) && !(rq)->hard_nr_sectors)
rq                581 include/linux/blkdev.h #define blk_queued_rq(rq)	(!list_empty(&(rq)->queuelist))
rq                585 include/linux/blkdev.h #define rq_data_dir(rq)		((rq)->cmd_flags & 1)
rq                590 include/linux/blkdev.h #define rq_is_sync(rq)		(rq_data_dir((rq)) == READ || (rq)->cmd_flags & REQ_RW_SYNC)
rq                591 include/linux/blkdev.h #define rq_is_meta(rq)		((rq)->cmd_flags & REQ_RW_META)
rq                624 include/linux/blkdev.h 	(!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && \
rq                625 include/linux/blkdev.h 	 (blk_discard_rq(rq) || blk_fs_request((rq))))
rq                683 include/linux/blkdev.h 	if ((rq->bio))			\
rq                684 include/linux/blkdev.h 		for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)
rq                870 include/linux/blkdev.h #define blk_rq_tagged(rq)		((rq)->cmd_flags & REQ_QUEUED)
rq               1032 include/linux/blkdev.h 	if (rq->bio == NULL)
rq               1035 include/linux/blkdev.h 	return bio_integrity(rq->bio);
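The include/linux/blkdev.h lines above are the request classification helpers: the cmd_type tests pick the request family, the cmd_flags tests refine it. A small sketch of how they are typically combined in a driver or I/O scheduler (demo_classify_rq is a made-up name; the helpers themselves are the ones listed above):

	#include <linux/blkdev.h>

	static void demo_classify_rq(struct request *rq)
	{
		if (blk_fs_request(rq) || blk_discard_rq(rq)) {
			/* regular block I/O: direction and sync-ness come
			 * from the cmd_flags-based helpers */
			int dir  = rq_data_dir(rq);	/* 0 = READ, 1 = WRITE */
			int sync = rq_is_sync(rq);
			(void)dir;
			(void)sync;
		} else if (blk_pc_request(rq)) {
			/* SCSI passthrough (REQ_TYPE_BLOCK_PC), e.g. from SG_IO */
		}
	}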
rq                200 include/linux/blktrace_api.h 	int rw = rq->cmd_flags & 0x03;
rq                205 include/linux/blktrace_api.h 	if (blk_discard_rq(rq))
rq                208 include/linux/blktrace_api.h 	if (blk_pc_request(rq)) {
rq                210 include/linux/blktrace_api.h 		__blk_add_trace(bt, 0, rq->data_len, rw, what, rq->errors, sizeof(rq->cmd), rq->cmd);
rq                213 include/linux/blktrace_api.h 		__blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9, rw, what, rq->errors, 0, NULL);
rq                173 include/linux/elevator.h #define rq_end_sector(rq)	((rq)->sector + (rq)->nr_sectors)
rq                180 include/linux/elevator.h #define rq_fifo_time(rq)	((unsigned long) (rq)->csd.list.next)
rq                181 include/linux/elevator.h #define rq_set_fifo_time(rq,exp)	((rq)->csd.list.next = (void *) (exp))
rq                184 include/linux/elevator.h 	list_del_init(&(rq)->queuelist);	\
rq                185 include/linux/elevator.h 	INIT_LIST_HEAD(&(rq)->csd.list);	\
rq                387 include/linux/ide.h 	struct request		*rq;		/* copy of request */
rq                432 include/linux/ide.h 	struct request *rq;
rq                590 include/linux/ide.h 	struct request		*rq;	/* current request */
rq                885 include/linux/ide.h 	struct request *rq;
rq               1119 include/linux/ide.h 	ide_startstop_t	(*error)(ide_drive_t *, struct request *rq, u8, u8);
rq                214 include/linux/isdn_ppp.h   struct ippp_buf_queue rq[NUM_RCV_BUFFS]; /* packet queue for isdn_ppp_read() */
rq                183 include/linux/mii.h 	return (struct mii_ioctl_data *) &rq->ifr_ifru;
rq                892 include/linux/sched.h struct rq;
rq                898 include/linux/sched.h 	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int wakeup);
rq                899 include/linux/sched.h 	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int sleep);
rq                900 include/linux/sched.h 	void (*yield_task) (struct rq *rq);
rq                903 include/linux/sched.h 	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int sync);
rq                905 include/linux/sched.h 	struct task_struct * (*pick_next_task) (struct rq *rq);
rq                906 include/linux/sched.h 	void (*put_prev_task) (struct rq *rq, struct task_struct *p);
rq                909 include/linux/sched.h 	unsigned long (*load_balance) (struct rq *this_rq, int this_cpu,
rq                910 include/linux/sched.h 			struct rq *busiest, unsigned long max_load_move,
rq                914 include/linux/sched.h 	int (*move_one_task) (struct rq *this_rq, int this_cpu,
rq                915 include/linux/sched.h 			      struct rq *busiest, struct sched_domain *sd,
rq                917 include/linux/sched.h 	void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
rq                918 include/linux/sched.h 	void (*post_schedule) (struct rq *this_rq);
rq                919 include/linux/sched.h 	void (*task_wake_up) (struct rq *this_rq, struct task_struct *task);
rq                922 include/linux/sched.h 	void (*set_curr_task) (struct rq *rq);
rq                923 include/linux/sched.h 	void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
rq                924 include/linux/sched.h 	void (*task_new) (struct rq *rq, struct task_struct *p);
rq                928 include/linux/sched.h 	void (*rq_online)(struct rq *rq);
rq                929 include/linux/sched.h 	void (*rq_offline)(struct rq *rq);
rq                931 include/linux/sched.h 	void (*switched_from) (struct rq *this_rq, struct task_struct *task,
rq                933 include/linux/sched.h 	void (*switched_to) (struct rq *this_rq, struct task_struct *task,
rq                935 include/linux/sched.h 	void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
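The include/linux/sched.h lines above spell out the struct sched_class hook table that each scheduling policy fills in. A sketch of how such a table is wired up, using only the hook signatures visible in the listing (the demo_* names are invented, and the real structure carries further members not shown here):

	#include <linux/sched.h>

	static void demo_enqueue_task(struct rq *rq, struct task_struct *p, int wakeup) { }
	static void demo_dequeue_task(struct rq *rq, struct task_struct *p, int sleep) { }
	static struct task_struct *demo_pick_next_task(struct rq *rq) { return NULL; }
	static void demo_put_prev_task(struct rq *rq, struct task_struct *p) { }
	static void demo_task_tick(struct rq *rq, struct task_struct *p, int queued) { }

	static const struct sched_class demo_sched_class = {
		.enqueue_task	= demo_enqueue_task,
		.dequeue_task	= demo_dequeue_task,
		.pick_next_task	= demo_pick_next_task,
		.put_prev_task	= demo_put_prev_task,
		.task_tick	= demo_task_tick,
	};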
rq                 99 include/linux/sunrpc/svcauth.h 	int	(*accept)(struct svc_rqst *rq, __be32 *authp);
rq                100 include/linux/sunrpc/svcauth.h 	int	(*release)(struct svc_rqst *rq);
rq                102 include/linux/sunrpc/svcauth.h 	int	(*set_client)(struct svc_rqst *rq);
rq                405 kernel/sched.c 	struct rq *rq;	/* cpu runqueue to which this cfs_rq is attached */
rq                465 kernel/sched.c 	struct rq *rq;
rq                614 kernel/sched.c 	rq->curr->sched_class->check_preempt_curr(rq, p, sync);
rq                620 kernel/sched.c 	return rq->cpu;
rq                643 kernel/sched.c 	rq->clock = sched_clock_cpu(cpu_of(rq));
rq                665 kernel/sched.c 	struct rq *rq = cpu_rq(cpu);
rq                668 kernel/sched.c 	ret = spin_is_locked(&rq->lock);
rq                856 kernel/sched.c 	return rq->curr == p;
rq                862 kernel/sched.c 	return task_current(rq, p);
rq                873 kernel/sched.c 	rq->lock.owner = current;
rq                880 kernel/sched.c 	spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
rq                882 kernel/sched.c 	spin_unlock_irq(&rq->lock);
rq                891 kernel/sched.c 	return task_current(rq, p);
rq                906 kernel/sched.c 	spin_unlock_irq(&rq->lock);
rq                908 kernel/sched.c 	spin_unlock(&rq->lock);
rq                933 kernel/sched.c static inline struct rq *__task_rq_lock(struct task_struct *p)
rq                937 kernel/sched.c 		struct rq *rq = task_rq(p);
rq                938 kernel/sched.c 		spin_lock(&rq->lock);
rq                939 kernel/sched.c 		if (likely(rq == task_rq(p)))
rq                940 kernel/sched.c 			return rq;
rq                941 kernel/sched.c 		spin_unlock(&rq->lock);
rq                950 kernel/sched.c static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
rq                953 kernel/sched.c 	struct rq *rq;
rq                957 kernel/sched.c 		rq = task_rq(p);
rq                958 kernel/sched.c 		spin_lock(&rq->lock);
rq                959 kernel/sched.c 		if (likely(rq == task_rq(p)))
rq                960 kernel/sched.c 			return rq;
rq                961 kernel/sched.c 		spin_unlock_irqrestore(&rq->lock, *flags);
rq                968 kernel/sched.c 	spin_unlock(&rq->lock);
rq                974 kernel/sched.c 	spin_unlock_irqrestore(&rq->lock, *flags);
rq                980 kernel/sched.c static struct rq *this_rq_lock(void)
rq                983 kernel/sched.c 	struct rq *rq;
rq                986 kernel/sched.c 	rq = this_rq();
rq                987 kernel/sched.c 	spin_lock(&rq->lock);
rq                989 kernel/sched.c 	return rq;
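The kernel/sched.c lock helpers above retry in __task_rq_lock() and task_rq_lock() until the runqueue they locked is still task_rq(p), so a concurrent migration cannot hand the caller the wrong lock. A sketch of the caller-side pattern (demo_inspect_task is a made-up name; task_rq_lock/task_rq_unlock are the helpers from the listing, so such a caller would also live in kernel/sched.c):

	static void demo_inspect_task(struct task_struct *p)
	{
		unsigned long flags;
		struct rq *rq;

		rq = task_rq_lock(p, &flags);
		/* p cannot change runqueues while rq->lock is held here */
		task_rq_unlock(rq, &flags);
	}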
rq               1013 kernel/sched.c 	if (!cpu_active(cpu_of(rq)))
rq               1015 kernel/sched.c 	return hrtimer_is_hres_active(&rq->hrtick_timer);
rq               1020 kernel/sched.c 	if (hrtimer_active(&rq->hrtick_timer))
rq               1021 kernel/sched.c 		hrtimer_cancel(&rq->hrtick_timer);
rq               1030 kernel/sched.c 	struct rq *rq = container_of(timer, struct rq, hrtick_timer);
rq               1032 kernel/sched.c 	WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
rq               1034 kernel/sched.c 	spin_lock(&rq->lock);
rq               1035 kernel/sched.c 	update_rq_clock(rq);
rq               1036 kernel/sched.c 	rq->curr->sched_class->task_tick(rq, rq->curr, 1);
rq               1037 kernel/sched.c 	spin_unlock(&rq->lock);
rq               1048 kernel/sched.c 	struct rq *rq = arg;
rq               1050 kernel/sched.c 	spin_lock(&rq->lock);
rq               1051 kernel/sched.c 	hrtimer_restart(&rq->hrtick_timer);
rq               1052 kernel/sched.c 	rq->hrtick_csd_pending = 0;
rq               1053 kernel/sched.c 	spin_unlock(&rq->lock);
rq               1063 kernel/sched.c 	struct hrtimer *timer = &rq->hrtick_timer;
rq               1068 kernel/sched.c 	if (rq == this_rq()) {
rq               1070 kernel/sched.c 	} else if (!rq->hrtick_csd_pending) {
rq               1071 kernel/sched.c 		__smp_call_function_single(cpu_of(rq), &rq->hrtick_csd);
rq               1072 kernel/sched.c 		rq->hrtick_csd_pending = 1;
rq               1107 kernel/sched.c 	hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay), HRTIMER_MODE_REL);
rq               1118 kernel/sched.c 	rq->hrtick_csd_pending = 0;
rq               1120 kernel/sched.c 	rq->hrtick_csd.flags = 0;
rq               1121 kernel/sched.c 	rq->hrtick_csd.func = __hrtick_start;
rq               1122 kernel/sched.c 	rq->hrtick_csd.info = rq;
rq               1125 kernel/sched.c 	hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
rq               1126 kernel/sched.c 	rq->hrtick_timer.function = hrtick;
rq               1127 kernel/sched.c 	rq->hrtick_timer.cb_mode = HRTIMER_CB_IRQSAFE_PERCPU;
rq               1179 kernel/sched.c 	struct rq *rq = cpu_rq(cpu);
rq               1182 kernel/sched.c 	if (!spin_trylock_irqsave(&rq->lock, flags))
rq               1185 kernel/sched.c 	spin_unlock_irqrestore(&rq->lock, flags);
rq               1201 kernel/sched.c 	struct rq *rq = cpu_rq(cpu);
rq               1213 kernel/sched.c 	if (rq->curr != rq->idle)
rq               1221 kernel/sched.c 	set_tsk_thread_flag(rq->idle, TIF_NEED_RESCHED);
rq               1225 kernel/sched.c 	if (!tsk_is_polling(rq->idle))
rq               1380 kernel/sched.c 	update_load_add(&rq->load, load);
rq               1385 kernel/sched.c 	update_load_sub(&rq->load, load);
rq               1440 kernel/sched.c 	struct rq *rq = cpu_rq(cpu);
rq               1442 kernel/sched.c 	if (rq->nr_running)
rq               1443 kernel/sched.c 		rq->avg_load_per_task = rq->load.weight / rq->nr_running;
rq               1445 kernel/sched.c 	return rq->avg_load_per_task;
rq               1530 kernel/sched.c 		struct rq *rq = cpu_rq(i);
rq               1533 kernel/sched.c 		spin_lock_irqsave(&rq->lock, flags);
rq               1535 kernel/sched.c 		spin_unlock_irqrestore(&rq->lock, flags);
rq               1577 kernel/sched.c 	spin_unlock(&rq->lock);
rq               1579 kernel/sched.c 	spin_lock(&rq->lock);
rq               1624 kernel/sched.c 	rq->nr_running++;
rq               1629 kernel/sched.c 	rq->nr_running--;
rq               1662 kernel/sched.c 	p->sched_class->enqueue_task(rq, p, wakeup);
rq               1675 kernel/sched.c 	p->sched_class->dequeue_task(rq, p, sleep);
rq               1731 kernel/sched.c 		rq->nr_uninterruptible--;
rq               1733 kernel/sched.c 	enqueue_task(rq, p, wakeup);
rq               1734 kernel/sched.c 	inc_nr_running(rq);
rq               1743 kernel/sched.c 		rq->nr_uninterruptible++;
rq               1745 kernel/sched.c 	dequeue_task(rq, p, sleep);
rq               1746 kernel/sched.c 	dec_nr_running(rq);
rq               1778 kernel/sched.c 			prev_class->switched_from(rq, p, running);
rq               1779 kernel/sched.c 		p->sched_class->switched_to(rq, p, running);
rq               1781 kernel/sched.c 		p->sched_class->prio_changed(rq, p, oldprio, running);
rq               1823 kernel/sched.c 	struct rq *old_rq = cpu_rq(old_cpu), *new_rq = cpu_rq(new_cpu);
rq               1865 kernel/sched.c 	struct rq *rq = task_rq(p);
rq               1871 kernel/sched.c 	if (!p->se.on_rq && !task_running(rq, p)) {
rq               1879 kernel/sched.c 	list_add(&req->list, &rq->migration_queue);
rq               1905 kernel/sched.c 	struct rq *rq;
rq               1914 kernel/sched.c 		rq = task_rq(p);
rq               1927 kernel/sched.c 		while (task_running(rq, p)) {
rq               1938 kernel/sched.c 		rq = task_rq_lock(p, &flags);
rq               1939 kernel/sched.c 		running = task_running(rq, p);
rq               1944 kernel/sched.c 		task_rq_unlock(rq, &flags);
rq               2021 kernel/sched.c 	struct rq *rq = cpu_rq(cpu);
rq               2027 kernel/sched.c 	return min(rq->cpu_load[type-1], total);
rq               2036 kernel/sched.c 	struct rq *rq = cpu_rq(cpu);
rq               2042 kernel/sched.c 	return max(rq->cpu_load[type-1], total);
rq               2215 kernel/sched.c 	struct rq *rq;
rq               2237 kernel/sched.c 	rq = task_rq_lock(p, &flags);
rq               2250 kernel/sched.c 	if (unlikely(task_running(rq, p)))
rq               2256 kernel/sched.c 		task_rq_unlock(rq, &flags);
rq               2258 kernel/sched.c 		rq = task_rq_lock(p, &flags);
rq               2270 kernel/sched.c 	schedstat_inc(rq, ttwu_count);
rq               2272 kernel/sched.c 		schedstat_inc(rq, ttwu_local);
rq               2295 kernel/sched.c 	update_rq_clock(rq);
rq               2296 kernel/sched.c 	activate_task(rq, p, 1);
rq               2302 kernel/sched.c 		p->pid, p->state, rq, p, rq->curr);
rq               2303 kernel/sched.c 	check_preempt_curr(rq, p, sync);
rq               2308 kernel/sched.c 		p->sched_class->task_wake_up(rq, p);
rq               2313 kernel/sched.c 	task_rq_unlock(rq, &flags);
rq               2417 kernel/sched.c 	struct rq *rq;
rq               2419 kernel/sched.c 	rq = task_rq_lock(p, &flags);
rq               2421 kernel/sched.c 	update_rq_clock(rq);
rq               2426 kernel/sched.c 		activate_task(rq, p, 0);
rq               2432 kernel/sched.c 		p->sched_class->task_new(rq, p);
rq               2433 kernel/sched.c 		inc_nr_running(rq);
rq               2437 kernel/sched.c 		p->pid, p->state, rq, p, rq->curr);
rq               2438 kernel/sched.c 	check_preempt_curr(rq, p, 0);
rq               2441 kernel/sched.c 		p->sched_class->task_wake_up(rq, p);
rq               2443 kernel/sched.c 	task_rq_unlock(rq, &flags);
rq               2522 kernel/sched.c 	prepare_lock_switch(rq, next);
rq               2544 kernel/sched.c 	struct mm_struct *mm = rq->prev_mm;
rq               2547 kernel/sched.c 	rq->prev_mm = NULL;
rq               2562 kernel/sched.c 	finish_lock_switch(rq, prev);
rq               2565 kernel/sched.c 		current->sched_class->post_schedule(rq);
rq               2588 kernel/sched.c 	struct rq *rq = this_rq();
rq               2590 kernel/sched.c 	finish_task_switch(rq, prev);
rq               2609 kernel/sched.c 	prepare_task_switch(rq, prev, next);
rq               2614 kernel/sched.c 		rq, prev, next);
rq               2633 kernel/sched.c 		rq->prev_mm = oldmm;
rq               2642 kernel/sched.c 	spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
rq               2847 kernel/sched.c 	struct rq *rq;
rq               2849 kernel/sched.c 	rq = task_rq_lock(p, &flags);
rq               2857 kernel/sched.c 		struct task_struct *mt = rq->migration_thread;
rq               2860 kernel/sched.c 		task_rq_unlock(rq, &flags);
rq               2868 kernel/sched.c 	task_rq_unlock(rq, &flags);
rq               2921 kernel/sched.c 	if (task_running(rq, p)) {
rq               2932 kernel/sched.c 	if (!task_hot(p, rq->clock, sd) ||
rq               2935 kernel/sched.c 		if (task_hot(p, rq->clock, sd)) {
rq               2943 kernel/sched.c 	if (task_hot(p, rq->clock, sd)) {
rq               3139 kernel/sched.c 			struct rq *rq;
rq               3144 kernel/sched.c 			rq = cpu_rq(i);
rq               3146 kernel/sched.c 			if (*sd_idle && rq->nr_running)
rq               3166 kernel/sched.c 			sum_nr_running += rq->nr_running;
rq               3409 kernel/sched.c static struct rq *
rq               3413 kernel/sched.c 	struct rq *busiest = NULL, *rq;
rq               3423 kernel/sched.c 		rq = cpu_rq(i);
rq               3426 kernel/sched.c 		if (rq->nr_running == 1 && wl > imbalance)
rq               3431 kernel/sched.c 			busiest = rq;
rq               3455 kernel/sched.c 	struct rq *busiest;
rq               3614 kernel/sched.c 	struct rq *busiest = NULL;
rq               3740 kernel/sched.c 	struct rq *target_rq;
rq               3864 kernel/sched.c 	struct rq *rq = cpu_rq(cpu);
rq               3896 kernel/sched.c 			if (load_balance(cpu, rq, sd, idle, &balance, &tmp)) {
rq               3929 kernel/sched.c 		rq->next_balance = next_balance;
rq               3940 kernel/sched.c 	struct rq *this_rq = cpu_rq(this_cpu);
rq               3955 kernel/sched.c 		struct rq *rq;
rq               3970 kernel/sched.c 			rq = cpu_rq(balance_cpu);
rq               3971 kernel/sched.c 			if (time_after(this_rq->next_balance, rq->next_balance))
rq               3972 kernel/sched.c 				this_rq->next_balance = rq->next_balance;
rq               3993 kernel/sched.c 	if (rq->in_nohz_recently && !rq->idle_at_tick) {
rq               3994 kernel/sched.c 		rq->in_nohz_recently = 0;
rq               4021 kernel/sched.c 	if (rq->idle_at_tick && atomic_read(&nohz.load_balancer) == cpu &&
rq               4031 kernel/sched.c 	if (rq->idle_at_tick && atomic_read(&nohz.load_balancer) != cpu &&
rq               4035 kernel/sched.c 	if (time_after_eq(jiffies, rq->next_balance))
rq               4062 kernel/sched.c 	struct rq *rq;
rq               4064 kernel/sched.c 	rq = task_rq_lock(p, &flags);
rq               4066 kernel/sched.c 	if (task_current(rq, p)) {
rq               4067 kernel/sched.c 		update_rq_clock(rq);
rq               4068 kernel/sched.c 		delta_exec = rq->clock - p->se.exec_start;
rq               4072 kernel/sched.c 	task_rq_unlock(rq, &flags);
rq               4138 kernel/sched.c 	struct rq *rq = this_rq();
rq               4154 kernel/sched.c 	else if (p != rq->idle)
rq               4156 kernel/sched.c 	else if (atomic_read(&rq->nr_iowait) > 0)
rq               4184 kernel/sched.c 	struct rq *rq = this_rq();
rq               4186 kernel/sched.c 	if (p == rq->idle) {
rq               4188 kernel/sched.c 		if (atomic_read(&rq->nr_iowait) > 0)
rq               4265 kernel/sched.c 	struct rq *rq = cpu_rq(cpu);
rq               4266 kernel/sched.c 	struct task_struct *curr = rq->curr;
rq               4270 kernel/sched.c 	spin_lock(&rq->lock);
rq               4271 kernel/sched.c 	update_rq_clock(rq);
rq               4272 kernel/sched.c 	update_cpu_load(rq);
rq               4273 kernel/sched.c 	curr->sched_class->task_tick(rq, curr, 0);
rq               4274 kernel/sched.c 	spin_unlock(&rq->lock);
rq               4277 kernel/sched.c 	rq->idle_at_tick = idle_cpu(cpu);
rq               4278 kernel/sched.c 	trigger_load_balance(rq, cpu);
rq               4399 kernel/sched.c 	if (likely(rq->nr_running == rq->cfs.nr_running)) {
rq               4400 kernel/sched.c 		p = fair_sched_class.pick_next_task(rq);
rq               4407 kernel/sched.c 		p = class->pick_next_task(rq);
rq               4425 kernel/sched.c 	struct rq *rq;
rq               4431 kernel/sched.c 	rq = cpu_rq(cpu);
rq               4433 kernel/sched.c 	prev = rq->curr;
rq               4442 kernel/sched.c 		hrtick_clear(rq);
rq               4448 kernel/sched.c 	update_rq_clock(rq);
rq               4449 kernel/sched.c 	spin_lock(&rq->lock);
rq               4456 kernel/sched.c 			deactivate_task(rq, prev, 1);
rq               4462 kernel/sched.c 		prev->sched_class->pre_schedule(rq, prev);
rq               4465 kernel/sched.c 	if (unlikely(!rq->nr_running))
rq               4466 kernel/sched.c 		idle_balance(cpu, rq);
rq               4468 kernel/sched.c 	prev->sched_class->put_prev_task(rq, prev);
rq               4469 kernel/sched.c 	next = pick_next_task(rq, prev);
rq               4474 kernel/sched.c 		rq->nr_switches++;
rq               4475 kernel/sched.c 		rq->curr = next;
rq               4478 kernel/sched.c 		context_switch(rq, prev, next); /* unlocks the rq */
rq               4484 kernel/sched.c 		rq = cpu_rq(cpu);
rq               4486 kernel/sched.c 		spin_unlock_irq(&rq->lock);
rq               4907 kernel/sched.c 	struct rq *rq;
rq               4912 kernel/sched.c 	rq = task_rq_lock(p, &flags);
rq               4913 kernel/sched.c 	update_rq_clock(rq);
rq               4917 kernel/sched.c 	running = task_current(rq, p);
rq               4919 kernel/sched.c 		dequeue_task(rq, p, 0);
rq               4921 kernel/sched.c 		p->sched_class->put_prev_task(rq, p);
rq               4931 kernel/sched.c 		p->sched_class->set_curr_task(rq);
rq               4933 kernel/sched.c 		enqueue_task(rq, p, 0);
rq               4935 kernel/sched.c 		check_class_changed(rq, p, prev_class, oldprio, running);
rq               4937 kernel/sched.c 	task_rq_unlock(rq, &flags);
rq               4946 kernel/sched.c 	struct rq *rq;
rq               4954 kernel/sched.c 	rq = task_rq_lock(p, &flags);
rq               4955 kernel/sched.c 	update_rq_clock(rq);
rq               4968 kernel/sched.c 		dequeue_task(rq, p, 0);
rq               4977 kernel/sched.c 		enqueue_task(rq, p, 0);
rq               4982 kernel/sched.c 		if (delta < 0 || (delta > 0 && task_running(rq, p)))
rq               4983 kernel/sched.c 			resched_task(rq->curr);
rq               4986 kernel/sched.c 	task_rq_unlock(rq, &flags);
rq               5128 kernel/sched.c 	struct rq *rq;
rq               5211 kernel/sched.c 	rq = __task_rq_lock(p);
rq               5215 kernel/sched.c 		__task_rq_unlock(rq);
rq               5219 kernel/sched.c 	update_rq_clock(rq);
rq               5221 kernel/sched.c 	running = task_current(rq, p);
rq               5223 kernel/sched.c 		deactivate_task(rq, p, 0);
rq               5225 kernel/sched.c 		p->sched_class->put_prev_task(rq, p);
rq               5228 kernel/sched.c 	__setscheduler(rq, p, policy, param->sched_priority);
rq               5231 kernel/sched.c 		p->sched_class->set_curr_task(rq);
rq               5233 kernel/sched.c 		activate_task(rq, p, 0);
rq               5235 kernel/sched.c 		check_class_changed(rq, p, prev_class, oldprio, running);
rq               5237 kernel/sched.c 	__task_rq_unlock(rq);
rq               5534 kernel/sched.c 	struct rq *rq = this_rq_lock();
rq               5536 kernel/sched.c 	schedstat_inc(rq, yld_count);
rq               5537 kernel/sched.c 	current->sched_class->yield_task(rq);
rq               5543 kernel/sched.c 	__release(rq->lock);
rq               5544 kernel/sched.c 	spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
rq               5545 kernel/sched.c 	_raw_spin_unlock(&rq->lock);
rq               5643 kernel/sched.c 	struct rq *rq = &__raw_get_cpu_var(runqueues);
rq               5646 kernel/sched.c 	atomic_inc(&rq->nr_iowait);
rq               5648 kernel/sched.c 	atomic_dec(&rq->nr_iowait);
rq               5655 kernel/sched.c 	struct rq *rq = &__raw_get_cpu_var(runqueues);
rq               5659 kernel/sched.c 	atomic_inc(&rq->nr_iowait);
rq               5661 kernel/sched.c 	atomic_dec(&rq->nr_iowait);
rq               5754 kernel/sched.c 		struct rq *rq;
rq               5756 kernel/sched.c 		rq = task_rq_lock(p, &flags);
rq               5757 kernel/sched.c 		if (rq->cfs.load.weight)
rq               5758 kernel/sched.c 			time_slice = NS_TO_JIFFIES(sched_slice(&rq->cfs, se));
rq               5759 kernel/sched.c 		task_rq_unlock(rq, &flags);
rq               5856 kernel/sched.c 	struct rq *rq = cpu_rq(cpu);
rq               5866 kernel/sched.c 	spin_lock_irqsave(&rq->lock, flags);
rq               5867 kernel/sched.c 	rq->curr = rq->idle = idle;
rq               5871 kernel/sched.c 	spin_unlock_irqrestore(&rq->lock, flags);
rq               5951 kernel/sched.c 	struct rq *rq;
rq               5954 kernel/sched.c 	rq = task_rq_lock(p, &flags);
rq               5979 kernel/sched.c 		task_rq_unlock(rq, &flags);
rq               5980 kernel/sched.c 		wake_up_process(rq->migration_thread);
rq               5986 kernel/sched.c 	task_rq_unlock(rq, &flags);
rq               6005 kernel/sched.c 	struct rq *rq_dest, *rq_src;
rq               6046 kernel/sched.c 	struct rq *rq;
rq               6048 kernel/sched.c 	rq = cpu_rq(cpu);
rq               6049 kernel/sched.c 	BUG_ON(rq->migration_thread != current);
rq               6056 kernel/sched.c 		spin_lock_irq(&rq->lock);
rq               6059 kernel/sched.c 			spin_unlock_irq(&rq->lock);
rq               6063 kernel/sched.c 		if (rq->active_balance) {
rq               6064 kernel/sched.c 			active_load_balance(rq, cpu);
rq               6065 kernel/sched.c 			rq->active_balance = 0;
rq               6068 kernel/sched.c 		head = &rq->migration_queue;
rq               6071 kernel/sched.c 			spin_unlock_irq(&rq->lock);
rq               6079 kernel/sched.c 		spin_unlock(&rq->lock);
rq               6119 kernel/sched.c 	struct rq *rq;
rq               6144 kernel/sched.c 			rq = task_rq_lock(p, &flags);
rq               6147 kernel/sched.c 			task_rq_unlock(rq, &flags);
rq               6172 kernel/sched.c 	struct rq *rq_dest = cpu_rq(any_online_cpu(*CPU_MASK_ALL_PTR));
rq               6209 kernel/sched.c 	struct rq *rq = cpu_rq(this_cpu);
rq               6210 kernel/sched.c 	struct task_struct *p = rq->idle;
rq               6220 kernel/sched.c 	spin_lock_irqsave(&rq->lock, flags);
rq               6222 kernel/sched.c 	__setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1);
rq               6224 kernel/sched.c 	update_rq_clock(rq);
rq               6225 kernel/sched.c 	activate_task(rq, p, 0);
rq               6227 kernel/sched.c 	spin_unlock_irqrestore(&rq->lock, flags);
rq               6248 kernel/sched.c 	struct rq *rq = cpu_rq(dead_cpu);
rq               6263 kernel/sched.c 	spin_unlock_irq(&rq->lock);
rq               6265 kernel/sched.c 	spin_lock_irq(&rq->lock);
rq               6273 kernel/sched.c 	struct rq *rq = cpu_rq(dead_cpu);
rq               6277 kernel/sched.c 		if (!rq->nr_running)
rq               6279 kernel/sched.c 		update_rq_clock(rq);
rq               6280 kernel/sched.c 		next = pick_next_task(rq, rq->curr);
rq               6283 kernel/sched.c 		next->sched_class->put_prev_task(rq, next);
rq               6459 kernel/sched.c 	if (!rq->online) {
rq               6462 kernel/sched.c 		cpu_set(rq->cpu, rq->rd->online);
rq               6463 kernel/sched.c 		rq->online = 1;
rq               6467 kernel/sched.c 				class->rq_online(rq);
rq               6474 kernel/sched.c 	if (rq->online) {
rq               6479 kernel/sched.c 				class->rq_offline(rq);
rq               6482 kernel/sched.c 		cpu_clear(rq->cpu, rq->rd->online);
rq               6483 kernel/sched.c 		rq->online = 0;
rq               6497 kernel/sched.c 	struct rq *rq;
rq               6508 kernel/sched.c 		rq = task_rq_lock(p, &flags);
rq               6509 kernel/sched.c 		__setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1);
rq               6510 kernel/sched.c 		task_rq_unlock(rq, &flags);
rq               6520 kernel/sched.c 		rq = cpu_rq(cpu);
rq               6521 kernel/sched.c 		spin_lock_irqsave(&rq->lock, flags);
rq               6522 kernel/sched.c 		if (rq->rd) {
rq               6523 kernel/sched.c 			BUG_ON(!cpu_isset(cpu, rq->rd->span));
rq               6525 kernel/sched.c 			set_rq_online(rq);
rq               6527 kernel/sched.c 		spin_unlock_irqrestore(&rq->lock, flags);
rq               6546 kernel/sched.c 		rq = cpu_rq(cpu);
rq               6547 kernel/sched.c 		kthread_stop(rq->migration_thread);
rq               6548 kernel/sched.c 		rq->migration_thread = NULL;
rq               6550 kernel/sched.c 		spin_lock_irq(&rq->lock);
rq               6551 kernel/sched.c 		update_rq_clock(rq);
rq               6552 kernel/sched.c 		deactivate_task(rq, rq->idle, 0);
rq               6553 kernel/sched.c 		rq->idle->static_prio = MAX_PRIO;
rq               6554 kernel/sched.c 		__setscheduler(rq, rq->idle, SCHED_NORMAL, 0);
rq               6555 kernel/sched.c 		rq->idle->sched_class = &idle_sched_class;
rq               6557 kernel/sched.c 		spin_unlock_irq(&rq->lock);
rq               6559 kernel/sched.c 		migrate_nr_uninterruptible(rq);
rq               6560 kernel/sched.c 		BUG_ON(rq->nr_running != 0);
rq               6567 kernel/sched.c 		spin_lock_irq(&rq->lock);
rq               6568 kernel/sched.c 		while (!list_empty(&rq->migration_queue)) {
rq               6571 kernel/sched.c 			req = list_entry(rq->migration_queue.next,
rq               6576 kernel/sched.c 		spin_unlock_irq(&rq->lock);
rq               6582 kernel/sched.c 		rq = cpu_rq(cpu);
rq               6583 kernel/sched.c 		spin_lock_irqsave(&rq->lock, flags);
rq               6584 kernel/sched.c 		if (rq->rd) {
rq               6585 kernel/sched.c 			BUG_ON(!cpu_isset(cpu, rq->rd->span));
rq               6586 kernel/sched.c 			set_rq_offline(rq);
rq               6588 kernel/sched.c 		spin_unlock_irqrestore(&rq->lock, flags);
rq               6812 kernel/sched.c 	spin_lock_irqsave(&rq->lock, flags);
rq               6814 kernel/sched.c 	if (rq->rd) {
rq               6815 kernel/sched.c 		struct root_domain *old_rd = rq->rd;
rq               6817 kernel/sched.c 		if (cpu_isset(rq->cpu, old_rd->online))
rq               6818 kernel/sched.c 			set_rq_offline(rq);
rq               6820 kernel/sched.c 		cpu_clear(rq->cpu, old_rd->span);
rq               6827 kernel/sched.c 	rq->rd = rd;
rq               6829 kernel/sched.c 	cpu_set(rq->cpu, rd->span);
rq               6830 kernel/sched.c 	if (cpu_isset(rq->cpu, cpu_online_map))
rq               6831 kernel/sched.c 		set_rq_online(rq);
rq               6833 kernel/sched.c 	spin_unlock_irqrestore(&rq->lock, flags);
rq               6872 kernel/sched.c 	struct rq *rq = cpu_rq(cpu);
rq               6895 kernel/sched.c 	rq_attach_root(rq, rd);
rq               6896 kernel/sched.c 	rcu_assign_pointer(rq->sd, sd);
rq               8021 kernel/sched.c 	cfs_rq->rq = rq;
rq               8054 kernel/sched.c 	rt_rq->rq = rq;
rq               8063 kernel/sched.c 	struct rq *rq = cpu_rq(cpu);
rq               8065 kernel/sched.c 	init_cfs_rq(cfs_rq, rq);
rq               8068 kernel/sched.c 		list_add(&cfs_rq->leaf_cfs_rq_list, &rq->leaf_cfs_rq_list);
rq               8076 kernel/sched.c 		se->cfs_rq = &rq->cfs;
rq               8092 kernel/sched.c 	struct rq *rq = cpu_rq(cpu);
rq               8095 kernel/sched.c 	init_rt_rq(rt_rq, rq);
rq               8100 kernel/sched.c 		list_add(&rt_rq->leaf_rt_rq_list, &rq->leaf_rt_rq_list);
rq               8107 kernel/sched.c 		rt_se->rt_rq = &rq->rt;
rq               8198 kernel/sched.c 		struct rq *rq;
rq               8200 kernel/sched.c 		rq = cpu_rq(i);
rq               8201 kernel/sched.c 		spin_lock_init(&rq->lock);
rq               8202 kernel/sched.c 		rq->nr_running = 0;
rq               8203 kernel/sched.c 		init_cfs_rq(&rq->cfs, rq);
rq               8204 kernel/sched.c 		init_rt_rq(&rq->rt, rq);
rq               8207 kernel/sched.c 		INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
rq               8228 kernel/sched.c 		init_tg_cfs_entry(&init_task_group, &rq->cfs, NULL, i, 1, NULL);
rq               8231 kernel/sched.c 		init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, 0, NULL);
rq               8251 kernel/sched.c 		rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;
rq               8253 kernel/sched.c 		INIT_LIST_HEAD(&rq->leaf_rt_rq_list);
rq               8255 kernel/sched.c 		init_tg_rt_entry(&init_task_group, &rq->rt, NULL, i, 1, NULL);
rq               8257 kernel/sched.c 		init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, 0, NULL);
rq               8266 kernel/sched.c 			rq->cpu_load[j] = 0;
rq               8268 kernel/sched.c 		rq->sd = NULL;
rq               8269 kernel/sched.c 		rq->rd = NULL;
rq               8270 kernel/sched.c 		rq->active_balance = 0;
rq               8271 kernel/sched.c 		rq->next_balance = jiffies;
rq               8272 kernel/sched.c 		rq->push_cpu = 0;
rq               8273 kernel/sched.c 		rq->cpu = i;
rq               8274 kernel/sched.c 		rq->online = 0;
rq               8275 kernel/sched.c 		rq->migration_thread = NULL;
rq               8276 kernel/sched.c 		INIT_LIST_HEAD(&rq->migration_queue);
rq               8277 kernel/sched.c 		rq_attach_root(rq, &def_root_domain);
rq               8279 kernel/sched.c 		init_rq_hrtick(rq);
rq               8280 kernel/sched.c 		atomic_set(&rq->nr_iowait, 0);
rq               8353 kernel/sched.c 	update_rq_clock(rq);
rq               8356 kernel/sched.c 		deactivate_task(rq, p, 0);
rq               8357 kernel/sched.c 	__setscheduler(rq, p, SCHED_NORMAL, 0);
rq               8359 kernel/sched.c 		activate_task(rq, p, 0);
rq               8360 kernel/sched.c 		resched_task(rq->curr);
rq               8368 kernel/sched.c 	struct rq *rq;
rq               8396 kernel/sched.c 		rq = __task_rq_lock(p);
rq               8398 kernel/sched.c 		normalize_task(rq, p);
rq               8400 kernel/sched.c 		__task_rq_unlock(rq);
rq               8474 kernel/sched.c 	struct rq *rq;
rq               8487 kernel/sched.c 		rq = cpu_rq(i);
rq               8562 kernel/sched.c 	struct rq *rq;
rq               8576 kernel/sched.c 		rq = cpu_rq(i);
rq               8709 kernel/sched.c 	struct rq *rq;
rq               8711 kernel/sched.c 	rq = task_rq_lock(tsk, &flags);
rq               8713 kernel/sched.c 	update_rq_clock(rq);
rq               8715 kernel/sched.c 	running = task_current(rq, tsk);
rq               8719 kernel/sched.c 		dequeue_task(rq, tsk, 0);
rq               8721 kernel/sched.c 		tsk->sched_class->put_prev_task(rq, tsk);
rq               8731 kernel/sched.c 		tsk->sched_class->set_curr_task(rq);
rq               8733 kernel/sched.c 		enqueue_task(rq, tsk, 0);
rq               8735 kernel/sched.c 	task_rq_unlock(rq, &flags);
rq               8759 kernel/sched.c 	struct rq *rq = cfs_rq->rq;
rq               8762 kernel/sched.c 	spin_lock_irqsave(&rq->lock, flags);
rq               8764 kernel/sched.c 	spin_unlock_irqrestore(&rq->lock, flags);
rq                 59 kernel/sched_debug.c 	if (rq->curr == p)
rq                108 kernel/sched_debug.c 		print_task(m, rq, p);
rq                118 kernel/sched_debug.c 	struct rq *rq = &per_cpu(runqueues, cpu);
rq                141 kernel/sched_debug.c 	spin_lock_irqsave(&rq->lock, flags);
rq                147 kernel/sched_debug.c 	min_vruntime = rq->cfs.min_vruntime;
rq                149 kernel/sched_debug.c 	spin_unlock_irqrestore(&rq->lock, flags);
rq                165 kernel/sched_debug.c #define P(n) SEQ_printf(m, "  .%-30s: %d\n", #n, rq->n);
rq                227 kernel/sched_debug.c 	struct rq *rq = &per_cpu(runqueues, cpu);
rq                241 kernel/sched_debug.c 	SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rq->x))
rq                243 kernel/sched_debug.c 	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))
rq                247 kernel/sched_debug.c 		   rq->load.weight);
rq                266 kernel/sched_debug.c 	print_rq(m, rq, cpu);
rq                 88 kernel/sched_fair.c static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
rq                 90 kernel/sched_fair.c 	return cfs_rq->rq;
rq                127 kernel/sched_fair.c 	list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
rq                146 kernel/sched_fair.c static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
rq                148 kernel/sched_fair.c 	return container_of(cfs_rq, struct rq, cfs);
rq                164 kernel/sched_fair.c 	struct rq *rq = task_rq(p);
rq                166 kernel/sched_fair.c 	return &rq->cfs;
rq                181 kernel/sched_fair.c 		for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
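The kernel/sched_fair.c lines above show the two builds of the accessors: with group scheduling, cfs_rq->rq points back to the owning runqueue and the leaf cfs_rqs are walked with list_for_each_entry_rcu(); without it, the degenerate for() visits only &rq->cfs. A sketch of a walker under the group-scheduling build, as it would sit inside kernel/sched_fair.c (demo_total_leaf_weight is a made-up name; the list walk is the same one shown in the iterator body above):

	static unsigned long demo_total_leaf_weight(struct rq *rq)
	{
		struct cfs_rq *cfs_rq;
		unsigned long sum = 0;

		list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
			sum += cfs_rq->load.weight;
		return sum;
	}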
rq                747 kernel/sched_fair.c 	struct rq *rq = rq_of(cfs_rq);
rq                748 kernel/sched_fair.c 	u64 pair_slice = rq->clock - cfs_rq->pair_start;
rq                751 kernel/sched_fair.c 		cfs_rq->pair_start = rq->clock;
rq                828 kernel/sched_fair.c 	WARN_ON(task_rq(p) != rq);
rq                830 kernel/sched_fair.c 	if (hrtick_enabled(rq) && cfs_rq->nr_running > 1) {
rq                836 kernel/sched_fair.c 			if (rq->curr == p)
rq                845 kernel/sched_fair.c 		if (rq->curr != p)
rq                848 kernel/sched_fair.c 		hrtick_start(rq, delta);
rq                876 kernel/sched_fair.c 	hrtick_start_fair(rq, rq->curr);
rq                898 kernel/sched_fair.c 	hrtick_start_fair(rq, rq->curr);
rq                908 kernel/sched_fair.c 	struct task_struct *curr = rq->curr;
rq                919 kernel/sched_fair.c 		update_rq_clock(rq);
rq               1161 kernel/sched_fair.c 	struct rq *this_rq;
rq               1239 kernel/sched_fair.c 	struct task_struct *curr = rq->curr;
rq               1245 kernel/sched_fair.c 		update_rq_clock(rq);
rq               1288 kernel/sched_fair.c 	struct cfs_rq *cfs_rq = &rq->cfs;
rq               1300 kernel/sched_fair.c 	hrtick_start_fair(rq, p);
rq               1505 kernel/sched_fair.c 		resched_task(rq->curr);
rq               1508 kernel/sched_fair.c 	enqueue_task_fair(rq, p, 0);
rq               1525 kernel/sched_fair.c 			resched_task(rq->curr);
rq               1527 kernel/sched_fair.c 		check_preempt_curr(rq, p, 0);
rq               1542 kernel/sched_fair.c 		resched_task(rq->curr);
rq               1544 kernel/sched_fair.c 		check_preempt_curr(rq, p, 0);
rq               1554 kernel/sched_fair.c 	struct sched_entity *se = &rq->curr->se;
rq                 19 kernel/sched_idletask.c 	resched_task(rq->idle);
rq                 24 kernel/sched_idletask.c 	schedstat_inc(rq, sched_goidle);
rq                 26 kernel/sched_idletask.c 	return rq->idle;
rq                 36 kernel/sched_idletask.c 	spin_unlock_irq(&rq->lock);
rq                 39 kernel/sched_idletask.c 	spin_lock_irq(&rq->lock);
rq                 77 kernel/sched_idletask.c 		resched_task(rq->curr);
rq                 79 kernel/sched_idletask.c 		check_preempt_curr(rq, p, 0);
rq                 94 kernel/sched_idletask.c 			resched_task(rq->curr);
rq                 96 kernel/sched_idletask.c 		check_preempt_curr(rq, p, 0);
rq                 10 kernel/sched_rt.c 	return atomic_read(&rq->rd->rto_count);
rq                 15 kernel/sched_rt.c 	if (!rq->online)
rq                 18 kernel/sched_rt.c 	cpu_set(rq->cpu, rq->rd->rto_mask);
rq                 27 kernel/sched_rt.c 	atomic_inc(&rq->rd->rto_count);
rq                 32 kernel/sched_rt.c 	if (!rq->online)
rq                 36 kernel/sched_rt.c 	atomic_dec(&rq->rd->rto_count);
rq                 37 kernel/sched_rt.c 	cpu_clear(rq->cpu, rq->rd->rto_mask);
rq                 42 kernel/sched_rt.c 	if (rq->rt.rt_nr_migratory && (rq->rt.rt_nr_running > 1)) {
rq                 43 kernel/sched_rt.c 		if (!rq->rt.overloaded) {
rq                 44 kernel/sched_rt.c 			rt_set_overload(rq);
rq                 45 kernel/sched_rt.c 			rq->rt.overloaded = 1;
rq                 47 kernel/sched_rt.c 	} else if (rq->rt.overloaded) {
rq                 48 kernel/sched_rt.c 		rt_clear_overload(rq);
rq                 49 kernel/sched_rt.c 		rq->rt.overloaded = 0;
rq                 80 kernel/sched_rt.c 	list_for_each_entry(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list)
rq                 82 kernel/sched_rt.c static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
rq                 84 kernel/sched_rt.c 	return rt_rq->rq;
rq                177 kernel/sched_rt.c 	for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
rq                179 kernel/sched_rt.c static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
rq                181 kernel/sched_rt.c 	return container_of(rt_rq, struct rq, rt);
rq                187 kernel/sched_rt.c 	struct rq *rq = task_rq(p);
rq                189 kernel/sched_rt.c 	return &rq->rt;
rq                294 kernel/sched_rt.c 	struct root_domain *rd = rq->rd;
rq                300 kernel/sched_rt.c 	for_each_leaf_rt_rq(rt_rq, rq) {
rq                373 kernel/sched_rt.c 	spin_lock_irqsave(&rq->lock, flags);
rq                374 kernel/sched_rt.c 	__disable_runtime(rq);
rq                375 kernel/sched_rt.c 	spin_unlock_irqrestore(&rq->lock, flags);
rq                388 kernel/sched_rt.c 	for_each_leaf_rt_rq(rt_rq, rq) {
rq                405 kernel/sched_rt.c 	spin_lock_irqsave(&rq->lock, flags);
rq                406 kernel/sched_rt.c 	__enable_runtime(rq);
rq                407 kernel/sched_rt.c 	spin_unlock_irqrestore(&rq->lock, flags);
rq                441 kernel/sched_rt.c 		struct rq *rq = rq_of_rt_rq(rt_rq);
rq                443 kernel/sched_rt.c 		spin_lock(&rq->lock);
rq                464 kernel/sched_rt.c 		spin_unlock(&rq->lock);
rq                514 kernel/sched_rt.c 	struct task_struct *curr = rq->curr;
rq                522 kernel/sched_rt.c 	delta_exec = rq->clock - curr->se.exec_start;
rq                529 kernel/sched_rt.c 	curr->se.exec_start = rq->clock;
rq                556 kernel/sched_rt.c 		struct rq *rq = rq_of_rt_rq(rt_rq);
rq                561 kernel/sched_rt.c 		if (rq->online)
rq                562 kernel/sched_rt.c 			cpupri_set(&rq->rd->cpupri, rq->cpu,
rq                569 kernel/sched_rt.c 		struct rq *rq = rq_of_rt_rq(rt_rq);
rq                571 kernel/sched_rt.c 		rq->rt.rt_nr_migratory++;
rq                613 kernel/sched_rt.c 		struct rq *rq = rq_of_rt_rq(rt_rq);
rq                614 kernel/sched_rt.c 		rq->rt.rt_nr_migratory--;
rq                618 kernel/sched_rt.c 		struct rq *rq = rq_of_rt_rq(rt_rq);
rq                620 kernel/sched_rt.c 		if (rq->online)
rq                621 kernel/sched_rt.c 			cpupri_set(&rq->rd->cpupri, rq->cpu,
rq                719 kernel/sched_rt.c 	inc_cpu_load(rq, p->se.load.weight);
rq                726 kernel/sched_rt.c 	update_curr_rt(rq);
rq                729 kernel/sched_rt.c 	dec_cpu_load(rq, p->se.load.weight);
rq                763 kernel/sched_rt.c 	requeue_task_rt(rq, rq->curr, 0);
rq                771 kernel/sched_rt.c 	struct rq *rq = task_rq(p);
rq                790 kernel/sched_rt.c 	if (unlikely(rt_task(rq->curr)) &&
rq                808 kernel/sched_rt.c 	if (rq->curr->rt.nr_cpus_allowed == 1)
rq                812 kernel/sched_rt.c 	    && cpupri_find(&rq->rd->cpupri, p, &mask))
rq                815 kernel/sched_rt.c 	if (!cpupri_find(&rq->rd->cpupri, rq->curr, &mask))
rq                823 kernel/sched_rt.c 	requeue_task_rt(rq, p, 1);
rq                824 kernel/sched_rt.c 	resched_task(rq->curr);
rq                834 kernel/sched_rt.c 	if (p->prio < rq->curr->prio) {
rq                835 kernel/sched_rt.c 		resched_task(rq->curr);
rq                852 kernel/sched_rt.c 	if (p->prio == rq->curr->prio && !need_resched())
rq                853 kernel/sched_rt.c 		check_preempt_equal_prio(rq, p);
rq                880 kernel/sched_rt.c 	rt_rq = &rq->rt;
rq                889 kernel/sched_rt.c 		rt_se = pick_next_rt_entity(rq, rt_rq);
rq                895 kernel/sched_rt.c 	p->se.exec_start = rq->clock;
rq                901 kernel/sched_rt.c 	update_curr_rt(rq);
rq                917 kernel/sched_rt.c 	if (!task_running(rq, p) &&
rq                933 kernel/sched_rt.c 	for_each_leaf_rt_rq(rt_rq, rq) {
rq                943 kernel/sched_rt.c 			if (pick_rt_task(rq, p, cpu)) {
rq               1035 kernel/sched_rt.c static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
rq               1037 kernel/sched_rt.c 	struct rq *lowest_rq = NULL;
rq               1044 kernel/sched_rt.c 		if ((cpu == -1) || (cpu == rq->cpu))
rq               1050 kernel/sched_rt.c 		if (double_lock_balance(rq, lowest_rq)) {
rq               1057 kernel/sched_rt.c 			if (unlikely(task_rq(task) != rq ||
rq               1060 kernel/sched_rt.c 				     task_running(rq, task) ||
rq               1074 kernel/sched_rt.c 		double_unlock_balance(rq, lowest_rq);
rq               1089 kernel/sched_rt.c 	struct rq *lowest_rq;
rq               1093 kernel/sched_rt.c 	if (!rq->rt.overloaded)
rq               1096 kernel/sched_rt.c 	next_task = pick_next_highest_task_rt(rq, -1);
rq               1101 kernel/sched_rt.c 	if (unlikely(next_task == rq->curr)) {
rq               1111 kernel/sched_rt.c 	if (unlikely(next_task->prio < rq->curr->prio)) {
rq               1112 kernel/sched_rt.c 		resched_task(rq->curr);
rq               1120 kernel/sched_rt.c 	lowest_rq = find_lock_lowest_rq(next_task, rq);
rq               1128 kernel/sched_rt.c 		task = pick_next_highest_task_rt(rq, -1);
rq               1137 kernel/sched_rt.c 	deactivate_task(rq, next_task, 0);
rq               1143 kernel/sched_rt.c 	double_unlock_balance(rq, lowest_rq);
rq               1165 kernel/sched_rt.c 	while (push_rt_task(rq))
rq               1173 kernel/sched_rt.c 	struct rq *src_rq;
rq               1258 kernel/sched_rt.c 	if (unlikely(rt_task(prev)) && rq->rt.highest_prio > prev->prio)
rq               1259 kernel/sched_rt.c 		pull_rt_task(rq);
rq               1271 kernel/sched_rt.c 	if (unlikely(rq->rt.overloaded)) {
rq               1272 kernel/sched_rt.c 		spin_lock_irq(&rq->lock);
rq               1273 kernel/sched_rt.c 		push_rt_tasks(rq);
rq               1274 kernel/sched_rt.c 		spin_unlock_irq(&rq->lock);
rq               1284 kernel/sched_rt.c 	if (!task_running(rq, p) &&
rq               1285 kernel/sched_rt.c 	    !test_tsk_need_resched(rq->curr) &&
rq               1286 kernel/sched_rt.c 	    rq->rt.overloaded)
rq               1287 kernel/sched_rt.c 		push_rt_tasks(rq);
rq               1320 kernel/sched_rt.c 		struct rq *rq = task_rq(p);
rq               1323 kernel/sched_rt.c 			rq->rt.rt_nr_migratory++;
rq               1325 kernel/sched_rt.c 			BUG_ON(!rq->rt.rt_nr_migratory);
rq               1326 kernel/sched_rt.c 			rq->rt.rt_nr_migratory--;
rq               1329 kernel/sched_rt.c 		update_rt_migration(rq);
rq               1339 kernel/sched_rt.c 	if (rq->rt.overloaded)
rq               1340 kernel/sched_rt.c 		rt_set_overload(rq);
rq               1342 kernel/sched_rt.c 	__enable_runtime(rq);
rq               1344 kernel/sched_rt.c 	cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio);
rq               1350 kernel/sched_rt.c 	if (rq->rt.overloaded)
rq               1351 kernel/sched_rt.c 		rt_clear_overload(rq);
rq               1353 kernel/sched_rt.c 	__disable_runtime(rq);
rq               1355 kernel/sched_rt.c 	cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
rq               1372 kernel/sched_rt.c 	if (!rq->rt.rt_nr_running)
rq               1373 kernel/sched_rt.c 		pull_rt_task(rq);
rq               1396 kernel/sched_rt.c 		if (rq->rt.overloaded && push_rt_task(rq) &&
rq               1398 kernel/sched_rt.c 		    rq != task_rq(p))
rq               1401 kernel/sched_rt.c 		if (check_resched && p->prio < rq->curr->prio)
rq               1402 kernel/sched_rt.c 			resched_task(rq->curr);
rq               1420 kernel/sched_rt.c 			pull_rt_task(rq);
rq               1427 kernel/sched_rt.c 		if (p->prio > rq->rt.highest_prio && rq->curr == p)
rq               1440 kernel/sched_rt.c 		if (p->prio < rq->curr->prio)
rq               1441 kernel/sched_rt.c 			resched_task(rq->curr);
rq               1467 kernel/sched_rt.c 	update_curr_rt(rq);
rq               1469 kernel/sched_rt.c 	watchdog(rq, p);
rq               1488 kernel/sched_rt.c 		requeue_task_rt(rq, p, 0);
rq               1495 kernel/sched_rt.c 	struct task_struct *p = rq->curr;
rq               1497 kernel/sched_rt.c 	p->se.exec_start = rq->clock;
rq                 21 kernel/sched_stats.h 		struct rq *rq = cpu_rq(cpu);
rq                 30 kernel/sched_stats.h 		    cpu, rq->yld_both_empty,
rq                 31 kernel/sched_stats.h 		    rq->yld_act_empty, rq->yld_exp_empty, rq->yld_count,
rq                 32 kernel/sched_stats.h 		    rq->sched_switch, rq->sched_count, rq->sched_goidle,
rq                 33 kernel/sched_stats.h 		    rq->ttwu_count, rq->ttwu_local,
rq                 34 kernel/sched_stats.h 		    rq->rq_sched_info.cpu_time,
rq                 35 kernel/sched_stats.h 		    rq->rq_sched_info.run_delay, rq->rq_sched_info.pcount);
rq                106 kernel/sched_stats.h 	if (rq) {
rq                107 kernel/sched_stats.h 		rq->rq_sched_info.run_delay += delta;
rq                108 kernel/sched_stats.h 		rq->rq_sched_info.pcount++;
rq                118 kernel/sched_stats.h 	if (rq)
rq                119 kernel/sched_stats.h 		rq->rq_sched_info.cpu_time += delta;
rq                125 kernel/sched_stats.h 	if (rq)
rq                126 kernel/sched_stats.h 		rq->rq_sched_info.run_delay += delta;
rq                128 kernel/sched_stats.h # define schedstat_inc(rq, field)	do { (rq)->field++; } while (0)
rq                129 kernel/sched_stats.h # define schedstat_add(rq, field, amt)	do { (rq)->field += (amt); } while (0)
rq                247 kernel/sched_stats.h 	struct rq *rq = task_rq(prev);
rq                254 kernel/sched_stats.h 	if (prev != rq->idle)
rq                257 kernel/sched_stats.h 	if (next != rq->idle)
rq                 56 kernel/trace/trace_sched_switch.c 	struct rq *__rq;
rq                110 kernel/trace/trace_sched_switch.c 	struct rq *__rq;
rq                193 kernel/trace/trace_sched_wakeup.c 	struct rq *__rq;
rq                295 kernel/trace/trace_sched_wakeup.c 	struct rq *__rq;
rq                191 net/9p/trans_fd.c 	struct work_struct rq;
rq                415 net/9p/trans_fd.c 	INIT_WORK(&m->rq, p9_read_work);
rq                457 net/9p/trans_fd.c 	cancel_work_sync(&m->rq);
rq                530 net/9p/trans_fd.c 			queue_work(p9_mux_wq, &m->rq);
rq                705 net/9p/trans_fd.c 	m = container_of(work, struct p9_conn, rq);
rq                825 net/9p/trans_fd.c 			queue_work(p9_mux_wq, &m->rq);
rq                458 net/atm/br2684.c 	struct sk_buff_head *rq;
rq                515 net/atm/br2684.c 	rq = &sk_atm(atmvcc)->sk_receive_queue;
rq                517 net/atm/br2684.c 	spin_lock_irqsave(&rq->lock, flags);
rq                518 net/atm/br2684.c 	if (skb_queue_empty(rq)) {
rq                522 net/atm/br2684.c 		rq->prev->next = NULL;
rq                523 net/atm/br2684.c 		skb = rq->next;
rq                525 net/atm/br2684.c 	rq->prev = rq->next = (struct sk_buff *)rq;
rq                526 net/atm/br2684.c 	rq->qlen = 0;
rq                527 net/atm/br2684.c 	spin_unlock_irqrestore(&rq->lock, flags);
rq                455 net/atm/clip.c 	struct sk_buff_head *rq;
rq                477 net/atm/clip.c 	rq = &sk_atm(vcc)->sk_receive_queue;
rq                479 net/atm/clip.c 	spin_lock_irqsave(&rq->lock, flags);
rq                480 net/atm/clip.c 	if (skb_queue_empty(rq)) {
rq                484 net/atm/clip.c 		rq->prev->next = NULL;
rq                485 net/atm/clip.c 		skb = rq->next;
rq                487 net/atm/clip.c 	rq->prev = rq->next = (struct sk_buff *)rq;
rq                488 net/atm/clip.c 	rq->qlen = 0;
rq                489 net/atm/clip.c 	spin_unlock_irqrestore(&rq->lock, flags);
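Both net/atm call sites above drain sk_receive_queue by taking rq->lock once, detaching the whole list in place and resetting prev, next and qlen by hand, so the lock is held only briefly while the detached skbs are processed afterwards. For contrast, a per-packet drain of the same queue with the stock helper, which takes the lock internally (demo_drain_queue is a made-up name):

	#include <linux/skbuff.h>

	static void demo_drain_queue(struct sk_buff_head *rq)
	{
		struct sk_buff *skb;

		while ((skb = skb_dequeue(rq)) != NULL)
			dev_kfree_skb(skb);
	}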
rq                115 net/bridge/br_ioctl.c 	if (copy_from_user(args, rq->ifr_data, sizeof(args)))
rq                406 net/bridge/br_ioctl.c 		return old_dev_ioctl(dev, rq, cmd);
rq                410 net/bridge/br_ioctl.c 		return add_del_if(br, rq->ifr_ifindex, cmd == SIOCBRADDIF);
rq               1866 net/key/af_key.c 	if (rq->sadb_x_ipsecrequest_mode == 0)
rq               1869 net/key/af_key.c 	t->id.proto = rq->sadb_x_ipsecrequest_proto; /* XXX check proto */
rq               1870 net/key/af_key.c 	if ((mode = pfkey_mode_to_xfrm(rq->sadb_x_ipsecrequest_mode)) < 0)
rq               1873 net/key/af_key.c 	if (rq->sadb_x_ipsecrequest_level == IPSEC_LEVEL_USE)
rq               1875 net/key/af_key.c 	else if (rq->sadb_x_ipsecrequest_level == IPSEC_LEVEL_UNIQUE) {
rq               1876 net/key/af_key.c 		t->reqid = rq->sadb_x_ipsecrequest_reqid;
rq               1885 net/key/af_key.c 		u8 *sa = (u8 *) (rq + 1);
rq               1912 net/key/af_key.c 	struct sadb_x_ipsecrequest *rq = (void*)(pol+1);
rq               1915 net/key/af_key.c 		if ((err = parse_ipsecrequest(xp, rq)) < 0)
rq               1917 net/key/af_key.c 		len -= rq->sadb_x_ipsecrequest_len;
rq               1918 net/key/af_key.c 		rq = (void*)((u8*)rq + rq->sadb_x_ipsecrequest_len);
rq               2067 net/key/af_key.c 		struct sadb_x_ipsecrequest *rq;
rq               2080 net/key/af_key.c 		rq = (void*)skb_put(skb, req_size);
rq               2082 net/key/af_key.c 		memset(rq, 0, sizeof(*rq));
rq               2083 net/key/af_key.c 		rq->sadb_x_ipsecrequest_len = req_size;
rq               2084 net/key/af_key.c 		rq->sadb_x_ipsecrequest_proto = t->id.proto;
rq               2087 net/key/af_key.c 		rq->sadb_x_ipsecrequest_mode = mode;
rq               2088 net/key/af_key.c 		rq->sadb_x_ipsecrequest_level = IPSEC_LEVEL_REQUIRE;
rq               2090 net/key/af_key.c 			rq->sadb_x_ipsecrequest_level = IPSEC_LEVEL_UNIQUE;
rq               2092 net/key/af_key.c 			rq->sadb_x_ipsecrequest_level = IPSEC_LEVEL_USE;
rq               2093 net/key/af_key.c 		rq->sadb_x_ipsecrequest_reqid = t->reqid;
rq               2096 net/key/af_key.c 			u8 *sa = (void *)(rq + 1);
rq               2468 net/key/af_key.c 	struct sadb_x_ipsecrequest *rq;
rq               2521 net/key/af_key.c 	rq = (struct sadb_x_ipsecrequest *)(pol + 1);
rq               2528 net/key/af_key.c 		ret = ipsecrequests_to_migrate(rq, len, &m[i]);
rq               2533 net/key/af_key.c 			rq = (struct sadb_x_ipsecrequest *)((u8 *)rq + ret);
rq               3368 net/key/af_key.c 	struct sadb_x_ipsecrequest *rq;
rq               3376 net/key/af_key.c 	rq = (struct sadb_x_ipsecrequest *)skb_put(skb, size_req);
rq               3377 net/key/af_key.c 	memset(rq, 0, size_req);
rq               3378 net/key/af_key.c 	rq->sadb_x_ipsecrequest_len = size_req;
rq               3379 net/key/af_key.c 	rq->sadb_x_ipsecrequest_proto = proto;
rq               3380 net/key/af_key.c 	rq->sadb_x_ipsecrequest_mode = mode;
rq               3381 net/key/af_key.c 	rq->sadb_x_ipsecrequest_level = level;
rq               3382 net/key/af_key.c 	rq->sadb_x_ipsecrequest_reqid = reqid;
rq               3384 net/key/af_key.c 	sa = (u8 *) (rq + 1);
rq                698 net/sunrpc/cache.c 	struct cache_request *rq;
rq                722 net/sunrpc/cache.c 	rq = container_of(rp->q.list.next, struct cache_request, q.list);
rq                723 net/sunrpc/cache.c 	BUG_ON(rq->q.reader);
rq                725 net/sunrpc/cache.c 		rq->readers++;
rq                728 net/sunrpc/cache.c 	if (rp->offset == 0 && !test_bit(CACHE_PENDING, &rq->item->flags)) {
rq                731 net/sunrpc/cache.c 		list_move(&rp->q.list, &rq->q.list);
rq                734 net/sunrpc/cache.c 		if (rp->offset + count > rq->len)
rq                735 net/sunrpc/cache.c 			count = rq->len - rp->offset;
rq                737 net/sunrpc/cache.c 		if (copy_to_user(buf, rq->buf + rp->offset, count))
rq                740 net/sunrpc/cache.c 		if (rp->offset >= rq->len) {
rq                743 net/sunrpc/cache.c 			list_move(&rp->q.list, &rq->q.list);
rq                752 net/sunrpc/cache.c 		rq->readers--;
rq                753 net/sunrpc/cache.c 		if (rq->readers == 0 &&
rq                754 net/sunrpc/cache.c 		    !test_bit(CACHE_PENDING, &rq->item->flags)) {
rq                755 net/sunrpc/cache.c 			list_del(&rq->q.list);
rq                757 net/sunrpc/cache.c 			cache_put(rq->item, cd);
rq                758 net/sunrpc/cache.c 			kfree(rq->buf);
rq                759 net/sunrpc/cache.c 			kfree(rq);
rq                227 net/sunrpc/xprtrdma/xprt_rdma.h 	(rpcx_to_rdmad(rq->rq_task->tk_xprt).inline_rsize)
rq                230 net/sunrpc/xprtrdma/xprt_rdma.h 	(rpcx_to_rdmad(rq->rq_task->tk_xprt).inline_wsize)
rq                233 net/sunrpc/xprtrdma/xprt_rdma.h 	rpcx_to_rdmad(rq->rq_task->tk_xprt).padding