ioc 185 block/as-iosched.c spin_lock_irq(&ioc->lock);
ioc 186 block/as-iosched.c if (ioc->aic)
ioc 187 block/as-iosched.c free_as_io_context(ioc->aic);
ioc 188 block/as-iosched.c ioc->aic = NULL;
ioc 189 block/as-iosched.c spin_unlock_irq(&ioc->lock);
ioc 229 block/as-iosched.c struct io_context *ioc = get_io_context(GFP_ATOMIC, node);
ioc 230 block/as-iosched.c if (ioc && !ioc->aic) {
ioc 231 block/as-iosched.c ioc->aic = alloc_as_io_context();
ioc 232 block/as-iosched.c if (!ioc->aic) {
ioc 233 block/as-iosched.c put_io_context(ioc);
ioc 234 block/as-iosched.c ioc = NULL;
ioc 237 block/as-iosched.c return ioc;
ioc 652 block/as-iosched.c struct io_context *ioc;
ioc 655 block/as-iosched.c ioc = ad->io_context;
ioc 656 block/as-iosched.c BUG_ON(!ioc);
ioc 657 block/as-iosched.c spin_lock(&ioc->lock);
ioc 659 block/as-iosched.c if (rq && ioc == RQ_IOC(rq)) {
ioc 661 block/as-iosched.c spin_unlock(&ioc->lock);
ioc 670 block/as-iosched.c spin_unlock(&ioc->lock);
ioc 674 block/as-iosched.c aic = ioc->aic;
ioc 676 block/as-iosched.c spin_unlock(&ioc->lock);
ioc 682 block/as-iosched.c spin_unlock(&ioc->lock);
ioc 688 block/as-iosched.c spin_unlock(&ioc->lock);
ioc 709 block/as-iosched.c spin_unlock(&ioc->lock);
ioc 719 block/as-iosched.c spin_unlock(&ioc->lock);
ioc 726 block/as-iosched.c spin_unlock(&ioc->lock);
ioc 730 block/as-iosched.c spin_unlock(&ioc->lock);
ioc 735 block/as-iosched.c spin_unlock(&ioc->lock);
ioc 738 block/as-iosched.c spin_unlock(&ioc->lock);
ioc 904 block/as-iosched.c struct io_context *ioc;
ioc 908 block/as-iosched.c ioc = RQ_IOC(rq);
ioc 909 block/as-iosched.c if (ioc && ioc->aic) {
ioc 910 block/as-iosched.c BUG_ON(!atomic_read(&ioc->aic->nr_queued));
ioc 911 block/as-iosched.c atomic_dec(&ioc->aic->nr_queued);
ioc 990 block/as-iosched.c struct io_context *ioc = RQ_IOC(rq);
ioc 992 block/as-iosched.c copy_io_context(&ad->io_context, &ioc);
ioc 1330 block/as-iosched.c struct io_context *ioc;
ioc 1333 block/as-iosched.c ioc = as_get_io_context(q->node);
ioc 1334 block/as-iosched.c if (ad->io_context == ioc)
ioc 1336 block/as-iosched.c put_io_context(ioc);
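
Note: the as-iosched.c hits revolve around as_get_io_context(): the anticipatory scheduler takes a reference on the current task's io_context, lazily attaches its private as_io_context (ioc->aic), and drops the reference again if that allocation fails. Below is a minimal user-space sketch of the same get-reference / attach-payload / roll-back-on-failure pattern; ctx, payload, get_ctx() and put_ctx() are hypothetical stand-ins, not the kernel API.

    #include <stdlib.h>

    struct payload { int unused; };          /* stands in for struct as_io_context */

    struct ctx {                             /* stands in for struct io_context */
        int refcount;                        /* simplified, non-atomic refcount */
        struct payload *payload;             /* lazily attached, like ioc->aic */
    };

    static void put_ctx(struct ctx *c)
    {
        if (c && --c->refcount == 0) {
            free(c->payload);
            free(c);
        }
    }

    /* Take a reference, attach the payload if it is missing, and undo the
     * get when the allocation fails - the shape of as_get_io_context(). */
    static struct ctx *get_ctx(struct ctx *c)
    {
        if (!c)
            return NULL;
        c->refcount++;
        if (!c->payload) {
            c->payload = calloc(1, sizeof(*c->payload));
            if (!c->payload) {
                put_ctx(c);
                c = NULL;
            }
        }
        return c;
    }

    int main(void)
    {
        struct ctx *base = calloc(1, sizeof(*base));
        struct ctx *got;

        base->refcount = 1;
        got = get_ctx(base);
        put_ctx(got);            /* drop the reference taken by get_ctx() */
        put_ctx(base);           /* drop the initial reference */
        return 0;
    }
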
ioc 652 block/blk-core.c if (!ioc)
ioc 660 block/blk-core.c return ioc->nr_batch_requests == q->nr_batching ||
ioc 661 block/blk-core.c (ioc->nr_batch_requests > 0
ioc 662 block/blk-core.c && time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME));
ioc 673 block/blk-core.c if (!ioc || ioc_batching(q, ioc))
ioc 676 block/blk-core.c ioc->nr_batch_requests = q->nr_batching;
ioc 677 block/blk-core.c ioc->last_waited = jiffies;
ioc 724 block/blk-core.c struct io_context *ioc = NULL;
ioc 734 block/blk-core.c ioc = current_io_context(GFP_ATOMIC, q->node);
ioc 742 block/blk-core.c ioc_set_batching(q, ioc);
ioc 746 block/blk-core.c && !ioc_batching(q, ioc)) {
ioc 808 block/blk-core.c if (ioc_batching(q, ioc))
ioc 809 block/blk-core.c ioc->nr_batch_requests--;
ioc 831 block/blk-core.c struct io_context *ioc;
ioc 849 block/blk-core.c ioc = current_io_context(GFP_NOIO, q->node);
ioc 850 block/blk-core.c ioc_set_batching(q, ioc);
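
Note: the blk-core.c hits are the request-allocation batching logic: ioc_batching() reports a task as batching while it still has batch slots and its last wait was less than BLK_BATCH_TIME ago, and ioc_set_batching() re-arms that quota. A hedged user-space sketch of the same test follows, using a plain tick counter instead of jiffies (so it ignores the wraparound that time_before() handles); the names are stand-ins.

    #include <stdbool.h>
    #include <stdio.h>

    #define BATCH_TIME   20U   /* stand-in for BLK_BATCH_TIME, in ticks */
    #define NR_BATCHING  32    /* stand-in for q->nr_batching */

    struct batcher {
        int           nr_batch_requests;   /* like ioc->nr_batch_requests */
        unsigned long last_waited;         /* like ioc->last_waited */
    };

    /* Same shape as ioc_batching(): a full quota always counts, a partial
     * quota only counts while the time window is still open. */
    static bool is_batching(const struct batcher *b, unsigned long now)
    {
        return b->nr_batch_requests == NR_BATCHING ||
               (b->nr_batch_requests > 0 &&
                now < b->last_waited + BATCH_TIME);
    }

    /* Same shape as ioc_set_batching(): only re-arm a non-batching task. */
    static void set_batching(struct batcher *b, unsigned long now)
    {
        if (is_batching(b, now))
            return;
        b->nr_batch_requests = NR_BATCHING;
        b->last_waited = now;
    }

    int main(void)
    {
        struct batcher b = { 0, 0 };

        set_batching(&b, 0);
        b.nr_batch_requests = 5;                       /* pretend some slots were used */
        printf("t=10:  %d\n", is_batching(&b, 10));    /* 1: still inside the window */
        printf("t=100: %d\n", is_batching(&b, 100));   /* 0: window expired */
        return 0;
    }
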
ioc 20 block/blk-ioc.c if (!hlist_empty(&ioc->cic_list)) {
ioc 23 block/blk-ioc.c cic = list_entry(ioc->cic_list.first, struct cfq_io_context,
ioc 25 block/blk-ioc.c cic->dtor(ioc);
ioc 35 block/blk-ioc.c if (ioc == NULL)
ioc 38 block/blk-ioc.c BUG_ON(atomic_read(&ioc->refcount) == 0);
ioc 40 block/blk-ioc.c if (atomic_dec_and_test(&ioc->refcount)) {
ioc 42 block/blk-ioc.c if (ioc->aic && ioc->aic->dtor)
ioc 43 block/blk-ioc.c ioc->aic->dtor(ioc->aic);
ioc 44 block/blk-ioc.c cfq_dtor(ioc);
ioc 47 block/blk-ioc.c kmem_cache_free(iocontext_cachep, ioc);
ioc 58 block/blk-ioc.c if (!hlist_empty(&ioc->cic_list)) {
ioc 61 block/blk-ioc.c cic = list_entry(ioc->cic_list.first, struct cfq_io_context,
ioc 63 block/blk-ioc.c cic->exit(ioc);
ioc 71 block/blk-ioc.c struct io_context *ioc;
ioc 74 block/blk-ioc.c ioc = current->io_context;
ioc 78 block/blk-ioc.c if (atomic_dec_and_test(&ioc->nr_tasks)) {
ioc 79 block/blk-ioc.c if (ioc->aic && ioc->aic->exit)
ioc 80 block/blk-ioc.c ioc->aic->exit(ioc->aic);
ioc 81 block/blk-ioc.c cfq_exit(ioc);
ioc 83 block/blk-ioc.c put_io_context(ioc);
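
Note: blk-ioc.c keeps two counters on each io_context: refcount counts pointers to it and nr_tasks counts live tasks attached to it. put_io_context() runs the AS/CFQ destructors and frees the context when refcount reaches zero; exit_io_context() runs the schedulers' exit hooks when the last task leaves, then drops that task's reference. A compact user-space sketch of the two-counter teardown, with C11 atomics standing in for atomic_t and empty placeholders for the hooks:

    #include <stdatomic.h>
    #include <stdlib.h>

    struct ioctx {
        atomic_int refcount;   /* pointers to this context */
        atomic_int nr_tasks;   /* tasks still using it */
    };

    static void run_dtors(struct ioctx *c)      { (void)c; /* aic->dtor, cfq_dtor */ }
    static void run_exit_hooks(struct ioctx *c) { (void)c; /* aic->exit, cfq_exit */ }

    /* Like put_io_context(): the last reference tears the object down. */
    static void put_ioctx(struct ioctx *c)
    {
        if (!c)
            return;
        if (atomic_fetch_sub(&c->refcount, 1) == 1) {   /* just hit zero */
            run_dtors(c);
            free(c);
        }
    }

    /* Like exit_io_context(): the last task runs the exit hooks first,
     * then drops the reference it was holding. */
    static void task_exit_ioctx(struct ioctx *c)
    {
        if (atomic_fetch_sub(&c->nr_tasks, 1) == 1)
            run_exit_hooks(c);
        put_ioctx(c);
    }

    int main(void)
    {
        struct ioctx *c = calloc(1, sizeof(*c));

        if (!c)
            return 1;
        atomic_init(&c->refcount, 1);
        atomic_init(&c->nr_tasks, 1);
        task_exit_ioctx(c);        /* runs the hooks and frees the context */
        return 0;
    }
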
ioc 806 block/cfq-iosched.c put_io_context(cfqd->active_cic->ioc);
ioc 908 block/cfq-iosched.c if (!cic || !atomic_read(&cic->ioc->nr_tasks))
ioc 1063 block/cfq-iosched.c atomic_inc(&RQ_CIC(rq)->ioc->refcount);
ioc 1198 block/cfq-iosched.c hlist_for_each_entry_rcu(cic, n, &ioc->cic_list, cic_list)
ioc 1199 block/cfq-iosched.c func(ioc, cic);
ioc 1210 block/cfq-iosched.c __call_for_each_cic(ioc, func);
ioc 1249 block/cfq-iosched.c spin_lock_irqsave(&ioc->lock, flags);
ioc 1250 block/cfq-iosched.c radix_tree_delete(&ioc->radix_root, cic->dead_key);
ioc 1252 block/cfq-iosched.c spin_unlock_irqrestore(&ioc->lock, flags);
ioc 1270 block/cfq-iosched.c __call_for_each_cic(ioc, cic_free_func);
ioc 1286 block/cfq-iosched.c struct io_context *ioc = cic->ioc;
ioc 1297 block/cfq-iosched.c if (ioc->ioc_data == cic)
ioc 1298 block/cfq-iosched.c rcu_assign_pointer(ioc->ioc_data, NULL);
ioc 1332 block/cfq-iosched.c call_for_each_cic(ioc, cfq_exit_single_io_context);
ioc 1362 block/cfq-iosched.c ioprio_class = IOPRIO_PRIO_CLASS(ioc->ioprio);
ioc 1374 block/cfq-iosched.c cfqq->ioprio = task_ioprio(ioc);
ioc 1378 block/cfq-iosched.c cfqq->ioprio = task_ioprio(ioc);
ioc 1411 block/cfq-iosched.c new_cfqq = cfq_get_queue(cfqd, ASYNC, cic->ioc, GFP_ATOMIC);
ioc 1427 block/cfq-iosched.c call_for_each_cic(ioc, changed_ioprio);
ioc 1428 block/cfq-iosched.c ioc->ioprio_changed = 0;
ioc 1439 block/cfq-iosched.c cic = cfq_cic_lookup(cfqd, ioc);
ioc 1477 block/cfq-iosched.c cfq_init_prio_data(cfqq, ioc);
ioc 1515 block/cfq-iosched.c const int ioprio = task_ioprio(ioc);
ioc 1516 block/cfq-iosched.c const int ioprio_class = task_ioprio_class(ioc);
ioc 1526 block/cfq-iosched.c cfqq = cfq_find_alloc_queue(cfqd, is_sync, ioc, gfp_mask);
ioc 1554 block/cfq-iosched.c spin_lock_irqsave(&ioc->lock, flags);
ioc 1556 block/cfq-iosched.c BUG_ON(ioc->ioc_data == cic);
ioc 1558 block/cfq-iosched.c radix_tree_delete(&ioc->radix_root, (unsigned long) cfqd);
ioc 1560 block/cfq-iosched.c spin_unlock_irqrestore(&ioc->lock, flags);
ioc 1572 block/cfq-iosched.c if (unlikely(!ioc))
ioc 1580 block/cfq-iosched.c cic = rcu_dereference(ioc->ioc_data);
ioc 1587 block/cfq-iosched.c cic = radix_tree_lookup(&ioc->radix_root, (unsigned long) cfqd);
ioc 1594 block/cfq-iosched.c cfq_drop_dead_cic(cfqd, ioc, cic);
ioc 1599 block/cfq-iosched.c spin_lock_irqsave(&ioc->lock, flags);
ioc 1600 block/cfq-iosched.c rcu_assign_pointer(ioc->ioc_data, cic);
ioc 1601 block/cfq-iosched.c spin_unlock_irqrestore(&ioc->lock, flags);
ioc 1621 block/cfq-iosched.c cic->ioc = ioc;
ioc 1624 block/cfq-iosched.c spin_lock_irqsave(&ioc->lock, flags);
ioc 1625 block/cfq-iosched.c ret = radix_tree_insert(&ioc->radix_root,
ioc 1628 block/cfq-iosched.c hlist_add_head_rcu(&cic->cic_list, &ioc->cic_list);
ioc 1629 block/cfq-iosched.c spin_unlock_irqrestore(&ioc->lock, flags);
ioc 1654 block/cfq-iosched.c struct io_context *ioc = NULL;
ioc 1659 block/cfq-iosched.c ioc = get_io_context(gfp_mask, cfqd->queue->node);
ioc 1660 block/cfq-iosched.c if (!ioc)
ioc 1663 block/cfq-iosched.c cic = cfq_cic_lookup(cfqd, ioc);
ioc 1671 block/cfq-iosched.c if (cfq_cic_link(cfqd, ioc, cic, gfp_mask))
ioc 1676 block/cfq-iosched.c if (unlikely(ioc->ioprio_changed))
ioc 1677 block/cfq-iosched.c cfq_ioc_set_ioprio(ioc);
ioc 1683 block/cfq-iosched.c put_io_context(ioc);
ioc 1744 block/cfq-iosched.c if (!atomic_read(&cic->ioc->nr_tasks) || !cfqd->cfq_slice_idle ||
ioc 1883 block/cfq-iosched.c cfq_init_prio_data(cfqq, RQ_CIC(rq)->ioc);
ioc 2018 block/cfq-iosched.c cfq_init_prio_data(cfqq, cic->ioc);
ioc 2040 block/cfq-iosched.c put_io_context(RQ_CIC(rq)->ioc);
ioc 2073 block/cfq-iosched.c cfqq = cfq_get_queue(cfqd, is_sync, cic->ioc, gfp_mask);
ioc 2093 block/cfq-iosched.c put_io_context(cic->ioc);
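
Note: most of the cfq-iosched.c hits belong to cfq_cic_lookup() and cfq_cic_link(): each io_context stores one cfq_io_context per cfq_data in a radix tree keyed by the cfqd pointer, with ioc->ioc_data serving as an RCU-published one-entry "last hit" cache in front of it. A hedged sketch of that lookup shape, with a flat array in place of the radix tree and no RCU or locking:

    #include <stdio.h>
    #include <stddef.h>

    struct cic { const void *key; };    /* key plays the role of the cfqd pointer */

    struct ictx {
        struct cic *entries[16];        /* stand-in for ioc->radix_root */
        size_t      nr;
        struct cic *last_hit;           /* stand-in for ioc->ioc_data */
    };

    /* Try the one-entry cache, fall back to the full lookup, and refresh
     * the cache on a hit - the same idea as cfq_cic_lookup(). */
    static struct cic *cic_lookup(struct ictx *ic, const void *key)
    {
        struct cic *c = ic->last_hit;
        size_t i;

        if (c && c->key == key)
            return c;

        for (i = 0; i < ic->nr; i++) {
            if (ic->entries[i]->key == key) {
                ic->last_hit = ic->entries[i];
                return ic->entries[i];
            }
        }
        return NULL;
    }

    int main(void)
    {
        int qa, qb;                          /* two fake queues used as keys */
        struct cic a = { &qa }, b = { &qb };
        struct ictx ic = { { &a, &b }, 2, NULL };

        printf("%d\n", cic_lookup(&ic, &qb) == &b);   /* 1: found, now cached */
        printf("%d\n", cic_lookup(&ic, &qb) == &b);   /* 1: served from last_hit */
        return 0;
    }
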
ioc 33 fs/ioprio.c struct io_context *ioc;
ioc 45 fs/ioprio.c ioc = task->io_context;
ioc 48 fs/ioprio.c if (ioc)
ioc 51 fs/ioprio.c ioc = alloc_io_context(GFP_ATOMIC, -1);
ioc 52 fs/ioprio.c if (!ioc) {
ioc 56 fs/ioprio.c task->io_context = ioc;
ioc 60 fs/ioprio.c ioc->ioprio = ioprio;
ioc 61 fs/ioprio.c ioc->ioprio_changed = 1;
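
Note: the fs/ioprio.c hits are the write side of the priority: set_task_ioprio() lazily allocates an io_context for the target task if it has none, stores the new value in ioc->ioprio and raises ioc->ioprio_changed so CFQ re-reads it on the task's next request. A hedged sketch of that sequence (task locking and the alloc_io_context() details are omitted; the names are stand-ins):

    #include <stdlib.h>

    struct ioctx { int ioprio; int ioprio_changed; };
    struct task  { struct ioctx *io_context; };

    /* Lazily attach a context, then record the new priority and mark it
     * dirty - the same order of operations as set_task_ioprio(). */
    static int set_task_prio(struct task *t, int ioprio)
    {
        struct ioctx *c = t->io_context;

        if (!c) {
            c = calloc(1, sizeof(*c));
            if (!c)
                return -1;           /* -ENOMEM in the kernel */
            t->io_context = c;
        }
        c->ioprio = ioprio;
        c->ioprio_changed = 1;       /* later consumed by cfq_ioc_set_ioprio() */
        return 0;
    }

    int main(void)
    {
        struct task t = { NULL };

        return set_task_prio(&t, 4) ? 1 : 0;
    }
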
ioc 230 include/asm-parisc/dma-mapping.h struct ioc;
ioc 85 include/asm-parisc/ropes.h struct ioc ioc[MAX_IOC];
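
Note: the two asm-parisc hits are a different ioc altogether. There, struct ioc appears to be the per-controller state used by the PA-RISC I/O MMU drivers (ropes.h keeps an array of them, dma-mapping.h only forward-declares the type); it has nothing to do with the block layer's struct io_context.
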
ioc 40 include/linux/iocontext.h struct io_context *ioc;
ioc 94 include/linux/iocontext.h if (ioc && atomic_inc_not_zero(&ioc->refcount)) {
ioc 95 include/linux/iocontext.h atomic_inc(&ioc->nr_tasks);
ioc 96 include/linux/iocontext.h return ioc;
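
Note: the iocontext.h hits are ioc_task_link(), the take-a-reference-only-if-still-alive helper used at fork time: atomic_inc_not_zero() on refcount refuses to revive a context whose last reference is already gone, and nr_tasks is bumped alongside it. A user-space sketch of that try-get, with a C11 compare-exchange loop standing in for atomic_inc_not_zero():

    #include <stdatomic.h>
    #include <stddef.h>

    struct ioctx { atomic_int refcount; atomic_int nr_tasks; };

    /* Increment only while the counter is non-zero, like atomic_inc_not_zero(). */
    static int get_unless_zero(atomic_int *v)
    {
        int old = atomic_load(v);

        while (old != 0) {
            if (atomic_compare_exchange_weak(v, &old, old + 1))
                return 1;
        }
        return 0;
    }

    /* Mirrors ioc_task_link(): returns the context with both counters bumped,
     * or NULL if the last reference already went away. */
    static struct ioctx *task_link(struct ioctx *c)
    {
        if (c && get_unless_zero(&c->refcount)) {
            atomic_fetch_add(&c->nr_tasks, 1);
            return c;
        }
        return NULL;
    }

    int main(void)
    {
        struct ioctx c;

        atomic_init(&c.refcount, 1);
        atomic_init(&c.nr_tasks, 1);
        return task_link(&c) ? 0 : 1;     /* succeeds while refcount > 0 */
    }
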
ioc 51 include/linux/ioprio.h if (ioprio_valid(ioc->ioprio))
ioc 52 include/linux/ioprio.h return IOPRIO_PRIO_DATA(ioc->ioprio);
ioc 59 include/linux/ioprio.h if (ioprio_valid(ioc->ioprio))
ioc 60 include/linux/ioprio.h return IOPRIO_PRIO_CLASS(ioc->ioprio);
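
Note: task_ioprio() and task_ioprio_class() only unpack the single ioc->ioprio word: the high bits hold the scheduling class and the low bits the per-class level. The macros below mirror the IOPRIO_PRIO_* helpers from include/linux/ioprio.h (the class shift is 13 in this kernel); the demo values in main() are arbitrary:

    #include <stdio.h>

    #define IOPRIO_CLASS_SHIFT        13
    #define IOPRIO_PRIO_MASK          ((1UL << IOPRIO_CLASS_SHIFT) - 1)
    #define IOPRIO_PRIO_CLASS(mask)   ((mask) >> IOPRIO_CLASS_SHIFT)
    #define IOPRIO_PRIO_DATA(mask)    ((mask) & IOPRIO_PRIO_MASK)
    #define IOPRIO_PRIO_VALUE(class, data)  (((class) << IOPRIO_CLASS_SHIFT) | (data))

    int main(void)
    {
        /* Example: class 2 (best-effort), level 4 - arbitrary demo values. */
        unsigned long ioprio = IOPRIO_PRIO_VALUE(2UL, 4UL);

        printf("class=%lu data=%lu\n",
               IOPRIO_PRIO_CLASS(ioprio), IOPRIO_PRIO_DATA(ioprio));
        return 0;
    }
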
ioc 717 kernel/fork.c struct io_context *ioc = current->io_context;
ioc 719 kernel/fork.c if (!ioc)
ioc 725 kernel/fork.c tsk->io_context = ioc_task_link(ioc);
ioc 728 kernel/fork.c } else if (ioprio_valid(ioc->ioprio)) {
ioc 733 kernel/fork.c tsk->io_context->ioprio = ioc->ioprio;
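
Note: the kernel/fork.c hits are copy_io(): with CLONE_IO the child shares the parent's io_context through ioc_task_link(), otherwise a parent with a valid ioprio gets its value copied into a freshly allocated context for the child. A hedged sketch of that branch (simplified non-atomic counters; CLONE_IO_FLAG and the other names are stand-ins):

    #include <stdlib.h>

    struct ioctx { int refcount; int nr_tasks; int ioprio; };
    struct task  { struct ioctx *io_context; };

    #define CLONE_IO_FLAG 0x1   /* hypothetical stand-in for CLONE_IO */

    /* Roughly the shape of copy_io(): share on CLONE_IO, otherwise copy the
     * ioprio into a fresh context (error handling and locking omitted). */
    static int copy_io_ctx(unsigned flags, struct task *parent, struct task *child)
    {
        struct ioctx *c = parent->io_context;

        if (!c)
            return 0;

        if (flags & CLONE_IO_FLAG) {
            c->refcount++;                    /* like ioc_task_link() */
            c->nr_tasks++;
            child->io_context = c;
        } else if (c->ioprio) {               /* stands in for ioprio_valid() */
            child->io_context = calloc(1, sizeof(*c));
            if (!child->io_context)
                return -1;                    /* -ENOMEM in the kernel */
            child->io_context->refcount = 1;
            child->io_context->nr_tasks = 1;
            child->io_context->ioprio = c->ioprio;
        }
        return 0;
    }

    int main(void)
    {
        struct task parent = { NULL }, child = { NULL };

        return copy_io_ctx(0, &parent, &child);   /* nothing to share or copy */
    }
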
ioc 839 net/rxrpc/ar-ack.c int genbit, loop, nbit, ioc, ret, mtu;
ioc 1197 net/rxrpc/ar-ack.c ioc = 1;
ioc 1199 net/rxrpc/ar-ack.c ioc = 5;
ioc 1205 net/rxrpc/ar-ack.c ioc = 4;
ioc 1210 net/rxrpc/ar-ack.c ioc = 3;
ioc 1214 net/rxrpc/ar-ack.c ioc = 2;
ioc 1219 net/rxrpc/ar-ack.c &msg, iov, ioc, len);
ioc 533 net/rxrpc/ar-output.c int ret, ioc, segment, copied;
ioc 546 net/rxrpc/ar-output.c ioc = msg->msg_iovlen - 1;
ioc 648 net/rxrpc/ar-output.c while (segment == 0 && ioc > 0) {
ioc 652 net/rxrpc/ar-output.c ioc--;
ioc 656 net/rxrpc/ar-output.c ioc = 0;
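
Note: the net/rxrpc hits (ar-ack.c and ar-output.c) are also unrelated to io_context. In those files ioc appears to be a plain local int counting how many iovec entries are handed to the socket send path, so these matches are only the identifier name colliding.
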