dio 141 fs/direct-io.c return dio->tail - dio->head;
dio 152 fs/direct-io.c nr_pages = min(dio->total_pages - dio->curr_page, DIO_PAGES);
dio 154 fs/direct-io.c dio->curr_user_address, /* Where from? */
dio 156 fs/direct-io.c dio->rw == READ, /* Write to memory? */
dio 157 fs/direct-io.c &dio->pages[0]); /* Put results here */
dio 159 fs/direct-io.c if (ret < 0 && dio->blocks_available && (dio->rw & WRITE)) {
dio 166 fs/direct-io.c if (dio->page_errors == 0)
dio 167 fs/direct-io.c dio->page_errors = ret;
dio 169 fs/direct-io.c dio->pages[0] = page;
dio 170 fs/direct-io.c dio->head = 0;
dio 171 fs/direct-io.c dio->tail = 1;
dio 177 fs/direct-io.c dio->curr_user_address += ret * PAGE_SIZE;
dio 178 fs/direct-io.c dio->curr_page += ret;
dio 179 fs/direct-io.c dio->head = 0;
dio 180 fs/direct-io.c dio->tail = ret;
dio 195 fs/direct-io.c if (dio_pages_present(dio) == 0) {
dio 198 fs/direct-io.c ret = dio_refill_pages(dio);
dio 201 fs/direct-io.c BUG_ON(dio_pages_present(dio) == 0);
dio 203 fs/direct-io.c return dio->pages[dio->head++];
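The lines above (141-203) show the small window of pinned user pages kept in dio->pages[]: dio_refill_pages() batches get_user_pages() results, head/tail index the window, and dio_get_page() refills it whenever it runs dry. A minimal userspace sketch of that window pattern follows; fill_batch() is a hypothetical stand-in for get_user_pages(), and none of this is kernel code.

/* Sketch of the pages[] window behind dio_refill_pages()/dio_get_page().
 * fill_batch() stands in for get_user_pages(); purely illustrative. */
#include <stdio.h>

#define DIO_PAGES 64

struct page_window {
    void *pages[DIO_PAGES];
    unsigned head;              /* next entry to hand out */
    unsigned tail;              /* one past the last valid entry */
};

static int window_present(struct page_window *w)
{
    return w->tail - w->head;   /* mirrors dio_pages_present() */
}

/* Hypothetical batch filler: returns how many slots it populated. */
static int fill_batch(void **slots, int nr)
{
    for (int i = 0; i < nr; i++)
        slots[i] = (void *)(long)(i + 1);   /* fake page pointers */
    return nr;
}

static void *window_get(struct page_window *w)
{
    if (window_present(w) == 0) {           /* refill on empty, like dio_get_page() */
        int got = fill_batch(w->pages, DIO_PAGES);
        if (got <= 0)
            return NULL;
        w->head = 0;
        w->tail = got;
    }
    return w->pages[w->head++];
}

int main(void)
{
    struct page_window w = { .head = 0, .tail = 0 };
    for (int i = 0; i < 3; i++)
        printf("page %p\n", window_get(&w));
    return 0;
}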
dio 232 fs/direct-io.c if (dio->result) {
dio 233 fs/direct-io.c transferred = dio->result;
dio 236 fs/direct-io.c if ((dio->rw == READ) && ((offset + transferred) > dio->i_size))
dio 237 fs/direct-io.c transferred = dio->i_size - offset;
dio 240 fs/direct-io.c if (dio->end_io && dio->result)
dio 241 fs/direct-io.c dio->end_io(dio->iocb, offset, transferred,
dio 242 fs/direct-io.c dio->map_bh.b_private);
dio 243 fs/direct-io.c if (dio->lock_type == DIO_LOCKING)
dio 245 fs/direct-io.c up_read_non_owner(&dio->inode->i_alloc_sem);
dio 248 fs/direct-io.c ret = dio->page_errors;
dio 250 fs/direct-io.c ret = dio->io_error;
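dio_complete() (lines 232-250 above) trims the byte count of a read that was set up past i_size and then chooses a return value, with page_errors taking precedence over io_error. The selection logic can be sketched as below; the names are local stand-ins, and the real function also calls dio->end_io() and releases i_alloc_sem, which is omitted here.

/* Illustrative result selection modelled on dio_complete(). */
#include <stdio.h>

static long long finish_io(int is_read, long long offset, long long transferred,
                           long long i_size, int page_errors, int io_error)
{
    /* A read may have been set up past EOF; trim it back to i_size. */
    if (is_read && offset + transferred > i_size)
        transferred = i_size - offset;

    if (page_errors)            /* get_user_pages() failures win ... */
        return page_errors;
    if (io_error)               /* ... then any bio-level I/O error ... */
        return io_error;
    return transferred;         /* ... otherwise report bytes moved. */
}

int main(void)
{
    /* 4096-byte read at offset 4000 of a 4100-byte file -> 100 bytes. */
    printf("%lld\n", finish_io(1, 4000, 4096, 4100, 0, 0));
    return 0;
}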
dio 263 fs/direct-io.c struct dio *dio = bio->bi_private;
dio 268 fs/direct-io.c dio_bio_complete(dio, bio);
dio 270 fs/direct-io.c spin_lock_irqsave(&dio->bio_lock, flags);
dio 271 fs/direct-io.c remaining = --dio->refcount;
dio 272 fs/direct-io.c if (remaining == 1 && dio->waiter)
dio 273 fs/direct-io.c wake_up_process(dio->waiter);
dio 274 fs/direct-io.c spin_unlock_irqrestore(&dio->bio_lock, flags);
dio 277 fs/direct-io.c int ret = dio_complete(dio, dio->iocb->ki_pos, 0);
dio 278 fs/direct-io.c aio_complete(dio->iocb, ret, 0);
dio 279 fs/direct-io.c kfree(dio);
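dio_bio_end_aio() (lines 263-279) drops a reference under bio_lock, wakes a synchronous waiter once only the submitter's reference remains, and lets the last dropper complete the iocb and free the dio. The "last reference frees the object" pattern can be sketched with C11 atomics, as below; the kernel uses a spinlock plus wake_up_process(), and the waiter handshake is left out of this sketch.

/* Last-reference-completes pattern, as in dio_bio_end_aio(). */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct req {
    atomic_int refcount;
    long long bytes_done;
};

static void req_put(struct req *r)
{
    /* fetch_sub returns the old value; 1 means we dropped the last ref. */
    if (atomic_fetch_sub(&r->refcount, 1) == 1) {
        printf("completing request: %lld bytes\n", r->bytes_done);
        free(r);
    }
}

int main(void)
{
    struct req *r = malloc(sizeof(*r));
    atomic_init(&r->refcount, 2);   /* submitter + one in-flight bio */
    r->bytes_done = 8192;

    req_put(r);     /* bio completion path */
    req_put(r);     /* submitter's final drop triggers completion */
    return 0;
}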
dio 292 fs/direct-io.c struct dio *dio = bio->bi_private;
dio 295 fs/direct-io.c spin_lock_irqsave(&dio->bio_lock, flags);
dio 296 fs/direct-io.c bio->bi_private = dio->bio_list;
dio 297 fs/direct-io.c dio->bio_list = bio;
dio 298 fs/direct-io.c if (--dio->refcount == 1 && dio->waiter)
dio 299 fs/direct-io.c wake_up_process(dio->waiter);
dio 300 fs/direct-io.c spin_unlock_irqrestore(&dio->bio_lock, flags);
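dio_bio_end_io() (lines 292-300) threads each completed bio onto dio->bio_list by reusing bio->bi_private as the "next" pointer, building a LIFO that dio_await_one() and dio_bio_reap() later pop. A userspace sketch of that intrusive single-pointer stack follows, with a made-up struct in place of struct bio and the bio_lock protection omitted.

/* Intrusive LIFO threaded through a "private" pointer, as completed
 * bios are chained onto dio->bio_list via bio->bi_private. */
#include <stdio.h>

struct fake_bio {
    int id;
    void *bi_private;           /* doubles as the list link once completed */
};

static struct fake_bio *list_head;   /* plays the role of dio->bio_list */

static void push_completed(struct fake_bio *bio)
{
    bio->bi_private = list_head;     /* old head becomes our next */
    list_head = bio;
}

static struct fake_bio *pop_completed(void)
{
    struct fake_bio *bio = list_head;
    if (bio)
        list_head = bio->bi_private; /* advance to the next completed bio */
    return bio;
}

int main(void)
{
    struct fake_bio a = { .id = 1 }, b = { .id = 2 };
    push_completed(&a);
    push_completed(&b);
    for (struct fake_bio *bio; (bio = pop_completed()); )
        printf("reaped bio %d\n", bio->id);
    return 0;
}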
dio 315 fs/direct-io.c if (dio->is_async)
dio 320 fs/direct-io.c dio->bio = bio;
dio 333 fs/direct-io.c struct bio *bio = dio->bio;
dio 336 fs/direct-io.c bio->bi_private = dio;
dio 338 fs/direct-io.c spin_lock_irqsave(&dio->bio_lock, flags);
dio 339 fs/direct-io.c dio->refcount++;
dio 340 fs/direct-io.c spin_unlock_irqrestore(&dio->bio_lock, flags);
dio 342 fs/direct-io.c if (dio->is_async && dio->rw == READ)
dio 345 fs/direct-io.c submit_bio(dio->rw, bio);
dio 347 fs/direct-io.c dio->bio = NULL;
dio 348 fs/direct-io.c dio->boundary = 0;
dio 356 fs/direct-io.c while (dio_pages_present(dio))
dio 357 fs/direct-io.c page_cache_release(dio_get_page(dio));
dio 371 fs/direct-io.c spin_lock_irqsave(&dio->bio_lock, flags);
dio 379 fs/direct-io.c while (dio->refcount > 1 && dio->bio_list == NULL) {
dio 381 fs/direct-io.c dio->waiter = current;
dio 382 fs/direct-io.c spin_unlock_irqrestore(&dio->bio_lock, flags);
dio 385 fs/direct-io.c spin_lock_irqsave(&dio->bio_lock, flags);
dio 386 fs/direct-io.c dio->waiter = NULL;
dio 388 fs/direct-io.c if (dio->bio_list) {
dio 389 fs/direct-io.c bio = dio->bio_list;
dio 390 fs/direct-io.c dio->bio_list = bio->bi_private;
dio 392 fs/direct-io.c spin_unlock_irqrestore(&dio->bio_lock, flags);
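dio_await_one() (lines 371-392) sleeps while I/O is outstanding but nothing has landed on bio_list: it records itself as dio->waiter, drops bio_lock, schedules, and retakes the lock before re-checking. A condition-variable rendering of the same wait loop is sketched below, using pthreads instead of the kernel's set_current_state()/io_schedule() handshake (build with -pthread).

/* pthread sketch of the dio_await_one() wait: sleep until a completed
 * item appears, then pop it. Not the kernel mechanism. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  done = PTHREAD_COND_INITIALIZER;
static int completed;            /* stands in for dio->bio_list */

static void *completion_thread(void *arg)
{
    (void)arg;
    pthread_mutex_lock(&lock);
    completed++;                 /* a bio finished */
    pthread_cond_signal(&done);  /* wake_up_process(dio->waiter) */
    pthread_mutex_unlock(&lock);
    return NULL;
}

int main(void)
{
    pthread_t t;
    pthread_create(&t, NULL, completion_thread, NULL);

    pthread_mutex_lock(&lock);
    while (completed == 0)       /* "refcount > 1 && bio_list == NULL" */
        pthread_cond_wait(&done, &lock);
    completed--;                 /* pop one completed bio */
    pthread_mutex_unlock(&lock);

    pthread_join(t, NULL);
    printf("got one completed bio\n");
    return 0;
}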
dio 406 fs/direct-io.c dio->io_error = -EIO;
dio 408 fs/direct-io.c if (dio->is_async && dio->rw == READ) {
dio 414 fs/direct-io.c if (dio->rw == READ && !PageCompound(page))
dio 434 fs/direct-io.c bio = dio_await_one(dio);
dio 436 fs/direct-io.c dio_bio_complete(dio, bio);
dio 451 fs/direct-io.c if (dio->reap_counter++ >= 64) {
dio 452 fs/direct-io.c while (dio->bio_list) {
dio 457 fs/direct-io.c spin_lock_irqsave(&dio->bio_lock, flags);
dio 458 fs/direct-io.c bio = dio->bio_list;
dio 459 fs/direct-io.c dio->bio_list = bio->bi_private;
dio 460 fs/direct-io.c spin_unlock_irqrestore(&dio->bio_lock, flags);
dio 461 fs/direct-io.c ret2 = dio_bio_complete(dio, bio);
dio 465 fs/direct-io.c dio->reap_counter = 0;
dio 496 fs/direct-io.c struct buffer_head *map_bh = &dio->map_bh;
dio 507 fs/direct-io.c ret = dio->page_errors;
dio 509 fs/direct-io.c BUG_ON(dio->block_in_file >= dio->final_block_in_request);
dio 510 fs/direct-io.c fs_startblk = dio->block_in_file >> dio->blkfactor;
dio 511 fs/direct-io.c dio_count = dio->final_block_in_request - dio->block_in_file;
dio 512 fs/direct-io.c fs_count = dio_count >> dio->blkfactor;
dio 513 fs/direct-io.c blkmask = (1 << dio->blkfactor) - 1;
dio 518 fs/direct-io.c map_bh->b_size = fs_count << dio->inode->i_blkbits;
dio 520 fs/direct-io.c create = dio->rw & WRITE;
dio 521 fs/direct-io.c if (dio->lock_type == DIO_LOCKING) {
dio 522 fs/direct-io.c if (dio->block_in_file < (i_size_read(dio->inode) >>
dio 523 fs/direct-io.c dio->blkbits))
dio 525 fs/direct-io.c } else if (dio->lock_type == DIO_NO_LOCKING) {
dio 535 fs/direct-io.c ret = (*dio->get_block)(dio->inode, fs_startblk,
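get_more_blocks() (lines 496-535) converts the dio's position and remaining length from device-block units into filesystem-block units before calling get_block(): fs_startblk = block_in_file >> blkfactor, fs_count from the remaining count, and map_bh->b_size = fs_count << i_blkbits. A worked example of that arithmetic follows, for 512-byte dio blocks on a 4 KiB-block filesystem (blkfactor = 3); the partial-block round-up is an assumption about the lines the listing elides.

/* Device-block to fs-block mapping arithmetic as in get_more_blocks(). */
#include <stdio.h>

int main(void)
{
    unsigned blkbits = 9, i_blkbits = 12;
    unsigned blkfactor = i_blkbits - blkbits;          /* 3 */
    unsigned long long block_in_file = 21;             /* 512-byte units */
    unsigned long long final_block_in_request = 53;

    unsigned long long fs_startblk = block_in_file >> blkfactor;
    unsigned long long dio_count = final_block_in_request - block_in_file;
    unsigned long long fs_count = dio_count >> blkfactor;
    unsigned long long blkmask = (1ULL << blkfactor) - 1;

    /* Round a partial trailing fs block up so get_block() maps it too
     * (assumed behaviour of the elided lines). */
    if (dio_count & blkmask)
        fs_count++;

    unsigned long long b_size = fs_count << i_blkbits;

    printf("fs_startblk=%llu fs_count=%llu b_size=%llu bytes\n",
           fs_startblk, fs_count, b_size);
    /* -> fs_startblk=2 fs_count=4 b_size=16384 bytes */
    return 0;
}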
dio 549 fs/direct-io.c ret = dio_bio_reap(dio);
dio 552 fs/direct-io.c sector = start_sector << (dio->blkbits - 9);
dio 553 fs/direct-io.c nr_pages = min(dio->pages_in_io, bio_get_nr_vecs(dio->map_bh.b_bdev));
dio 555 fs/direct-io.c ret = dio_bio_alloc(dio, dio->map_bh.b_bdev, sector, nr_pages);
dio 556 fs/direct-io.c dio->boundary = 0;
dio 572 fs/direct-io.c ret = bio_add_page(dio->bio, dio->cur_page,
dio 573 fs/direct-io.c dio->cur_page_len, dio->cur_page_offset);
dio 574 fs/direct-io.c if (ret == dio->cur_page_len) {
dio 578 fs/direct-io.c if ((dio->cur_page_len + dio->cur_page_offset) == PAGE_SIZE)
dio 579 fs/direct-io.c dio->pages_in_io--;
dio 580 fs/direct-io.c page_cache_get(dio->cur_page);
dio 581 fs/direct-io.c dio->final_block_in_bio = dio->cur_page_block +
dio 582 fs/direct-io.c (dio->cur_page_len >> dio->blkbits);
dio 604 fs/direct-io.c if (dio->bio) {
dio 608 fs/direct-io.c if (dio->final_block_in_bio != dio->cur_page_block)
dio 609 fs/direct-io.c dio_bio_submit(dio);
dio 614 fs/direct-io.c if (dio->boundary)
dio 615 fs/direct-io.c dio_bio_submit(dio);
dio 618 fs/direct-io.c if (dio->bio == NULL) {
dio 619 fs/direct-io.c ret = dio_new_bio(dio, dio->cur_page_block);
dio 624 fs/direct-io.c if (dio_bio_add_page(dio) != 0) {
dio 625 fs/direct-io.c dio_bio_submit(dio);
dio 626 fs/direct-io.c ret = dio_new_bio(dio, dio->cur_page_block);
dio 628 fs/direct-io.c ret = dio_bio_add_page(dio);
dio 659 fs/direct-io.c if (dio->rw & WRITE) {
dio 669 fs/direct-io.c if ( (dio->cur_page == page) &&
dio 670 fs/direct-io.c (dio->cur_page_offset + dio->cur_page_len == offset) &&
dio 671 fs/direct-io.c (dio->cur_page_block +
dio 672 fs/direct-io.c (dio->cur_page_len >> dio->blkbits) == blocknr)) {
dio 673 fs/direct-io.c dio->cur_page_len += len;
dio 679 fs/direct-io.c if (dio->boundary) {
dio 680 fs/direct-io.c ret = dio_send_cur_page(dio);
dio 681 fs/direct-io.c page_cache_release(dio->cur_page);
dio 682 fs/direct-io.c dio->cur_page = NULL;
dio 690 fs/direct-io.c if (dio->cur_page) {
dio 691 fs/direct-io.c ret = dio_send_cur_page(dio);
dio 692 fs/direct-io.c page_cache_release(dio->cur_page);
dio 693 fs/direct-io.c dio->cur_page = NULL;
dio 699 fs/direct-io.c dio->cur_page = page;
dio 700 fs/direct-io.c dio->cur_page_offset = offset;
dio 701 fs/direct-io.c dio->cur_page_len = len;
dio 702 fs/direct-io.c dio->cur_page_block = blocknr;
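submit_page_section() (lines 659-702) tries to merge each new chunk into dio->cur_page: if it is the same page, byte-contiguous with the buffered run, and disk-block-contiguous, it just grows cur_page_len; otherwise the buffered run is sent and the new chunk becomes the current one. The merge test can be sketched as a small predicate, using field names borrowed from the listing but defined locally.

/* Contiguity test behind the "can we just grow cur_page?" branch of
 * submit_page_section(). Local struct, not the kernel's struct dio. */
#include <stdbool.h>
#include <stdio.h>

struct cur_run {
    const void *page;            /* page currently being batched */
    unsigned offset;             /* byte offset of the run in that page */
    unsigned len;                /* bytes buffered so far */
    unsigned long long block;    /* starting disk block of the run */
    unsigned blkbits;            /* device block size, log2 */
};

static bool can_extend(const struct cur_run *cur, const void *page,
                       unsigned offset, unsigned long long blocknr)
{
    return cur->page == page &&
           cur->offset + cur->len == offset &&                 /* bytes touch */
           cur->block + (cur->len >> cur->blkbits) == blocknr; /* blocks touch */
}

int main(void)
{
    char page[4096];
    struct cur_run cur = { .page = page, .offset = 0, .len = 1024,
                           .block = 100, .blkbits = 9 };

    /* 1024 bytes == 2 blocks of 512, so block 102 at offset 1024 extends it. */
    printf("extend: %d\n", can_extend(&cur, page, 1024, 102));
    printf("extend: %d\n", can_extend(&cur, page, 2048, 102));
    return 0;
}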
dio 717 fs/direct-io.c nblocks = dio->map_bh.b_size >> dio->inode->i_blkbits;
dio 720 fs/direct-io.c unmap_underlying_metadata(dio->map_bh.b_bdev,
dio 721 fs/direct-io.c dio->map_bh.b_blocknr + i);
dio 741 fs/direct-io.c dio->start_zero_done = 1;
dio 742 fs/direct-io.c if (!dio->blkfactor || !buffer_new(&dio->map_bh))
dio 745 fs/direct-io.c dio_blocks_per_fs_block = 1 << dio->blkfactor;
dio 746 fs/direct-io.c this_chunk_blocks = dio->block_in_file & (dio_blocks_per_fs_block - 1);
dio 758 fs/direct-io.c this_chunk_bytes = this_chunk_blocks << dio->blkbits;
dio 761 fs/direct-io.c if (submit_page_section(dio, page, 0, this_chunk_bytes,
dio 762 fs/direct-io.c dio->next_block_for_io))
dio 765 fs/direct-io.c dio->next_block_for_io += this_chunk_blocks;
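dio_zero_block() (lines 741-765) only matters when blkfactor is non-zero and the fs block is newly allocated: it works out how many device-sized blocks at the front (or back) of the filesystem block fall outside the user's request and submits that many zeroed blocks. The sizing arithmetic is sketched below; only the front-of-block computation appears in the listing, so the trailing-gap formula is an inference.

/* How many sub-blocks dio_zero_block() must zero at the edge of a newly
 * allocated fs block. end == 0 zeroes the leading gap, end != 0 the
 * trailing gap (the trailing formula is inferred from context). */
#include <stdio.h>

static unsigned long zero_span(unsigned long long block_in_file,
                               unsigned blkfactor, int end)
{
    unsigned long dio_blocks_per_fs_block = 1UL << blkfactor;
    unsigned long this_chunk_blocks =
        block_in_file & (dio_blocks_per_fs_block - 1);

    if (this_chunk_blocks == 0)          /* request is fs-block aligned */
        return 0;
    if (end)                             /* zero from here to end of fs block */
        this_chunk_blocks = dio_blocks_per_fs_block - this_chunk_blocks;
    return this_chunk_blocks;
}

int main(void)
{
    /* 512-byte blocks in a 4 KiB fs block (blkfactor 3): request starts
     * 3 sub-blocks into the fs block. */
    printf("front: %lu blocks\n", zero_span(19, 3, 0));   /* 3 */
    printf("back:  %lu blocks\n", zero_span(19, 3, 1));   /* 5 */
    return 0;
}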
dio 786 fs/direct-io.c const unsigned blkbits = dio->blkbits;
dio 790 fs/direct-io.c struct buffer_head *map_bh = &dio->map_bh;
dio 794 fs/direct-io.c block_in_page = dio->first_block_in_page;
dio 796 fs/direct-io.c while (dio->block_in_file < dio->final_block_in_request) {
dio 797 fs/direct-io.c page = dio_get_page(dio);
dio 809 fs/direct-io.c if (dio->blocks_available == 0) {
dio 816 fs/direct-io.c ret = get_more_blocks(dio);
dio 824 fs/direct-io.c dio->blocks_available =
dio 825 fs/direct-io.c map_bh->b_size >> dio->blkbits;
dio 826 fs/direct-io.c dio->next_block_for_io =
dio 827 fs/direct-io.c map_bh->b_blocknr << dio->blkfactor;
dio 829 fs/direct-io.c clean_blockdev_aliases(dio);
dio 831 fs/direct-io.c if (!dio->blkfactor)
dio 834 fs/direct-io.c blkmask = (1 << dio->blkfactor) - 1;
dio 835 fs/direct-io.c dio_remainder = (dio->block_in_file & blkmask);
dio 849 fs/direct-io.c dio->next_block_for_io += dio_remainder;
dio 850 fs/direct-io.c dio->blocks_available -= dio_remainder;
dio 858 fs/direct-io.c if (dio->rw & WRITE) {
dio 867 fs/direct-io.c i_size_aligned = ALIGN(i_size_read(dio->inode),
dio 869 fs/direct-io.c if (dio->block_in_file >=
dio 877 fs/direct-io.c dio->block_in_file++;
dio 887 fs/direct-io.c if (unlikely(dio->blkfactor && !dio->start_zero_done))
dio 888 fs/direct-io.c dio_zero_block(dio, 0);
dio 894 fs/direct-io.c this_chunk_blocks = dio->blocks_available;
dio 898 fs/direct-io.c u = dio->final_block_in_request - dio->block_in_file;
dio 904 fs/direct-io.c dio->boundary = buffer_boundary(map_bh);
dio 905 fs/direct-io.c ret = submit_page_section(dio, page, offset_in_page,
dio 906 fs/direct-io.c this_chunk_bytes, dio->next_block_for_io);
dio 911 fs/direct-io.c dio->next_block_for_io += this_chunk_blocks;
dio 913 fs/direct-io.c dio->block_in_file += this_chunk_blocks;
dio 915 fs/direct-io.c dio->blocks_available -= this_chunk_blocks;
dio 917 fs/direct-io.c BUG_ON(dio->block_in_file > dio->final_block_in_request);
dio 918 fs/direct-io.c if (dio->block_in_file == dio->final_block_in_request)
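The main loop of do_direct_IO() (lines 786-918) maps blocks on demand and then clamps each submission to the smallest of three limits: blocks the mapping made available, blocks left in the current page, and blocks left in the request. A tiny sketch of that triple clamp follows; the blocks-left-in-the-page term is written out explicitly here since the listing elides those lines, and all names are local stand-ins for the dio fields.

/* Per-iteration chunk clamp from do_direct_IO(). */
#include <stdio.h>

#define PAGE_SIZE_ 4096ULL

static unsigned long long min3(unsigned long long a, unsigned long long b,
                               unsigned long long c)
{
    unsigned long long m = a < b ? a : b;
    return m < c ? m : c;
}

int main(void)
{
    unsigned blkbits = 9;
    unsigned long long blocks_available = 32;        /* from get_more_blocks() */
    unsigned long long block_in_page = 6;            /* blocks already used in page */
    unsigned long long blocks_per_page = PAGE_SIZE_ >> blkbits;   /* 8 */
    unsigned long long block_in_file = 100, final_block_in_request = 103;

    unsigned long long this_chunk_blocks =
        min3(blocks_available,
             blocks_per_page - block_in_page,         /* room left in the page */
             final_block_in_request - block_in_file); /* blocks left in request */

    printf("this_chunk_blocks = %llu\n", this_chunk_blocks);  /* 2 */
    printf("this_chunk_bytes  = %llu\n", this_chunk_blocks << blkbits);
    return 0;
}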
dio 946 fs/direct-io.c dio->inode = inode;
dio 947 fs/direct-io.c dio->rw = rw;
dio 948 fs/direct-io.c dio->blkbits = blkbits;
dio 949 fs/direct-io.c dio->blkfactor = inode->i_blkbits - blkbits;
dio 950 fs/direct-io.c dio->block_in_file = offset >> blkbits;
dio 952 fs/direct-io.c dio->get_block = get_block;
dio 953 fs/direct-io.c dio->end_io = end_io;
dio 954 fs/direct-io.c dio->final_block_in_bio = -1;
dio 955 fs/direct-io.c dio->next_block_for_io = -1;
dio 957 fs/direct-io.c dio->iocb = iocb;
dio 958 fs/direct-io.c dio->i_size = i_size_read(inode);
dio 960 fs/direct-io.c spin_lock_init(&dio->bio_lock);
dio 961 fs/direct-io.c dio->refcount = 1;
dio 967 fs/direct-io.c if (unlikely(dio->blkfactor))
dio 968 fs/direct-io.c dio->pages_in_io = 2;
dio 972 fs/direct-io.c dio->pages_in_io +=
dio 979 fs/direct-io.c dio->size += bytes = iov[seg].iov_len;
dio 982 fs/direct-io.c dio->first_block_in_page = (user_addr & ~PAGE_MASK) >> blkbits;
dio 983 fs/direct-io.c dio->final_block_in_request = dio->block_in_file +
dio 986 fs/direct-io.c dio->head = 0;
dio 987 fs/direct-io.c dio->tail = 0;
dio 988 fs/direct-io.c dio->curr_page = 0;
dio 990 fs/direct-io.c dio->total_pages = 0;
dio 992 fs/direct-io.c dio->total_pages++;
dio 995 fs/direct-io.c dio->total_pages += (bytes + PAGE_SIZE - 1) / PAGE_SIZE;
dio 996 fs/direct-io.c dio->curr_user_address = user_addr;
dio 998 fs/direct-io.c ret = do_direct_IO(dio);
dio 1000 fs/direct-io.c dio->result += iov[seg].iov_len -
dio 1001 fs/direct-io.c ((dio->final_block_in_request - dio->block_in_file) <<
dio 1005 fs/direct-io.c dio_cleanup(dio);
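In direct_io_worker() (lines 946-1005), each iovec segment is turned into a page count before do_direct_IO() runs: an unaligned start consumes one extra page for its leading partial page, and the remainder is rounded up by whole pages (lines 986-996). A worked example of that page count is sketched below; the byte adjustment for the unaligned start is inferred from context (the relevant line is elided from the listing), so treat it as an assumption.

/* Pages spanned by a user buffer, modelled on the total_pages setup in
 * direct_io_worker(). The leading-page byte adjustment is inferred. */
#include <stdio.h>

#define PAGE_SIZE_ 4096UL

static unsigned long pages_spanned(unsigned long user_addr, unsigned long bytes)
{
    unsigned long total_pages = 0;

    if (user_addr & (PAGE_SIZE_ - 1)) {
        /* Leading partial page: count it, then drop the bytes it covers. */
        unsigned long first = PAGE_SIZE_ - (user_addr & (PAGE_SIZE_ - 1));
        total_pages++;
        bytes = bytes > first ? bytes - first : 0;
    }
    total_pages += (bytes + PAGE_SIZE_ - 1) / PAGE_SIZE_;
    return total_pages;
}

int main(void)
{
    /* 8192 bytes starting 512 bytes into a page: the partial first page,
     * one full page, then 512 trailing bytes -> 3 pages in total. */
    printf("%lu pages\n", pages_spanned(0x1000 + 512, 8192));
    return 0;
}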
dio 1021 fs/direct-io.c dio_zero_block(dio, 1);
dio 1023 fs/direct-io.c if (dio->cur_page) {
dio 1024 fs/direct-io.c ret2 = dio_send_cur_page(dio);
dio 1027 fs/direct-io.c page_cache_release(dio->cur_page);
dio 1028 fs/direct-io.c dio->cur_page = NULL;
dio 1030 fs/direct-io.c if (dio->bio)
dio 1031 fs/direct-io.c dio_bio_submit(dio);
dio 1040 fs/direct-io.c dio_cleanup(dio);
dio 1047 fs/direct-io.c if ((rw == READ) && (dio->lock_type == DIO_LOCKING))
dio 1048 fs/direct-io.c mutex_unlock(&dio->inode->i_mutex);
dio 1058 fs/direct-io.c if (dio->is_async && ret == 0 && dio->result &&
dio 1059 fs/direct-io.c ((rw & READ) || (dio->result == dio->size)))
dio 1063 fs/direct-io.c dio_await_completion(dio);
dio 1076 fs/direct-io.c spin_lock_irqsave(&dio->bio_lock, flags);
dio 1077 fs/direct-io.c ret2 = --dio->refcount;
dio 1078 fs/direct-io.c spin_unlock_irqrestore(&dio->bio_lock, flags);
dio 1081 fs/direct-io.c ret = dio_complete(dio, offset, ret);
dio 1082 fs/direct-io.c kfree(dio);
dio 1124 fs/direct-io.c struct dio *dio;
dio 1156 fs/direct-io.c dio = kzalloc(sizeof(*dio), GFP_KERNEL);
dio 1158 fs/direct-io.c if (!dio)
dio 1170 fs/direct-io.c dio->lock_type = dio_lock_type;
dio 1185 fs/direct-io.c kfree(dio);
dio 1206 fs/direct-io.c dio->is_async = !is_sync_kiocb(iocb) && !((rw & WRITE) &&
dio 1210 fs/direct-io.c nr_segs, blkbits, get_block, end_io, dio);