sg 426 arch/x86/kernel/pci-calgary_64.c for_each_sg(sg, s, nelems, i) {
sg 450 arch/x86/kernel/pci-calgary_64.c calgary_unmap_sg(dev, sg, nelems, direction);
sg 451 arch/x86/kernel/pci-calgary_64.c for_each_sg(sg, s, nelems, i) {
sg 452 arch/x86/kernel/pci-calgary_64.c sg->dma_address = bad_dma_address;
sg 453 arch/x86/kernel/pci-calgary_64.c sg->dma_length = 0;
sg 305 arch/x86/kernel/pci-gart_64.c for_each_sg(sg, s, nents, i) {
sg 323 arch/x86/kernel/pci-gart_64.c for_each_sg(sg, s, nents, i) {
sg 330 arch/x86/kernel/pci-gart_64.c gart_unmap_sg(dev, sg, i, dir);
sg 332 arch/x86/kernel/pci-gart_64.c sg[0].dma_length = 0;
sg 418 arch/x86/kernel/pci-gart_64.c start_sg = sgmap = sg;
sg 422 arch/x86/kernel/pci-gart_64.c for_each_sg(sg, s, nents, i) {
sg 469 arch/x86/kernel/pci-gart_64.c gart_unmap_sg(dev, sg, out, dir);
sg 473 arch/x86/kernel/pci-gart_64.c out = dma_map_sg_nonforce(dev, sg, nents, dir);
sg 481 arch/x86/kernel/pci-gart_64.c for_each_sg(sg, s, nents, i)
sg 62 arch/x86/kernel/pci-nommu.c WARN_ON(nents == 0 || sg[0].length == 0);
sg 64 arch/x86/kernel/pci-nommu.c for_each_sg(sg, s, nents, i) {
sg 73 block/blk-integrity.c struct scatterlist *sg;
sg 77 block/blk-integrity.c sg = NULL;
sg 86 block/blk-integrity.c sg->length += iv->bv_len;
sg 89 block/blk-integrity.c if (!sg)
sg 90 block/blk-integrity.c sg = sglist;
sg 92 block/blk-integrity.c sg->page_link &= ~0x02;
sg 93 block/blk-integrity.c sg = sg_next(sg);
sg 96 block/blk-integrity.c sg_set_page(sg, iv->bv_page, iv->bv_len, iv->bv_offset);
sg 103 block/blk-integrity.c if (sg)
sg 104 block/blk-integrity.c sg_mark_end(sg);
sg 137 block/blk-merge.c struct scatterlist *sg;
sg 147 block/blk-merge.c sg = NULL;
sg 152 block/blk-merge.c if (sg->length + nbytes > q->max_segment_size)
sg 160 block/blk-merge.c sg->length += nbytes;
sg 163 block/blk-merge.c if (!sg)
sg 164 block/blk-merge.c sg = sglist;
sg 176 block/blk-merge.c sg->page_link &= ~0x02;
sg 177 block/blk-merge.c sg = sg_next(sg);
sg 180 block/blk-merge.c sg_set_page(sg, bvec->bv_page, nbytes, bvec->bv_offset);
sg 191 block/blk-merge.c sg->length += pad_len;
sg 199 block/blk-merge.c sg->page_link &= ~0x02;
sg 200 block/blk-merge.c sg = sg_next(sg);
sg 201 block/blk-merge.c sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
sg 209 block/blk-merge.c if (sg)
sg 210 block/blk-merge.c sg_mark_end(sg);
sg 46 crypto/ahash.c struct scatterlist *sg;
sg 48 crypto/ahash.c sg = walk->sg;
sg 49 crypto/ahash.c walk->pg = sg_page(sg);
sg 50 crypto/ahash.c walk->offset = sg->offset;
sg 51 crypto/ahash.c walk->entrylen = sg->length;
sg 93 crypto/ahash.c walk->sg = scatterwalk_sg_next(walk->sg);
sg 108 crypto/ahash.c walk->sg = req->src;
sg 95 crypto/authenc.c head->length += sg->length;
sg 96 crypto/authenc.c sg = scatterwalk_sg_next(sg);
sg 99 crypto/authenc.c if (sg)
sg 100 crypto/authenc.c scatterwalk_sg_chain(head, 2, sg);
sg 342 crypto/blkcipher.c scatterwalk_start(&walk->in, walk->in.sg);
sg 343 crypto/blkcipher.c scatterwalk_start(&walk->out, walk->out.sg);
sg 211 crypto/ccm.c scatterwalk_start(&walk, sg);
sg 216 crypto/ccm.c scatterwalk_start(&walk, sg_next(walk.sg));
sg 45 crypto/digest.c struct page *pg = sg_page(sg);
sg 46 crypto/digest.c unsigned int offset = sg->offset;
sg 47 crypto/digest.c unsigned int l = sg->length;
sg 81 crypto/digest.c sg = scatterwalk_sg_next(sg);
sg 92 crypto/digest.c return update2(desc, sg, nbytes);
sg 136 crypto/digest.c update2(desc, sg, nbytes);
sg 69 crypto/eseqiv.c head->length += sg->length;
sg 70 crypto/eseqiv.c sg = scatterwalk_sg_next(sg);
sg 73 crypto/eseqiv.c if (sg)
sg 74 crypto/eseqiv.c scatterwalk_sg_chain(head, 2, sg);
sg 117 crypto/gcm.c scatterwalk_start(&walk, sg);
sg 123 crypto/gcm.c scatterwalk_start(&walk, scatterwalk_sg_next(walk.sg));
sg 194 crypto/gcm.c struct scatterlist sg[1];
sg 216 crypto/gcm.c sg_init_one(data->sg, &data->hash, sizeof(data->hash));
sg 222 crypto/gcm.c ablkcipher_request_set_crypt(&data->req, data->sg, data->sg,
sg 139 crypto/hmac.c return crypto_hash_update(&desc, sg, nbytes);
sg 185 crypto/hmac.c scatterwalk_sg_chain(sg1, 2, sg);
sg 35 crypto/scatterwalk.c walk->sg = sg;
sg 37 crypto/scatterwalk.c BUG_ON(!sg->length);
sg 39 crypto/scatterwalk.c walk->offset = sg->offset;
sg 56 crypto/scatterwalk.c page = sg_page(walk->sg) + ((walk->offset - 1) >> PAGE_SHIFT);
sg 63 crypto/scatterwalk.c if (walk->offset >= walk->sg->offset + walk->sg->length)
sg 64 crypto/scatterwalk.c scatterwalk_start(walk, scatterwalk_sg_next(walk->sg));
sg 112 crypto/scatterwalk.c scatterwalk_start(&walk, sg);
sg 114 crypto/scatterwalk.c if (start < offset + sg->length)
sg 117 crypto/scatterwalk.c offset += sg->length;
sg 118 crypto/scatterwalk.c sg = scatterwalk_sg_next(sg);
sg 69 crypto/tcrypt.c ret = crypto_blkcipher_encrypt(desc, sg, sg, blen);
sg 71 crypto/tcrypt.c ret = crypto_blkcipher_decrypt(desc, sg, sg, blen);
sg 95 crypto/tcrypt.c ret = crypto_blkcipher_encrypt(desc, sg, sg, blen);
sg 97 crypto/tcrypt.c ret = crypto_blkcipher_decrypt(desc, sg, sg, blen);
sg 109 crypto/tcrypt.c ret = crypto_blkcipher_encrypt(desc, sg, sg, blen);
sg 111 crypto/tcrypt.c ret = crypto_blkcipher_decrypt(desc, sg, sg, blen);
sg 166 crypto/tcrypt.c struct scatterlist sg[TVMEMSIZE];
sg 196 crypto/tcrypt.c sg_init_table(sg, TVMEMSIZE);
sg 197 crypto/tcrypt.c sg_set_buf(sg, tvmem[0] + *keysize,
sg 200 crypto/tcrypt.c sg_set_buf(sg + j, tvmem[j], PAGE_SIZE);
sg 211 crypto/tcrypt.c ret = test_cipher_jiffies(&desc, enc, sg,
sg 214 crypto/tcrypt.c ret = test_cipher_cycles(&desc, enc, sg,
sg 241 crypto/tcrypt.c ret = crypto_hash_digest(desc, sg, blen, out);
sg 260 crypto/tcrypt.c return test_hash_jiffies_digest(desc, sg, blen, out, sec);
sg 268 crypto/tcrypt.c ret = crypto_hash_update(desc, sg, plen);
sg 296 crypto/tcrypt.c ret = crypto_hash_digest(desc, sg, blen, out);
sg 307 crypto/tcrypt.c ret = crypto_hash_digest(desc, sg, blen, out);
sg 337 crypto/tcrypt.c return test_hash_cycles_digest(desc, sg, blen, out);
sg 348 crypto/tcrypt.c ret = crypto_hash_update(desc, sg, plen);
sg 367 crypto/tcrypt.c ret = crypto_hash_update(desc, sg, plen);
sg 396 crypto/tcrypt.c struct scatterlist sg[TVMEMSIZE];
sg 422 crypto/tcrypt.c sg_init_table(sg, TVMEMSIZE);
sg 424 crypto/tcrypt.c sg_set_buf(sg + i, tvmem[i], PAGE_SIZE);
sg 439 crypto/tcrypt.c ret = test_hash_jiffies(&desc, sg, speed[i].blen,
sg 442 crypto/tcrypt.c ret = test_hash_cycles(&desc, sg, speed[i].blen,
sg 121 crypto/testmgr.c struct scatterlist sg[8];
sg 146 crypto/testmgr.c sg_init_one(&sg[0], hash_buff, template[i].psize);
sg 160 crypto/testmgr.c ahash_request_set_crypt(req, sg, result, template[i].psize);
sg 197 crypto/testmgr.c sg_init_table(sg, template[i].np);
sg 199 crypto/testmgr.c sg_set_buf(&sg[k],
sg 222 crypto/testmgr.c ahash_request_set_crypt(req, sg, result,
sg 272 crypto/testmgr.c struct scatterlist sg[8];
sg 342 crypto/testmgr.c sg_init_one(&sg[0], input,
sg 347 crypto/testmgr.c aead_request_set_crypt(req, sg, sg,
sg 412 crypto/testmgr.c sg_init_table(sg, template[i].np);
sg 430 crypto/testmgr.c sg_set_buf(&sg[k], q, template[i].tap[k]);
sg 443 crypto/testmgr.c if (WARN_ON(sg[k - 1].offset +
sg 444 crypto/testmgr.c sg[k - 1].length + authsize >
sg 450 crypto/testmgr.c sg[k - 1].length += authsize;
sg 464 crypto/testmgr.c aead_request_set_crypt(req, sg, sg,
sg 618 crypto/testmgr.c struct scatterlist sg[8];
sg 670 crypto/testmgr.c sg_init_one(&sg[0], data, template[i].ilen);
sg 672 crypto/testmgr.c ablkcipher_request_set_crypt(req, sg, sg,
sg 737 crypto/testmgr.c sg_init_table(sg, template[i].np);
sg 753 crypto/testmgr.c sg_set_buf(&sg[k], q, template[i].tap[k]);
sg 758 crypto/testmgr.c ablkcipher_request_set_crypt(req, sg, sg,
sg 500 crypto/twofish_common.c ctx->s[2][i] = mds[2][q1[(a) ^ sc] ^ sg]; \
sg 508 crypto/twofish_common.c ctx->s[2][i] = mds[2][q1[q0[(a) ^ sc] ^ sg] ^ sk]; \
sg 516 crypto/twofish_common.c ctx->s[2][i] = mds[2][q1[q0[q0[(a) ^ sc] ^ sg] ^ sk] ^ so]; \
sg 596 crypto/twofish_common.c u8 sa = 0, sb = 0, sc = 0, sd = 0, se = 0, sf = 0, sg = 0, sh = 0;
sg 621 crypto/twofish_common.c CALC_S (se, sf, sg, sh, 8, 0x00, 0x2D, 0x01, 0x2D); /* 01 A4 02 A4 */
sg 622 crypto/twofish_common.c CALC_S (se, sf, sg, sh, 9, 0x2D, 0xA4, 0x44, 0x8A); /* A4 56 A1 55 */
sg 623 crypto/twofish_common.c CALC_S (se, sf, sg, sh, 10, 0x8A, 0xD5, 0xBF, 0xD1); /* 55 82 FC 87 */
sg 624 crypto/twofish_common.c CALC_S (se, sf, sg, sh, 11, 0xD1, 0x7F, 0x3D, 0x99); /* 87 F3 C1 5A */
sg 625 crypto/twofish_common.c CALC_S (se, sf, sg, sh, 12, 0x99, 0x46, 0x66, 0x96); /* 5A 1E 47 58 */
sg 626 crypto/twofish_common.c CALC_S (se, sf, sg, sh, 13, 0x96, 0x3C, 0x5B, 0xED); /* 58 C6 AE DB */
sg 627 crypto/twofish_common.c CALC_S (se, sf, sg, sh, 14, 0xED, 0x37, 0x4F, 0xE0); /* DB 68 3D 9E */
sg 628 crypto/twofish_common.c CALC_S (se, sf, sg, sh, 15, 0xE0, 0xD0, 0x8C, 0x17); /* 9E E5 19 03 */
sg 121 crypto/xcbc.c struct page *pg = sg_page(sg);
sg 122 crypto/xcbc.c unsigned int offset = sg->offset;
sg 123 crypto/xcbc.c unsigned int slen = sg->length;
sg 186 crypto/xcbc.c sg = scatterwalk_sg_next(sg);
sg 198 crypto/xcbc.c return crypto_xcbc_digest_update2(pdesc, sg, nbytes);
sg 258 crypto/xcbc.c crypto_xcbc_digest_update2(pdesc, sg, nbytes);
sg 98 fs/ecryptfs/crypto.c struct scatterlist sg;
sg 106 fs/ecryptfs/crypto.c sg_init_one(&sg, (u8 *)src, len);
sg 126 fs/ecryptfs/crypto.c rc = crypto_hash_update(&desc, &sg, len);
sg 301 fs/ecryptfs/crypto.c sg_init_table(sg, sg_size);
sg 306 fs/ecryptfs/crypto.c if (sg)
sg 307 fs/ecryptfs/crypto.c sg_set_page(&sg[i], pg, 0, offset);
sg 310 fs/ecryptfs/crypto.c if (sg)
sg 311 fs/ecryptfs/crypto.c sg[i].length = remainder_of_page;
sg 315 fs/ecryptfs/crypto.c if (sg)
sg 316 fs/ecryptfs/crypto.c sg[i].length = size;
sg 2102 fs/ext4/mballoc.c } sg;
sg 2113 fs/ext4/mballoc.c i = (sb->s_blocksize_bits + 2) * sizeof(sg.info.bb_counters[0]) +
sg 2121 fs/ext4/mballoc.c memcpy(&sg, ext4_get_group_info(sb, group), i);
sg 2125 fs/ext4/mballoc.c seq_printf(seq, "#%-5lu: %-5u %-5u %-5u [", group, sg.info.bb_free,
sg 2126 fs/ext4/mballoc.c sg.info.bb_fragments, sg.info.bb_first_free);
sg 2129 fs/ext4/mballoc.c sg.info.bb_counters[i] : 0);
sg 92 fs/nfsd/nfs4recover.c struct scatterlist sg;
sg 106 fs/nfsd/nfs4recover.c sg_init_one(&sg, clname->data, clname->len);
sg 108 fs/nfsd/nfs4recover.c if (crypto_hash_digest(&desc, &sg, sg.length, cksum.data))
sg 17 include/asm-cris/scatterlist.h #define sg_dma_address(sg) ((sg)->address)
sg 18 include/asm-cris/scatterlist.h #define sg_dma_len(sg) ((sg)->length)
sg 113 include/asm-frv/pci.h frv_cache_wback_inv(sg_dma_address(&sg[i]),
sg 114 include/asm-frv/pci.h sg_dma_address(&sg[i])+sg_dma_len(&sg[i]));
sg 41 include/asm-frv/scatterlist.h #define sg_dma_address(sg) ((sg)->dma_address)
sg 42 include/asm-frv/scatterlist.h #define sg_dma_len(sg) ((sg)->length)
sg 96 include/asm-generic/dma-mapping.h return pci_map_sg(to_pci_dev(dev), sg, nents, (int)direction);
sg 105 include/asm-generic/dma-mapping.h pci_unmap_sg(to_pci_dev(dev), sg, nhwentries, (int)direction);
sg 134 include/asm-generic/dma-mapping.h pci_dma_sync_sg_for_cpu(to_pci_dev(dev), sg, nelems, (int)direction);
sg 143 include/asm-generic/dma-mapping.h pci_dma_sync_sg_for_device(to_pci_dev(dev), sg, nelems, (int)direction);
sg 63 include/asm-generic/pci-dma-compat.h return dma_map_sg(hwdev == NULL ? NULL : &hwdev->dev, sg, nents, (enum dma_data_direction)direction);
sg 70 include/asm-generic/pci-dma-compat.h dma_unmap_sg(hwdev == NULL ? NULL : &hwdev->dev, sg, nents, (enum dma_data_direction)direction);
sg 91 include/asm-generic/pci-dma-compat.h dma_sync_sg_for_cpu(hwdev == NULL ? NULL : &hwdev->dev, sg, nelems, (enum dma_data_direction)direction);
sg 98 include/asm-generic/pci-dma-compat.h dma_sync_sg_for_device(hwdev == NULL ? NULL : &hwdev->dev, sg, nelems, (enum dma_data_direction)direction);
sg 20 include/asm-m68k/scatterlist.h #define sg_dma_address(sg) ((sg)->dma_address)
sg 21 include/asm-m68k/scatterlist.h #define sg_dma_len(sg) ((sg)->length)
sg 78 include/asm-mn10300/dma-mapping.h struct scatterlist *sg;
sg 84 include/asm-mn10300/dma-mapping.h for_each_sg(sglist, sg, nents, i) {
sg 85 include/asm-mn10300/dma-mapping.h BUG_ON(!sg_page(sg));
sg 87 include/asm-mn10300/dma-mapping.h sg->dma_address = sg_phys(sg);
sg 52 include/asm-mn10300/scatterlist.h #define sg_dma_address(sg) ((sg)->dma_address)
sg 53 include/asm-mn10300/scatterlist.h #define sg_dma_len(sg) ((sg)->length)
sg 16 include/asm-parisc/dma-mapping.h int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction direction);
sg 17 include/asm-parisc/dma-mapping.h void (*unmap_sg)(struct device *dev, struct scatterlist *sg, int nhwents, enum dma_data_direction direction);
sg 20 include/asm-parisc/dma-mapping.h void (*dma_sync_sg_for_cpu)(struct device *dev, struct scatterlist *sg, int nelems, enum dma_data_direction direction);
sg 21 include/asm-parisc/dma-mapping.h void (*dma_sync_sg_for_device)(struct device *dev, struct scatterlist *sg, int nelems, enum dma_data_direction direction);
sg 95 include/asm-parisc/dma-mapping.h return hppa_dma_ops->map_sg(dev, sg, nents, direction);
sg 102 include/asm-parisc/dma-mapping.h hppa_dma_ops->unmap_sg(dev, sg, nhwentries, direction);
sg 159 include/asm-parisc/dma-mapping.h hppa_dma_ops->dma_sync_sg_for_cpu(dev, sg, nelems, direction);
sg 167 include/asm-parisc/dma-mapping.h hppa_dma_ops->dma_sync_sg_for_device(dev, sg, nelems, direction);
sg 21 include/asm-parisc/scatterlist.h #define sg_virt_addr(sg) ((unsigned long)sg_virt(sg))
sg 22 include/asm-parisc/scatterlist.h #define sg_dma_address(sg) ((sg)->iova)
sg 23 include/asm-parisc/scatterlist.h #define sg_dma_len(sg) ((sg)->iova_length)
sg 43 include/asm-x86/dma-mapping.h struct scatterlist *sg, int nelems,
sg 46 include/asm-x86/dma-mapping.h struct scatterlist *sg, int nelems,
sg 48 include/asm-x86/dma-mapping.h int (*map_sg)(struct device *hwdev, struct scatterlist *sg,
sg 51 include/asm-x86/dma-mapping.h struct scatterlist *sg, int nents,
sg 123 include/asm-x86/dma-mapping.h return ops->map_sg(hwdev, sg, nents, direction);
sg 134 include/asm-x86/dma-mapping.h ops->unmap_sg(hwdev, sg, nents, direction);
sg 196 include/asm-x86/dma-mapping.h ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
sg 208 include/asm-x86/dma-mapping.h ops->sync_sg_for_device(hwdev, sg, nelems, direction);
sg 26 include/asm-x86/scatterlist.h #define sg_dma_address(sg) ((sg)->dma_address)
sg 28 include/asm-x86/scatterlist.h # define sg_dma_len(sg) ((sg)->length)
sg 30 include/asm-x86/scatterlist.h # define sg_dma_len(sg) ((sg)->dma_length)
sg 60 include/asm-xtensa/dma-mapping.h for (i = 0; i < nents; i++, sg++ ) {
sg 61 include/asm-xtensa/dma-mapping.h BUG_ON(!sg_page(sg));
sg 63 include/asm-xtensa/dma-mapping.h sg->dma_address = sg_phys(sg);
sg 64 include/asm-xtensa/dma-mapping.h consistent_sync(sg_virt(sg), sg->length, direction);
sg 129 include/asm-xtensa/dma-mapping.h for (i = 0; i < nelems; i++, sg++)
sg 130 include/asm-xtensa/dma-mapping.h consistent_sync(sg_virt(sg), sg->length, dir);
sg 138 include/asm-xtensa/dma-mapping.h for (i = 0; i < nelems; i++, sg++)
sg 139 include/asm-xtensa/dma-mapping.h consistent_sync(sg_virt(sg), sg->length, dir);
sg 33 include/asm-xtensa/scatterlist.h #define sg_dma_address(sg) ((sg)->dma_address)
sg 34 include/asm-xtensa/scatterlist.h #define sg_dma_len(sg) ((sg)->length)
sg 66 include/crypto/algapi.h struct scatterlist *sg;
sg 252 include/crypto/algapi.h walk->in.sg = src;
sg 253 include/crypto/algapi.h walk->out.sg = dst;
sg 32 include/crypto/internal/hash.h struct scatterlist *sg;
sg 65 include/crypto/scatterwalk.h if (sg_is_last(sg))
sg 68 include/crypto/scatterwalk.h return (++sg)->length ? sg : (void *)sg_page(sg);
sg 74 include/crypto/scatterwalk.h return !(((sg_page(walk_in->sg) - sg_page(walk_out->sg)) << PAGE_SHIFT) +
sg 80 include/crypto/scatterwalk.h unsigned int len = walk->sg->offset + walk->sg->length - walk->offset;
sg 106 include/crypto/scatterwalk.h return sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
sg 759 include/drm/drmP.h struct drm_sg_mem *sg; /**< Scatter gather memory */
sg 285 include/linux/crypto.h int (*update)(struct hash_desc *desc, struct scatterlist *sg,
sg 288 include/linux/crypto.h int (*digest)(struct hash_desc *desc, struct scatterlist *sg,
sg 424 include/linux/crypto.h struct scatterlist *sg, unsigned int nsg);
sg 426 include/linux/crypto.h int (*digest)(struct hash_desc *desc, struct scatterlist *sg,
sg 1254 include/linux/crypto.h return crypto_hash_crt(desc->tfm)->update(desc, sg, nbytes);
sg 1266 include/linux/crypto.h return crypto_hash_crt(desc->tfm)->digest(desc, sg, nbytes, out);
sg 814 include/linux/i2o.h sg_count = dma_map_sg(&c->pdev->dev, sg, sg_count, direction);
sg 828 include/linux/i2o.h *mptr++ = cpu_to_le32(sg_flags | sg_dma_len(sg));
sg 829 include/linux/i2o.h *mptr++ = cpu_to_le32(i2o_dma_low(sg_dma_address(sg)));
sg 832 include/linux/i2o.h *mptr++ = cpu_to_le32(i2o_dma_high(sg_dma_address(sg)));
sg 834 include/linux/i2o.h sg = sg_next(sg);
sg 447 include/linux/ide.h struct scatterlist *sg;
sg 533 include/linux/libata.h struct scatterlist *sg;
sg 1365 include/linux/libata.h qc->sg = NULL;
sg 247 include/linux/memstick.h struct scatterlist sg;
sg 117 include/linux/mmc/core.h struct scatterlist *sg; /* I/O scatter list */
sg 40 include/linux/scatterlist.h #define sg_is_chain(sg) ((sg)->page_link & 0x01)
sg 41 include/linux/scatterlist.h #define sg_is_last(sg) ((sg)->page_link & 0x02)
sg 43 include/linux/scatterlist.h ((struct scatterlist *) ((sg)->page_link & ~0x03))
sg 57 include/linux/scatterlist.h unsigned long page_link = sg->page_link & 0x3;
sg 65 include/linux/scatterlist.h BUG_ON(sg->sg_magic != SG_MAGIC);
sg 66 include/linux/scatterlist.h BUG_ON(sg_is_chain(sg));
sg 68 include/linux/scatterlist.h sg->page_link = page_link | (unsigned long) page;
sg 88 include/linux/scatterlist.h sg_assign_page(sg, page);
sg 89 include/linux/scatterlist.h sg->offset = offset;
sg 90 include/linux/scatterlist.h sg->length = len;
sg 96 include/linux/scatterlist.h BUG_ON(sg->sg_magic != SG_MAGIC);
sg 97 include/linux/scatterlist.h BUG_ON(sg_is_chain(sg));
sg 99 include/linux/scatterlist.h return (struct page *)((sg)->page_link & ~0x3);
sg 112 include/linux/scatterlist.h sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf));
sg 119 include/linux/scatterlist.h for (__i = 0, sg = (sglist); __i < (nr); __i++, sg = sg_next(sg))
sg 163 include/linux/scatterlist.h BUG_ON(sg->sg_magic != SG_MAGIC);
sg 168 include/linux/scatterlist.h sg->page_link |= 0x02;
sg 169 include/linux/scatterlist.h sg->page_link &= ~0x01;
sg 184 include/linux/scatterlist.h return page_to_phys(sg_page(sg)) + sg->offset;
sg 199 include/linux/scatterlist.h return page_address(sg_page(sg)) + sg->offset;
sg 1581 include/linux/usb.h struct scatterlist *sg;
sg 60 include/linux/virtio.h struct scatterlist sg[],
sg 951 include/rdma/ib_verbs.h struct scatterlist *sg, int nents,
sg 954 include/rdma/ib_verbs.h struct scatterlist *sg, int nents,
sg 957 include/rdma/ib_verbs.h struct scatterlist *sg);
sg 959 include/rdma/ib_verbs.h struct scatterlist *sg);
sg 1695 include/rdma/ib_verbs.h return dev->dma_ops->map_sg(dev, sg, nents, direction);
sg 1696 include/rdma/ib_verbs.h return dma_map_sg(dev->dma_device, sg, nents, direction);
sg 1711 include/rdma/ib_verbs.h dev->dma_ops->unmap_sg(dev, sg, nents, direction);
sg 1713 include/rdma/ib_verbs.h dma_unmap_sg(dev->dma_device, sg, nents, direction);
sg 1721 include/rdma/ib_verbs.h return dma_map_sg_attrs(dev->dma_device, sg, nents, direction, attrs);
sg 1729 include/rdma/ib_verbs.h dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, attrs);
sg 1740 include/rdma/ib_verbs.h return dev->dma_ops->dma_address(dev, sg);
sg 1741 include/rdma/ib_verbs.h return sg_dma_address(sg);
sg 1753 include/rdma/ib_verbs.h return dev->dma_ops->dma_len(dev, sg);
sg 1754 include/rdma/ib_verbs.h return sg_dma_len(sg);
sg 180 include/scsi/scsi_cmnd.h for_each_sg(scsi_sglist(cmd), sg, nseg, __i)
sg 292 include/scsi/scsi_cmnd.h for_each_sg(scsi_prot_sglist(cmd), sg, nseg, __i)
sg 986 include/sound/pcm.h struct snd_sg_buf *sg = snd_pcm_substream_sgbuf(substream);
sg 987 include/sound/pcm.h return snd_sgbuf_get_addr(sg, ofs);
sg 993 include/sound/pcm.h struct snd_sg_buf *sg = snd_pcm_substream_sgbuf(substream);
sg 994 include/sound/pcm.h return snd_sgbuf_get_ptr(sg, ofs);
sg 126 kernel/sched.c return reciprocal_divide(load, sg->reciprocal_cpu_power);
sg 135 kernel/sched.c sg->__cpu_power += val;
sg 136 kernel/sched.c sg->reciprocal_cpu_power = reciprocal_value(sg->__cpu_power);
sg 6941 kernel/sched.c struct sched_group *sg;
sg 6942 kernel/sched.c int group = group_fn(i, cpu_map, &sg, tmpmask);
sg 6948 kernel/sched.c cpus_clear(sg->cpumask);
sg 6949 kernel/sched.c sg->__cpu_power = 0;
sg 6956 kernel/sched.c cpu_set(j, sg->cpumask);
sg 6959 kernel/sched.c first = sg;
sg 6961 kernel/sched.c last->next = sg;
sg 6962 kernel/sched.c last = sg;
sg 7054 kernel/sched.c if (sg)
sg 7055 kernel/sched.c *sg = &per_cpu(sched_group_cpus, cpu);
sg 7078 kernel/sched.c if (sg)
sg 7079 kernel/sched.c *sg = &per_cpu(sched_group_core, group);
sg 7087 kernel/sched.c if (sg)
sg 7088 kernel/sched.c *sg = &per_cpu(sched_group_core, cpu);
sg 7112 kernel/sched.c if (sg)
sg 7113 kernel/sched.c *sg = &per_cpu(sched_group_phys, group);
sg 7138 kernel/sched.c if (sg)
sg 7139 kernel/sched.c *sg = &per_cpu(sched_group_allnodes, group);
sg 7145 kernel/sched.c struct sched_group *sg = group_head;
sg 7148 kernel/sched.c if (!sg)
sg 7151 kernel/sched.c for_each_cpu_mask_nr(j, sg->cpumask) {
sg 7163 kernel/sched.c sg_inc_cpu_power(sg, sd->groups->__cpu_power);
sg 7165 kernel/sched.c sg = sg->next;
sg 7166 kernel/sched.c } while (sg != group_head);
sg 7184 kernel/sched.c struct sched_group *oldsg, *sg = sched_group_nodes[i];
sg 7191 kernel/sched.c if (sg == NULL)
sg 7193 kernel/sched.c sg = sg->next;
sg 7195 kernel/sched.c oldsg = sg;
sg 7196 kernel/sched.c sg = sg->next;
sg 7549 kernel/sched.c struct sched_group *sg, *prev;
sg 7567 kernel/sched.c sg = kmalloc_node(sizeof(struct sched_group), GFP_KERNEL, i);
sg 7568 kernel/sched.c if (!sg) {
sg 7573 kernel/sched.c sched_group_nodes[i] = sg;
sg 7578 kernel/sched.c sd->groups = sg;
sg 7580 kernel/sched.c sg->__cpu_power = 0;
sg 7581 kernel/sched.c sg->cpumask = *nodemask;
sg 7582 kernel/sched.c sg->next = sg;
sg 7584 kernel/sched.c prev = sg;
sg 7601 kernel/sched.c sg = kmalloc_node(sizeof(struct sched_group),
sg 7603 kernel/sched.c if (!sg) {
sg 7608 kernel/sched.c sg->__cpu_power = 0;
sg 7609 kernel/sched.c sg->cpumask = *tmpmask;
sg 7610 kernel/sched.c sg->next = prev->next;
sg 7612 kernel/sched.c prev->next = sg;
sg 7613 kernel/sched.c prev = sg;
sg 7645 kernel/sched.c struct sched_group *sg;
sg 7647 kernel/sched.c cpu_to_allnodes_group(first_cpu(*cpu_map), cpu_map, &sg,
sg 7649 kernel/sched.c init_numa_sched_groups_power(sg);
sg 26 lib/scatterlist.c BUG_ON(sg->sg_magic != SG_MAGIC);
sg 28 lib/scatterlist.c if (sg_is_last(sg))
sg 31 lib/scatterlist.c sg++;
sg 32 lib/scatterlist.c if (unlikely(sg_is_chain(sg)))
sg 33 lib/scatterlist.c sg = sg_chain_ptr(sg);
sg 35 lib/scatterlist.c return sg;
sg 58 lib/scatterlist.c struct scatterlist *sg, *ret = NULL;
sg 61 lib/scatterlist.c for_each_sg(sgl, sg, nents, i)
sg 62 lib/scatterlist.c ret = sg;
sg 106 lib/scatterlist.c sg_init_table(sg, 1);
sg 107 lib/scatterlist.c sg_set_buf(sg, buf, buflen);
sg 126 lib/scatterlist.c free_page((unsigned long) sg);
sg 128 lib/scatterlist.c kfree(sg);
sg 214 lib/scatterlist.c struct scatterlist *sg, *prv;
sg 236 lib/scatterlist.c sg = alloc_fn(alloc_size, gfp_mask);
sg 237 lib/scatterlist.c if (unlikely(!sg))
sg 240 lib/scatterlist.c sg_init_table(sg, alloc_size);
sg 248 lib/scatterlist.c sg_chain(prv, max_ents, sg);
sg 250 lib/scatterlist.c table->sgl = sg;
sg 256 lib/scatterlist.c sg_mark_end(&sg[sg_size - 1]);
sg 266 lib/scatterlist.c prv = sg;
sg 39 lib/swiotlb.c #define SG_ENT_VIRT_ADDRESS(sg) (sg_virt((sg)))
sg 40 lib/swiotlb.c #define SG_ENT_PHYS_ADDRESS(sg) virt_to_bus(SG_ENT_VIRT_ADDRESS(sg))
sg 710 lib/swiotlb.c struct scatterlist *sg;
sg 717 lib/swiotlb.c for_each_sg(sgl, sg, nelems, i) {
sg 718 lib/swiotlb.c addr = SG_ENT_VIRT_ADDRESS(sg);
sg 721 lib/swiotlb.c address_needs_mapping(hwdev, dev_addr, sg->length)) {
sg 722 lib/swiotlb.c void *map = map_single(hwdev, addr, sg->length, dir);
sg 726 lib/swiotlb.c swiotlb_full(hwdev, sg->length, dir, 0);
sg 732 lib/swiotlb.c sg->dma_address = virt_to_bus(map);
sg 734 lib/swiotlb.c sg->dma_address = dev_addr;
sg 735 lib/swiotlb.c sg->dma_length = sg->length;
sg 756 lib/swiotlb.c struct scatterlist *sg;
sg 761 lib/swiotlb.c for_each_sg(sgl, sg, nelems, i) {
sg 762 lib/swiotlb.c if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
sg 763 lib/swiotlb.c unmap_single(hwdev, bus_to_virt(sg->dma_address),
sg 764 lib/swiotlb.c sg->dma_length, dir);
sg 766 lib/swiotlb.c dma_mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length);
sg 789 lib/swiotlb.c struct scatterlist *sg;
sg 794 lib/swiotlb.c for_each_sg(sgl, sg, nelems, i) {
sg 795 lib/swiotlb.c if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
sg 796 lib/swiotlb.c sync_single(hwdev, bus_to_virt(sg->dma_address),
sg 797 lib/swiotlb.c sg->dma_length, dir, target);
sg 799 lib/swiotlb.c dma_mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length);
sg 807 lib/swiotlb.c swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
sg 814 lib/swiotlb.c swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
sg 132 net/9p/trans_virtio.c struct scatterlist sg[VIRTQUEUE_NUM];
sg 277 net/9p/trans_virtio.c sg_set_buf(&sg[index++], data, s);
sg 327 net/9p/trans_virtio.c out = pack_sg_list(chan->sg, 0, VIRTQUEUE_NUM, tc->sdata, tc->size);
sg 328 net/9p/trans_virtio.c in = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM-out, rdata, t->msize);
sg 332 net/9p/trans_virtio.c if (chan->vq->vq_ops->add_buf(chan->vq, chan->sg, out, in, tc)) {
sg 407 net/9p/trans_virtio.c sg_init_table(chan->sg, VIRTQUEUE_NUM);
sg 2281 net/core/skbuff.c int sg = features & NETIF_F_SG;
sg 2305 net/core/skbuff.c if (hsize > len || !sg)
sg 2328 net/core/skbuff.c if (!sg) {
sg 2420 net/core/skbuff.c sg_set_buf(sg, skb->data + offset, copy);
sg 2438 net/core/skbuff.c sg_set_page(&sg[elt], frag->page, copy,
sg 2460 net/core/skbuff.c elt += __skb_to_sgvec(list, sg+elt, offset - start,
sg 2475 net/core/skbuff.c int nsg = __skb_to_sgvec(skb, sg, offset, len);
sg 2477 net/core/skbuff.c sg_mark_end(&sg[nsg - 1]);
sg 361 net/ieee80211/ieee80211_crypt_tkip.c struct scatterlist sg;
sg 393 net/ieee80211/ieee80211_crypt_tkip.c sg_init_one(&sg, pos, len + 4);
sg 394 net/ieee80211/ieee80211_crypt_tkip.c return crypto_blkcipher_encrypt(&desc, &sg, &sg, len + 4);
sg 421 net/ieee80211/ieee80211_crypt_tkip.c struct scatterlist sg;
sg 486 net/ieee80211/ieee80211_crypt_tkip.c sg_init_one(&sg, pos, plen + 4);
sg 487 net/ieee80211/ieee80211_crypt_tkip.c if (crypto_blkcipher_decrypt(&desc, &sg, &sg, plen + 4)) {
sg 532 net/ieee80211/ieee80211_crypt_tkip.c struct scatterlist sg[2];
sg 538 net/ieee80211/ieee80211_crypt_tkip.c sg_init_table(sg, 2);
sg 539 net/ieee80211/ieee80211_crypt_tkip.c sg_set_buf(&sg[0], hdr, 16);
sg 540 net/ieee80211/ieee80211_crypt_tkip.c sg_set_buf(&sg[1], data, data_len);
sg 547 net/ieee80211/ieee80211_crypt_tkip.c return crypto_hash_digest(&desc, sg, data_len + 16, mic);
sg 143 net/ieee80211/ieee80211_crypt_wep.c struct scatterlist sg;
sg 173 net/ieee80211/ieee80211_crypt_wep.c sg_init_one(&sg, pos, len + 4);
sg 174 net/ieee80211/ieee80211_crypt_wep.c return crypto_blkcipher_encrypt(&desc, &sg, &sg, len + 4);
sg 191 net/ieee80211/ieee80211_crypt_wep.c struct scatterlist sg;
sg 213 net/ieee80211/ieee80211_crypt_wep.c sg_init_one(&sg, pos, plen + 4);
sg 214 net/ieee80211/ieee80211_crypt_wep.c if (crypto_blkcipher_decrypt(&desc, &sg, &sg, plen + 4))
sg 110 net/ipv4/esp4.c struct scatterlist *sg;
sg 149 net/ipv4/esp4.c sg = asg + 1;
sg 204 net/ipv4/esp4.c sg_init_table(sg, nfrags);
sg 205 net/ipv4/esp4.c skb_to_sgvec(skb, sg,
sg 211 net/ipv4/esp4.c aead_givcrypt_set_crypt(req, sg, sg, clen, iv);
sg 335 net/ipv4/esp4.c struct scatterlist *sg;
sg 358 net/ipv4/esp4.c sg = asg + 1;
sg 367 net/ipv4/esp4.c sg_init_table(sg, nfrags);
sg 368 net/ipv4/esp4.c skb_to_sgvec(skb, sg, sizeof(*esph) + crypto_aead_ivsize(aead), elen);
sg 372 net/ipv4/esp4.c aead_request_set_crypt(req, sg, sg, elen, iv);
sg 2597 net/ipv4/tcp.c struct scatterlist sg;
sg 2603 net/ipv4/tcp.c sg_init_one(&sg, th, sizeof(struct tcphdr));
sg 2604 net/ipv4/tcp.c err = crypto_hash_update(&hp->md5_desc, &sg, sizeof(struct tcphdr));
sg 2614 net/ipv4/tcp.c struct scatterlist sg;
sg 2622 net/ipv4/tcp.c sg_init_table(&sg, 1);
sg 2624 net/ipv4/tcp.c sg_set_buf(&sg, ((u8 *) tp) + header_len, head_data_len);
sg 2625 net/ipv4/tcp.c if (crypto_hash_update(desc, &sg, head_data_len))
sg 2630 net/ipv4/tcp.c sg_set_page(&sg, f->page, f->size, f->page_offset);
sg 2631 net/ipv4/tcp.c if (crypto_hash_update(desc, &sg, f->size))
sg 2642 net/ipv4/tcp.c struct scatterlist sg;
sg 2644 net/ipv4/tcp.c sg_init_one(&sg, key->key, key->keylen);
sg 2645 net/ipv4/tcp.c return crypto_hash_update(&hp->md5_desc, &sg, key->keylen);
sg 996 net/ipv4/tcp_ipv4.c struct scatterlist sg;
sg 1011 net/ipv4/tcp_ipv4.c sg_init_one(&sg, bp, sizeof(*bp));
sg 1012 net/ipv4/tcp_ipv4.c return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
sg 136 net/ipv6/esp6.c struct scatterlist *sg;
sg 173 net/ipv6/esp6.c sg = asg + 1;
sg 193 net/ipv6/esp6.c sg_init_table(sg, nfrags);
sg 194 net/ipv6/esp6.c skb_to_sgvec(skb, sg,
sg 200 net/ipv6/esp6.c aead_givcrypt_set_crypt(req, sg, sg, clen, iv);
sg 282 net/ipv6/esp6.c struct scatterlist *sg;
sg 309 net/ipv6/esp6.c sg = asg + 1;
sg 318 net/ipv6/esp6.c sg_init_table(sg, nfrags);
sg 319 net/ipv6/esp6.c skb_to_sgvec(skb, sg, sizeof(*esph) + crypto_aead_ivsize(aead), elen);
sg 323 net/ipv6/esp6.c aead_request_set_crypt(req, sg, sg, elen, iv);
sg 746 net/ipv6/tcp_ipv6.c struct scatterlist sg;
sg 755 net/ipv6/tcp_ipv6.c sg_init_one(&sg, bp, sizeof(*bp));
sg 756 net/ipv6/tcp_ipv6.c return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
sg 263 net/ipx/ipx_route.c struct sockaddr_ipx *sg, *st;
sg 269 net/ipx/ipx_route.c sg = (struct sockaddr_ipx *)&rt.rt_gateway;
sg 274 net/ipx/ipx_route.c sg->sipx_family != AF_IPX ||
sg 285 net/ipx/ipx_route.c f.ipx_router_network = sg->sipx_network;
sg 286 net/ipx/ipx_route.c memcpy(f.ipx_router_node, sg->sipx_node, IPX_NODE_LEN);
sg 125 net/mac80211/wep.c struct scatterlist sg;
sg 132 net/mac80211/wep.c sg_init_one(&sg, data, data_len + WEP_ICV_LEN);
sg 133 net/mac80211/wep.c crypto_blkcipher_encrypt(&desc, &sg, &sg, sg.length);
sg 192 net/mac80211/wep.c struct scatterlist sg;
sg 196 net/mac80211/wep.c sg_init_one(&sg, data, data_len + WEP_ICV_LEN);
sg 197 net/mac80211/wep.c crypto_blkcipher_decrypt(&desc, &sg, &sg, sg.length);
sg 115 net/rxrpc/rxkad.c struct scatterlist sg[2];
sg 138 net/rxrpc/rxkad.c sg_init_one(&sg[0], &tmpbuf, sizeof(tmpbuf));
sg 139 net/rxrpc/rxkad.c sg_init_one(&sg[1], &tmpbuf, sizeof(tmpbuf));
sg 140 net/rxrpc/rxkad.c crypto_blkcipher_encrypt_iv(&desc, &sg[0], &sg[1], sizeof(tmpbuf));
sg 159 net/rxrpc/rxkad.c struct scatterlist sg[2];
sg 182 net/rxrpc/rxkad.c sg_init_one(&sg[0], &tmpbuf, sizeof(tmpbuf));
sg 183 net/rxrpc/rxkad.c sg_init_one(&sg[1], &tmpbuf, sizeof(tmpbuf));
sg 184 net/rxrpc/rxkad.c crypto_blkcipher_encrypt_iv(&desc, &sg[0], &sg[1], sizeof(tmpbuf));
sg 206 net/rxrpc/rxkad.c struct scatterlist sg[16];
sg 228 net/rxrpc/rxkad.c sg_init_one(&sg[0], sechdr, sizeof(rxkhdr));
sg 229 net/rxrpc/rxkad.c sg_init_one(&sg[1], &rxkhdr, sizeof(rxkhdr));
sg 230 net/rxrpc/rxkad.c crypto_blkcipher_encrypt_iv(&desc, &sg[0], &sg[1], sizeof(rxkhdr));
sg 240 net/rxrpc/rxkad.c sg_init_table(sg, nsg);
sg 241 net/rxrpc/rxkad.c skb_to_sgvec(skb, sg, 0, len);
sg 242 net/rxrpc/rxkad.c crypto_blkcipher_encrypt_iv(&desc, sg, sg, len);
sg 259 net/rxrpc/rxkad.c struct scatterlist sg[2];
sg 292 net/rxrpc/rxkad.c sg_init_one(&sg[0], &tmpbuf, sizeof(tmpbuf));
sg 293 net/rxrpc/rxkad.c sg_init_one(&sg[1], &tmpbuf, sizeof(tmpbuf));
sg 294 net/rxrpc/rxkad.c crypto_blkcipher_encrypt_iv(&desc, &sg[0], &sg[1], sizeof(tmpbuf));
sg 333 net/rxrpc/rxkad.c struct scatterlist sg[16];
sg 348 net/rxrpc/rxkad.c sg_init_table(sg, nsg);
sg 349 net/rxrpc/rxkad.c skb_to_sgvec(skb, sg, 0, 8);
sg 357 net/rxrpc/rxkad.c crypto_blkcipher_decrypt_iv(&desc, sg, sg, 8);
sg 408 net/rxrpc/rxkad.c struct scatterlist _sg[4], *sg;
sg 423 net/rxrpc/rxkad.c sg = _sg;
sg 425 net/rxrpc/rxkad.c sg = kmalloc(sizeof(*sg) * nsg, GFP_NOIO);
sg 426 net/rxrpc/rxkad.c if (!sg)
sg 430 net/rxrpc/rxkad.c sg_init_table(sg, nsg);
sg 431 net/rxrpc/rxkad.c skb_to_sgvec(skb, sg, 0, skb->len);
sg 440 net/rxrpc/rxkad.c crypto_blkcipher_decrypt_iv(&desc, sg, sg, skb->len);
sg 441 net/rxrpc/rxkad.c if (sg != _sg)
sg 442 net/rxrpc/rxkad.c kfree(sg);
sg 491 net/rxrpc/rxkad.c struct scatterlist sg[2];
sg 527 net/rxrpc/rxkad.c sg_init_one(&sg[0], &tmpbuf, sizeof(tmpbuf));
sg 528 net/rxrpc/rxkad.c sg_init_one(&sg[1], &tmpbuf, sizeof(tmpbuf));
sg 529 net/rxrpc/rxkad.c crypto_blkcipher_encrypt_iv(&desc, &sg[0], &sg[1], sizeof(tmpbuf));
sg 697 net/rxrpc/rxkad.c sg_init_table(sg, 2);
sg 699 net/rxrpc/rxkad.c sg_set_buf(&sg[0], buf, buflen);
sg 700 net/rxrpc/rxkad.c if (sg[0].offset + buflen > PAGE_SIZE) {
sg 702 net/rxrpc/rxkad.c sg[0].length = PAGE_SIZE - sg[0].offset;
sg 703 net/rxrpc/rxkad.c sg_set_buf(&sg[1], buf + sg[0].length, buflen - sg[0].length);
sg 707 net/rxrpc/rxkad.c sg_mark_end(&sg[nsg - 1]);
sg 709 net/rxrpc/rxkad.c ASSERTCMP(sg[0].length + sg[1].length, ==, buflen);
sg 721 net/rxrpc/rxkad.c struct scatterlist sg[2];
sg 729 net/rxrpc/rxkad.c rxkad_sg_set_buf2(sg, &resp->encrypted, sizeof(resp->encrypted));
sg 730 net/rxrpc/rxkad.c crypto_blkcipher_encrypt_iv(&desc, sg, sg, sizeof(resp->encrypted));
sg 825 net/rxrpc/rxkad.c struct scatterlist sg[1];
sg 858 net/rxrpc/rxkad.c sg_init_one(&sg[0], ticket, ticket_len);
sg 859 net/rxrpc/rxkad.c crypto_blkcipher_decrypt_iv(&desc, sg, sg, ticket_len);
sg 968 net/rxrpc/rxkad.c struct scatterlist sg[2];
sg 986 net/rxrpc/rxkad.c rxkad_sg_set_buf2(sg, &resp->encrypted, sizeof(resp->encrypted));
sg 987 net/rxrpc/rxkad.c crypto_blkcipher_decrypt_iv(&desc, sg, sg, sizeof(resp->encrypted));
sg 702 net/sctp/auth.c struct scatterlist sg;
sg 735 net/sctp/auth.c sg_init_one(&sg, auth, end - (unsigned char *)auth);
sg 744 net/sctp/auth.c crypto_hash_digest(&desc, &sg, sg.length, digest);
sg 1478 net/sctp/sm_make_chunk.c struct scatterlist sg;
sg 1540 net/sctp/sm_make_chunk.c sg_init_one(&sg, &cookie->c, bodysize);
sg 1547 net/sctp/sm_make_chunk.c crypto_hash_digest(&desc, &sg, bodysize, cookie->signature))
sg 1572 net/sctp/sm_make_chunk.c struct scatterlist sg;
sg 1610 net/sctp/sm_make_chunk.c sg_init_one(&sg, bear_cookie, bodysize);
sg 1617 net/sctp/sm_make_chunk.c crypto_hash_digest(&desc, &sg, bodysize, digest)) {
sg 1627 net/sctp/sm_make_chunk.c crypto_hash_digest(&desc, &sg, bodysize, digest)) {
sg 61 net/sunrpc/auth_gss/gss_krb5_crypto.c struct scatterlist sg[1];
sg 78 net/sunrpc/auth_gss/gss_krb5_crypto.c sg_init_one(sg, out, length);
sg 80 net/sunrpc/auth_gss/gss_krb5_crypto.c ret = crypto_blkcipher_encrypt_iv(&desc, sg, sg, length);
sg 95 net/sunrpc/auth_gss/gss_krb5_crypto.c struct scatterlist sg[1];
sg 111 net/sunrpc/auth_gss/gss_krb5_crypto.c sg_init_one(sg, out, length);
sg 113 net/sunrpc/auth_gss/gss_krb5_crypto.c ret = crypto_blkcipher_decrypt_iv(&desc, sg, sg, length);
sg 124 net/sunrpc/auth_gss/gss_krb5_crypto.c return crypto_hash_update(desc, sg, sg->length);
sg 133 net/sunrpc/auth_gss/gss_krb5_crypto.c struct scatterlist sg[1];
sg 145 net/sunrpc/auth_gss/gss_krb5_crypto.c sg_init_one(sg, header, hdrlen);
sg 146 net/sunrpc/auth_gss/gss_krb5_crypto.c err = crypto_hash_update(&desc, sg, hdrlen);
sg 178 net/sunrpc/auth_gss/gss_krb5_crypto.c int thislen = desc->fraglen + sg->length;
sg 192 net/sunrpc/auth_gss/gss_krb5_crypto.c in_page = sg_page(sg);
sg 194 net/sunrpc/auth_gss/gss_krb5_crypto.c sg_set_page(&desc->infrags[desc->fragno], in_page, sg->length,
sg 195 net/sunrpc/auth_gss/gss_krb5_crypto.c sg->offset);
sg 196 net/sunrpc/auth_gss/gss_krb5_crypto.c sg_set_page(&desc->outfrags[desc->fragno], sg_page(sg), sg->length,
sg 197 net/sunrpc/auth_gss/gss_krb5_crypto.c sg->offset);
sg 199 net/sunrpc/auth_gss/gss_krb5_crypto.c desc->fraglen += sg->length;
sg 200 net/sunrpc/auth_gss/gss_krb5_crypto.c desc->pos += sg->length;
sg 220 net/sunrpc/auth_gss/gss_krb5_crypto.c sg_set_page(&desc->outfrags[0], sg_page(sg), fraglen,
sg 221 net/sunrpc/auth_gss/gss_krb5_crypto.c sg->offset + sg->length - fraglen);
sg 271 net/sunrpc/auth_gss/gss_krb5_crypto.c int thislen = desc->fraglen + sg->length;
sg 277 net/sunrpc/auth_gss/gss_krb5_crypto.c sg_set_page(&desc->frags[desc->fragno], sg_page(sg), sg->length,
sg 278 net/sunrpc/auth_gss/gss_krb5_crypto.c sg->offset);
sg 280 net/sunrpc/auth_gss/gss_krb5_crypto.c desc->fraglen += sg->length;
sg 298 net/sunrpc/auth_gss/gss_krb5_crypto.c sg_set_page(&desc->frags[0], sg_page(sg), fraglen,
sg 299 net/sunrpc/auth_gss/gss_krb5_crypto.c sg->offset + sg->length - fraglen);
sg 136 net/sunrpc/auth_gss/gss_spkm3_seal.c return crypto_hash_update(desc, sg, sg->length);
sg 147 net/sunrpc/auth_gss/gss_spkm3_seal.c struct scatterlist sg[1];
sg 176 net/sunrpc/auth_gss/gss_spkm3_seal.c sg_init_one(sg, header, hdrlen);
sg 177 net/sunrpc/auth_gss/gss_spkm3_seal.c crypto_hash_update(&desc, sg, sg->length);
sg 1050 net/sunrpc/xdr.c struct scatterlist sg[1];
sg 1052 net/sunrpc/xdr.c sg_init_table(sg, 1);
sg 1060 net/sunrpc/xdr.c sg_set_buf(sg, buf->head[0].iov_base + offset, thislen);
sg 1061 net/sunrpc/xdr.c ret = actor(sg, data);
sg 1083 net/sunrpc/xdr.c sg_set_page(sg, buf->pages[i], thislen, page_offset);
sg 1084 net/sunrpc/xdr.c ret = actor(sg, data);
sg 1100 net/sunrpc/xdr.c sg_set_buf(sg, buf->tail[0].iov_base + offset, thislen);
sg 1101 net/sunrpc/xdr.c ret = actor(sg, data);
sg 700 net/xfrm/xfrm_algo.c struct scatterlist sg;
sg 707 net/xfrm/xfrm_algo.c sg_init_one(&sg, skb->data + offset, copy);
sg 709 net/xfrm/xfrm_algo.c err = icv_update(desc, &sg, copy);
sg 730 net/xfrm/xfrm_algo.c sg_init_table(&sg, 1);
sg 731 net/xfrm/xfrm_algo.c sg_set_page(&sg, frag->page, copy,
sg 734 net/xfrm/xfrm_algo.c err = icv_update(desc, &sg, copy);
sg 333 sound/core/pcm_memory.c struct snd_sg_buf *sg = snd_pcm_substream_sgbuf(substream);
sg 339 sound/core/pcm_memory.c pg = sg->table[start].addr >> PAGE_SHIFT;
sg 345 sound/core/pcm_memory.c if ((sg->table[start].addr >> PAGE_SHIFT) != pg)
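
The occurrences above are dominated by the Linux scatterlist and streaming-DMA APIs (sg_init_table, sg_set_buf, sg_set_page, sg_mark_end, for_each_sg, dma_map_sg, sg_dma_address, sg_dma_len). A minimal sketch of how these calls typically compose is given below; it is not taken from any of the indexed files, and the names my_dev, bufs, and NBUFS are hypothetical.

/*
 * Sketch only: build a scatterlist over NBUFS buffers, map it for DMA,
 * and walk the mapped entries. my_dev, bufs, and NBUFS are placeholders.
 */
#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>

#define NBUFS 4

static int example_map(struct device *my_dev, void *bufs[NBUFS], size_t len)
{
	struct scatterlist sgl[NBUFS], *sg;
	int i, nents;

	/* Zero the table and mark sgl[NBUFS - 1] as the last entry. */
	sg_init_table(sgl, NBUFS);
	for (i = 0; i < NBUFS; i++)
		sg_set_buf(&sgl[i], bufs[i], len);

	/* Returns the number of mapped entries (may be fewer if merged), 0 on error. */
	nents = dma_map_sg(my_dev, sgl, NBUFS, DMA_TO_DEVICE);
	if (!nents)
		return -ENOMEM;

	/* for_each_sg() follows chain entries, unlike a plain sg++ walk. */
	for_each_sg(sgl, sg, nents, i)
		pr_debug("seg %d: dma %#llx len %u\n", i,
			 (unsigned long long)sg_dma_address(sg), sg_dma_len(sg));

	/* Unmap with the original entry count, not the value dma_map_sg() returned. */
	dma_unmap_sg(my_dev, sgl, NBUFS, DMA_TO_DEVICE);
	return 0;
}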