frag 85 fs/adfs/map.c u32 frag;
frag 88 fs/adfs/map.c frag = GET_FRAG_ID(map, start, idmask);
frag 107 fs/adfs/map.c if (frag == frag_id)
frag 116 fs/adfs/map.c frag, start, mapptr);
frag 145 fs/adfs/map.c u32 frag;
frag 151 fs/adfs/map.c frag = GET_FRAG_ID(map, start, idmask);
frag 157 fs/adfs/map.c if (frag == 0)
frag 161 fs/adfs/map.c start += frag;
frag 166 fs/adfs/map.c frag = GET_FRAG_ID(map, start, idmask);
frag 186 fs/adfs/map.c } while (frag >= idlen + 1);
frag 188 fs/adfs/map.c if (frag != 0)
frag 73 fs/jffs2/debug.c struct jffs2_node_frag *frag;
frag 76 fs/jffs2/debug.c for (frag = frag_first(&f->fragtree); frag; frag = frag_next(frag)) {
frag 77 fs/jffs2/debug.c struct jffs2_full_dnode *fn = frag->node;
frag 94 fs/jffs2/debug.c if (frag->ofs & (PAGE_CACHE_SIZE-1) && frag_prev(frag)
frag 95 fs/jffs2/debug.c && frag_prev(frag)->size < PAGE_CACHE_SIZE && frag_prev(frag)->node) {
frag 101 fs/jffs2/debug.c if ((frag->ofs+frag->size) & (PAGE_CACHE_SIZE-1) && frag_next(frag)
frag 102 fs/jffs2/debug.c && frag_next(frag)->size < PAGE_CACHE_SIZE && frag_next(frag)->node) {
frag 104 fs/jffs2/debug.c ref_offset(fn->raw), frag->ofs, frag->ofs+frag->size);
frag 448 fs/jffs2/gc.c struct jffs2_node_frag *frag;
frag 482 fs/jffs2/gc.c for (frag = frag_first(&f->fragtree); frag; frag = frag_next(frag)) {
frag 483 fs/jffs2/gc.c if (frag->node && frag->node->raw == raw) {
frag 484 fs/jffs2/gc.c fn = frag->node;
frag 485 fs/jffs2/gc.c end = frag->ofs + frag->size;
frag 487 fs/jffs2/gc.c start = frag->ofs;
frag 488 fs/jffs2/gc.c if (nrfrags == frag->node->frags)
frag 497 fs/jffs2/gc.c frag->node->raw = f->inocache->nodes;
frag 946 fs/jffs2/gc.c struct jffs2_node_frag *frag;
frag 1009 fs/jffs2/gc.c frag = frag_last(&f->fragtree);
frag 1010 fs/jffs2/gc.c if (frag)
frag 1013 fs/jffs2/gc.c ilen = frag->ofs + frag->size;
frag 1065 fs/jffs2/gc.c for (frag = jffs2_lookup_node_frag(&f->fragtree, fn->ofs);
frag 1066 fs/jffs2/gc.c frag; frag = frag_next(frag)) {
frag 1067 fs/jffs2/gc.c if (frag->ofs > fn->size + fn->ofs)
frag 1069 fs/jffs2/gc.c if (frag->node == fn) {
frag 1070 fs/jffs2/gc.c frag->node = new_fn;
frag 1118 fs/jffs2/gc.c struct jffs2_node_frag *frag;
frag 1124 fs/jffs2/gc.c frag = jffs2_lookup_node_frag(&f->fragtree, start);
frag 1128 fs/jffs2/gc.c BUG_ON(frag->ofs != start);
frag 1131 fs/jffs2/gc.c while((frag = frag_prev(frag)) && frag->ofs >= min) {
frag 1135 fs/jffs2/gc.c if (frag->ofs > min) {
frag 1137 fs/jffs2/gc.c frag->ofs, frag->ofs+frag->size));
frag 1138 fs/jffs2/gc.c start = frag->ofs;
frag 1142 fs/jffs2/gc.c if (!frag->node || !frag->node->raw) {
frag 1144 fs/jffs2/gc.c frag->ofs, frag->ofs+frag->size));
frag 1152 fs/jffs2/gc.c struct jffs2_raw_node_ref *raw = frag->node->raw;
frag 1159 fs/jffs2/gc.c frag->ofs, frag->ofs+frag->size, ref_offset(raw)));
frag 1160 fs/jffs2/gc.c start = frag->ofs;
frag 1165 fs/jffs2/gc.c frag->ofs, frag->ofs+frag->size, jeb->offset));
frag 1170 fs/jffs2/gc.c frag->ofs, frag->ofs+frag->size, jeb->offset));
frag 1171 fs/jffs2/gc.c start = frag->ofs;
frag 1179 fs/jffs2/gc.c frag = jffs2_lookup_node_frag(&f->fragtree, end-1);
frag 1181 fs/jffs2/gc.c while((frag = frag_next(frag)) && frag->ofs+frag->size <= max) {
frag 1185 fs/jffs2/gc.c if (frag->ofs+frag->size < max) {
frag 1187 fs/jffs2/gc.c frag->ofs, frag->ofs+frag->size));
frag 1188 fs/jffs2/gc.c end = frag->ofs + frag->size;
frag 1192 fs/jffs2/gc.c if (!frag->node || !frag->node->raw) {
frag 1194 fs/jffs2/gc.c frag->ofs, frag->ofs+frag->size));
frag 1202 fs/jffs2/gc.c struct jffs2_raw_node_ref *raw = frag->node->raw;
frag 1209 fs/jffs2/gc.c frag->ofs, frag->ofs+frag->size, ref_offset(raw)));
frag 1210 fs/jffs2/gc.c end = frag->ofs + frag->size;
frag 1215 fs/jffs2/gc.c frag->ofs, frag->ofs+frag->size, jeb->offset));
frag 1220 fs/jffs2/gc.c frag->ofs, frag->ofs+frag->size, jeb->offset));
frag 1221 fs/jffs2/gc.c end = frag->ofs + frag->size;
frag 60 fs/jffs2/nodelist.c struct jffs2_node_frag *frag = jffs2_lookup_node_frag(list, size);
frag 65 fs/jffs2/nodelist.c if (frag && frag->ofs != size) {
frag 66 fs/jffs2/nodelist.c if (frag->ofs+frag->size > size) {
frag 67 fs/jffs2/nodelist.c frag->size = size - frag->ofs;
frag 69 fs/jffs2/nodelist.c frag = frag_next(frag);
frag 71 fs/jffs2/nodelist.c while (frag && frag->ofs >= size) {
frag 72 fs/jffs2/nodelist.c struct jffs2_node_frag *next = frag_next(frag);
frag 74 fs/jffs2/nodelist.c frag_erase(frag, list);
frag 75 fs/jffs2/nodelist.c jffs2_obsolete_node_frag(c, frag);
frag 76 fs/jffs2/nodelist.c frag = next;
frag 82 fs/jffs2/nodelist.c frag = frag_last(list);
frag 85 fs/jffs2/nodelist.c if (!frag)
frag 87 fs/jffs2/nodelist.c if (frag->ofs + frag->size < size)
frag 88 fs/jffs2/nodelist.c return frag->ofs + frag->size;
frag 92 fs/jffs2/nodelist.c if (frag->node && (frag->ofs & (PAGE_CACHE_SIZE - 1)) == 0) {
frag 94 fs/jffs2/nodelist.c frag->ofs, frag->ofs + frag->size);
frag 95 fs/jffs2/nodelist.c frag->node->raw->flash_offset = ref_offset(frag->node->raw) | REF_PRISTINE;
frag 529 fs/jffs2/nodelist.c struct jffs2_node_frag *frag = NULL;
frag 536 fs/jffs2/nodelist.c frag = rb_entry(next, struct jffs2_node_frag, rb);
frag 538 fs/jffs2/nodelist.c if (frag->ofs + frag->size <= offset) {
frag 540 fs/jffs2/nodelist.c if (!prev || frag->ofs > prev->ofs)
frag 541 fs/jffs2/nodelist.c prev = frag;
frag 542 fs/jffs2/nodelist.c next = frag->rb.rb_right;
frag 543 fs/jffs2/nodelist.c } else if (frag->ofs > offset) {
frag 544 fs/jffs2/nodelist.c next = frag->rb.rb_left;
frag 546 fs/jffs2/nodelist.c return frag;
frag 566 fs/jffs2/nodelist.c struct jffs2_node_frag *frag;
frag 574 fs/jffs2/nodelist.c frag = (rb_entry(root->rb_node, struct jffs2_node_frag, rb));
frag 575 fs/jffs2/nodelist.c while(frag) {
frag 576 fs/jffs2/nodelist.c if (frag->rb.rb_left) {
frag 577 fs/jffs2/nodelist.c frag = frag_left(frag);
frag 580 fs/jffs2/nodelist.c if (frag->rb.rb_right) {
frag 581 fs/jffs2/nodelist.c frag = frag_right(frag);
frag 585 fs/jffs2/nodelist.c if (frag->node && !(--frag->node->frags)) {
frag 589 fs/jffs2/nodelist.c jffs2_mark_node_obsolete(c, frag->node->raw);
frag 591 fs/jffs2/nodelist.c jffs2_free_full_dnode(frag->node);
frag 593 fs/jffs2/nodelist.c parent = frag_parent(frag);
frag 595 fs/jffs2/nodelist.c if (frag_left(parent) == frag)
frag 601 fs/jffs2/nodelist.c jffs2_free_node_frag(frag);
frag 602 fs/jffs2/nodelist.c frag = parent;
frag 343 fs/jffs2/nodelist.h #define frag_next(frag) rb_entry(rb_next(&(frag)->rb), struct jffs2_node_frag, rb)
frag 344 fs/jffs2/nodelist.h #define frag_prev(frag) rb_entry(rb_prev(&(frag)->rb), struct jffs2_node_frag, rb)
frag 345 fs/jffs2/nodelist.h #define frag_parent(frag) rb_entry(rb_parent(&(frag)->rb), struct jffs2_node_frag, rb)
frag 346 fs/jffs2/nodelist.h #define frag_left(frag) rb_entry((frag)->rb.rb_left, struct jffs2_node_frag, rb)
frag 347 fs/jffs2/nodelist.h #define frag_right(frag) rb_entry((frag)->rb.rb_right, struct jffs2_node_frag, rb)
frag 348 fs/jffs2/nodelist.h #define frag_erase(frag, list) rb_erase(&frag->rb, list);
frag 157 fs/jffs2/read.c struct jffs2_node_frag *frag;
frag 163 fs/jffs2/read.c frag = jffs2_lookup_node_frag(&f->fragtree, offset);
frag 170 fs/jffs2/read.c if (unlikely(!frag || frag->ofs > offset)) {
frag 172 fs/jffs2/read.c if (frag) {
frag 173 fs/jffs2/read.c D1(printk(KERN_NOTICE "Eep. Hole in ino #%u fraglist. frag->ofs = 0x%08x, offset = 0x%08x\n", f->inocache->ino, frag->ofs, offset));
frag 174 fs/jffs2/read.c holesize = min(holesize, frag->ofs - offset);
frag 181 fs/jffs2/read.c } else if (unlikely(!frag->node)) {
frag 182 fs/jffs2/read.c uint32_t holeend = min(end, frag->ofs + frag->size);
frag 183 fs/jffs2/read.c D1(printk(KERN_DEBUG "Filling frag hole from %d-%d (frag 0x%x 0x%x)\n", offset, holeend, frag->ofs, frag->ofs + frag->size));
frag 187 fs/jffs2/read.c frag = frag_next(frag);
frag 193 fs/jffs2/read.c fragofs = offset - frag->ofs;
frag 194 fs/jffs2/read.c readlen = min(frag->size - fragofs, end - offset);
frag 196 fs/jffs2/read.c frag->ofs+fragofs, frag->ofs+fragofs+readlen,
frag 197 fs/jffs2/read.c ref_offset(frag->node->raw), ref_flags(frag->node->raw)));
frag 198 fs/jffs2/read.c ret = jffs2_read_dnode(c, f, frag->node, buf, fragofs + frag->ofs - frag->node->ofs, readlen);
frag 207 fs/jffs2/read.c frag = frag_next(frag);
frag 181 fs/jffs2/wbuf.c struct jffs2_node_frag *frag;
frag 196 fs/jffs2/wbuf.c frag = jffs2_lookup_node_frag(&f->fragtree, je32_to_cpu(node->i.offset));
frag 197 fs/jffs2/wbuf.c BUG_ON(!frag);
frag 199 fs/jffs2/wbuf.c while (!frag->node || frag->node->raw != raw) {
frag 200 fs/jffs2/wbuf.c frag = frag_next(frag);
frag 201 fs/jffs2/wbuf.c BUG_ON(!frag);
frag 203 fs/jffs2/wbuf.c dbg_noderef("Will replace ->raw in full_dnode at %p\n", frag->node);
frag 204 fs/jffs2/wbuf.c return &frag->node->raw;
frag 1291 fs/partitions/ldm.c struct frag *f;
frag 1306 fs/partitions/ldm.c f = list_entry (item, struct frag, list);
frag 1356 fs/partitions/ldm.c kfree (list_entry (item, struct frag, list));
frag 1372 fs/partitions/ldm.c struct frag *f;
frag 1378 fs/partitions/ldm.c f = list_entry (item, struct frag, list);
frag 94 fs/ufs/inode.c int depth = ufs_block_to_path(inode, frag >> uspi->s_fpbshift, offsets);
frag 101 fs/ufs/inode.c UFSD(": frag = %llu depth = %d\n", (unsigned long long)frag, depth);
frag 130 fs/ufs/inode.c ret = (u64) (uspi->s_sbbase + fs32_to_cpu(sb, block) + (frag & uspi->s_fpbmask));
frag 153 fs/ufs/inode.c ret = temp + (u64) (frag & uspi->s_fpbmask);
frag 81 include/linux/icmp.h } frag;
frag 121 include/linux/inet_lro.h int (*get_frag_header)(struct skb_frag_struct *frag, void **mac_hdr,
frag 963 include/linux/skbuff.h skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
frag 965 include/linux/skbuff.h frag->page = page;
frag 966 include/linux/skbuff.h frag->page_offset = off;
frag 967 include/linux/skbuff.h frag->size = size;
frag 1513 include/linux/skbuff.h struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
frag 1515 include/linux/skbuff.h return page == frag->page &&
frag 1516 include/linux/skbuff.h off == frag->page_offset + frag->size;
frag 926 include/linux/wireless.h struct iw_param frag; /* Fragmentation threshold */
frag 164 include/net/bluetooth/bluetooth.h register struct sk_buff *frag = skb_shinfo(skb)->frag_list;
frag 167 include/net/bluetooth/bluetooth.h for (; frag; frag=frag->next, n++);
frag 494 include/net/sctp/sctp.h int frag = pmtu;
frag 496 include/net/sctp/sctp.h frag -= sp->pf->af->net_header_len;
frag 497 include/net/sctp/sctp.h frag -= sizeof(struct sctphdr) + sizeof(struct sctp_data_chunk);
frag 500 include/net/sctp/sctp.h frag = min_t(int, frag, sp->user_frag);
frag 502 include/net/sctp/sctp.h frag = min_t(int, frag, SCTP_MAX_CHUNK_LEN);
frag 504 include/net/sctp/sctp.h return frag;
frag 967 net/appletalk/ddp.c skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
frag 971 net/appletalk/ddp.c vaddr = kmap_skb_frag(frag);
frag 972 net/appletalk/ddp.c sum = atalk_sum_partial(vaddr + frag->page_offset +
frag 1020 net/bluetooth/l2cap.c struct sk_buff *skb, **frag;
frag 1056 net/bluetooth/l2cap.c frag = &skb_shinfo(skb)->frag_list;
frag 1060 net/bluetooth/l2cap.c *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
frag 1061 net/bluetooth/l2cap.c if (!*frag)
frag 1064 net/bluetooth/l2cap.c if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count)) {
frag 1072 net/bluetooth/l2cap.c frag = &(*frag)->next;
frag 1316 net/bluetooth/l2cap.c struct sk_buff *skb, **frag;
frag 1348 net/bluetooth/l2cap.c frag = &skb_shinfo(skb)->frag_list;
frag 1352 net/bluetooth/l2cap.c *frag = bt_skb_alloc(count, GFP_ATOMIC);
frag 1353 net/bluetooth/l2cap.c if (!*frag)
frag 1356 net/bluetooth/l2cap.c memcpy(skb_put(*frag, count), data, count);
frag 1361 net/bluetooth/l2cap.c frag = &(*frag)->next;
frag 294 net/core/datagram.c skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
frag 295 net/core/datagram.c struct page *page = frag->page;
frag 300 net/core/datagram.c err = memcpy_toiovec(to, vaddr + frag->page_offset +
frag 379 net/core/datagram.c skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
frag 380 net/core/datagram.c struct page *page = frag->page;
frag 385 net/core/datagram.c err = memcpy_fromiovec(vaddr + frag->page_offset +
frag 463 net/core/datagram.c skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
frag 464 net/core/datagram.c struct page *page = frag->page;
frag 470 net/core/datagram.c frag->page_offset +
frag 10 net/core/kmap_skb.h return kmap_atomic(frag->page, KM_SKB_DATA_SOFTIRQ);
frag 1024 net/core/skbuff.c struct sk_buff *frag;
frag 1059 net/core/skbuff.c for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp);
frag 1060 net/core/skbuff.c fragp = &frag->next) {
frag 1061 net/core/skbuff.c int end = offset + frag->len;
frag 1063 net/core/skbuff.c if (skb_shared(frag)) {
frag 1066 net/core/skbuff.c nfrag = skb_clone(frag, GFP_ATOMIC);
frag 1070 net/core/skbuff.c nfrag->next = frag->next;
frag 1071 net/core/skbuff.c kfree_skb(frag);
frag 1072 net/core/skbuff.c frag = nfrag;
frag 1073 net/core/skbuff.c *fragp = frag;
frag 1082 net/core/skbuff.c unlikely((err = pskb_trim(frag, len - offset))))
frag 1085 net/core/skbuff.c if (frag->next)
frag 1086 net/core/skbuff.c skb_drop_list(&frag->next);
frag 1537 net/core/skbuff.c skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
frag 1542 net/core/skbuff.c end = start + frag->size;
frag 1549 net/core/skbuff.c vaddr = kmap_skb_frag(frag);
frag 1550 net/core/skbuff.c memcpy(vaddr + frag->page_offset + offset - start,
frag 1623 net/core/skbuff.c skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
frag 1627 net/core/skbuff.c vaddr = kmap_skb_frag(frag);
frag 1628 net/core/skbuff.c csum2 = csum_partial(vaddr + frag->page_offset +
frag 1700 net/core/skbuff.c skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
frag 1704 net/core/skbuff.c vaddr = kmap_skb_frag(frag);
frag 1706 net/core/skbuff.c frag->page_offset +
frag 2057 net/core/skbuff.c skb_frag_t *frag;
frag 2074 net/core/skbuff.c frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx];
frag 2075 net/core/skbuff.c block_limit = frag->size + st->stepped_offset;
frag 2079 net/core/skbuff.c st->frag_data = kmap_skb_frag(frag);
frag 2081 net/core/skbuff.c *data = (u8 *) st->frag_data + frag->page_offset +
frag 2093 net/core/skbuff.c st->stepped_offset += frag->size;
frag 2186 net/core/skbuff.c skb_frag_t *frag = NULL;
frag 2216 net/core/skbuff.c frag = &skb_shinfo(skb)->frags[frg_cnt - 1];
frag 2219 net/core/skbuff.c left = PAGE_SIZE - frag->page_offset;
frag 2222 net/core/skbuff.c ret = getfrag(from, (page_address(frag->page) +
frag 2223 net/core/skbuff.c frag->page_offset + frag->size),
frag 2230 net/core/skbuff.c frag->size += copy;
frag 2293 net/core/skbuff.c skb_frag_t *frag;
frag 2336 net/core/skbuff.c frag = skb_shinfo(nskb)->frags;
frag 2345 net/core/skbuff.c *frag = skb_shinfo(skb)->frags[i];
frag 2346 net/core/skbuff.c get_page(frag->page);
frag 2347 net/core/skbuff.c size = frag->size;
frag 2350 net/core/skbuff.c frag->page_offset += offset - pos;
frag 2351 net/core/skbuff.c frag->size -= offset - pos;
frag 2360 net/core/skbuff.c frag->size -= pos + size - (offset + len);
frag 2364 net/core/skbuff.c frag++;
frag 2434 net/core/skbuff.c skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
frag 2438 net/core/skbuff.c sg_set_page(&sg[elt], frag->page, copy,
frag 2439 net/core/skbuff.c frag->page_offset+offset-start);
frag 1298 net/core/sock.c skb_frag_t *frag;
frag 1308 net/core/sock.c frag = &skb_shinfo(skb)->frags[i];
frag 1309 net/core/sock.c frag->page = page;
frag 1310 net/core/sock.c frag->page_offset = 0;
frag 1311 net/core/sock.c frag->size = (data_len >= PAGE_SIZE ?
frag 79 net/core/user_dma.c skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
frag 80 net/core/user_dma.c struct page *page = frag->page;
frag 86 net/core/user_dma.c frag->page_offset + offset - start, copy);
frag 77 net/ieee80211/ieee80211_rx.c (entry->last_frag + 1 == frag || frag == -1) &&
frag 92 net/ieee80211/ieee80211_rx.c unsigned int frag, seq;
frag 96 net/ieee80211/ieee80211_rx.c frag = WLAN_GET_SEQ_FRAG(sc);
frag 99 net/ieee80211/ieee80211_rx.c if (frag == 0) {
frag 119 net/ieee80211/ieee80211_rx.c entry->last_frag = frag;
frag 126 net/ieee80211/ieee80211_rx.c entry = ieee80211_frag_cache_find(ieee, seq, frag, hdr->addr2,
frag 129 net/ieee80211/ieee80211_rx.c entry->last_frag = frag;
frag 345 net/ieee80211/ieee80211_rx.c unsigned int frag;
frag 375 net/ieee80211/ieee80211_rx.c frag = WLAN_GET_SEQ_FRAG(sc);
frag 599 net/ieee80211/ieee80211_rx.c if ((frag != 0) || (fc & IEEE80211_FCTL_MOREFRAGS)) {
frag 602 net/ieee80211/ieee80211_rx.c IEEE80211_DEBUG_FRAG("Rx Fragment received (%u)\n", frag);
frag 609 net/ieee80211/ieee80211_rx.c WLAN_GET_SEQ_SEQ(sc), frag);
frag 614 net/ieee80211/ieee80211_rx.c if (frag != 0)
frag 625 net/ieee80211/ieee80211_rx.c if (frag == 0) {
frag 166 net/ieee80211/ieee80211_tx.c res = crypt->ops->encrypt_mpdu(frag, hdr_len, crypt->priv);
frag 171 net/ieee80211/ieee80211_tx.c ieee->dev->name, frag->len);
frag 692 net/ipv4/icmp.c ntohs(icmph->un.frag.mtu),
frag 478 net/ipv4/ip_output.c struct sk_buff *frag;
frag 488 net/ipv4/ip_output.c for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) {
frag 490 net/ipv4/ip_output.c if (frag->len > mtu ||
frag 491 net/ipv4/ip_output.c ((frag->len & 7) && frag->next) ||
frag 492 net/ipv4/ip_output.c skb_headroom(frag) < hlen)
frag 496 net/ipv4/ip_output.c if (skb_shared(frag))
frag 499 net/ipv4/ip_output.c BUG_ON(frag->sk);
frag 502 net/ipv4/ip_output.c frag->sk = skb->sk;
frag 503 net/ipv4/ip_output.c frag->destructor = sock_wfree;
frag 504 net/ipv4/ip_output.c truesizes += frag->truesize;
frag 512 net/ipv4/ip_output.c frag = skb_shinfo(skb)->frag_list;
frag 524 net/ipv4/ip_output.c if (frag) {
frag 525 net/ipv4/ip_output.c frag->ip_summed = CHECKSUM_NONE;
frag 526 net/ipv4/ip_output.c skb_reset_transport_header(frag);
frag 527 net/ipv4/ip_output.c __skb_push(frag, hlen);
frag 528 net/ipv4/ip_output.c skb_reset_network_header(frag);
frag 529 net/ipv4/ip_output.c memcpy(skb_network_header(frag), iph, hlen);
frag 530 net/ipv4/ip_output.c iph = ip_hdr(frag);
frag 531 net/ipv4/ip_output.c iph->tot_len = htons(frag->len);
frag 532 net/ipv4/ip_output.c ip_copy_metadata(frag, skb);
frag 534 net/ipv4/ip_output.c ip_options_fragment(frag);
frag 537 net/ipv4/ip_output.c if (frag->next != NULL)
frag 547 net/ipv4/ip_output.c if (err || !frag)
frag 550 net/ipv4/ip_output.c skb = frag;
frag 551 net/ipv4/ip_output.c frag = skb->next;
frag 560 net/ipv4/ip_output.c while (frag) {
frag 561 net/ipv4/ip_output.c skb = frag->next;
frag 562 net/ipv4/ip_output.c kfree_skb(frag);
frag 563 net/ipv4/ip_output.c frag = skb;
frag 998 net/ipv4/ip_output.c skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
frag 1006 net/ipv4/ip_output.c if (page != frag->page) {
frag 1013 net/ipv4/ip_output.c frag = &skb_shinfo(skb)->frags[i];
frag 1027 net/ipv4/ip_output.c frag = &skb_shinfo(skb)->frags[i];
frag 1032 net/ipv4/ip_output.c if (getfrag(from, page_address(frag->page)+frag->page_offset+frag->size, offset, copy, skb->len, skb) < 0) {
frag 1037 net/ipv4/ip_output.c frag->size += copy;
frag 282 net/ipv4/netfilter/ipt_LOG.c printk("MTU=%u ", ntohs(ich->un.frag.mtu));
frag 624 net/ipv6/ip6_output.c struct sk_buff *frag;
frag 668 net/ipv6/ip6_output.c for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) {
frag 670 net/ipv6/ip6_output.c if (frag->len > mtu ||
frag 671 net/ipv6/ip6_output.c ((frag->len & 7) && frag->next) ||
frag 672 net/ipv6/ip6_output.c skb_headroom(frag) < hlen)
frag 676 net/ipv6/ip6_output.c if (skb_shared(frag))
frag 679 net/ipv6/ip6_output.c BUG_ON(frag->sk);
frag 682 net/ipv6/ip6_output.c frag->sk = skb->sk;
frag 683 net/ipv6/ip6_output.c frag->destructor = sock_wfree;
frag 684 net/ipv6/ip6_output.c truesizes += frag->truesize;
frag 690 net/ipv6/ip6_output.c frag = skb_shinfo(skb)->frag_list;
frag 726 net/ipv6/ip6_output.c if (frag) {
frag 727 net/ipv6/ip6_output.c frag->ip_summed = CHECKSUM_NONE;
frag 728 net/ipv6/ip6_output.c skb_reset_transport_header(frag);
frag 729 net/ipv6/ip6_output.c fh = (struct frag_hdr*)__skb_push(frag, sizeof(struct frag_hdr));
frag 730 net/ipv6/ip6_output.c __skb_push(frag, hlen);
frag 731 net/ipv6/ip6_output.c skb_reset_network_header(frag);
frag 732 net/ipv6/ip6_output.c memcpy(skb_network_header(frag), tmp_hdr,
frag 738 net/ipv6/ip6_output.c if (frag->next != NULL)
frag 741 net/ipv6/ip6_output.c ipv6_hdr(frag)->payload_len =
frag 742 net/ipv6/ip6_output.c htons(frag->len -
frag 744 net/ipv6/ip6_output.c ip6_copy_metadata(frag, skb);
frag 752 net/ipv6/ip6_output.c if (err || !frag)
frag 755 net/ipv6/ip6_output.c skb = frag;
frag 756 net/ipv6/ip6_output.c frag = skb->next;
frag 769 net/ipv6/ip6_output.c while (frag) {
frag 770 net/ipv6/ip6_output.c skb = frag->next;
frag 771 net/ipv6/ip6_output.c kfree_skb(frag);
frag 772 net/ipv6/ip6_output.c frag = skb;
frag 808 net/ipv6/ip6_output.c if ((frag = alloc_skb(len+hlen+sizeof(struct frag_hdr)+LL_ALLOCATED_SPACE(rt->u.dst.dev), GFP_ATOMIC)) == NULL) {
frag 820 net/ipv6/ip6_output.c ip6_copy_metadata(frag, skb);
frag 821 net/ipv6/ip6_output.c skb_reserve(frag, LL_RESERVED_SPACE(rt->u.dst.dev));
frag 822 net/ipv6/ip6_output.c skb_put(frag, len + hlen + sizeof(struct frag_hdr));
frag 823 net/ipv6/ip6_output.c skb_reset_network_header(frag);
frag 824 net/ipv6/ip6_output.c fh = (struct frag_hdr *)(skb_network_header(frag) + hlen);
frag 825 net/ipv6/ip6_output.c frag->transport_header = (frag->network_header + hlen +
frag 833 net/ipv6/ip6_output.c skb_set_owner_w(frag, skb->sk);
frag 838 net/ipv6/ip6_output.c skb_copy_from_linear_data(skb, skb_network_header(frag), hlen);
frag 854 net/ipv6/ip6_output.c if (skb_copy_bits(skb, ptr, skb_transport_header(frag), len))
frag 861 net/ipv6/ip6_output.c ipv6_hdr(frag)->payload_len = htons(frag->len -
frag 870 net/ipv6/ip6_output.c err = output(frag);
frag 1352 net/ipv6/ip6_output.c skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
frag 1360 net/ipv6/ip6_output.c if (page != frag->page) {
frag 1367 net/ipv6/ip6_output.c frag = &skb_shinfo(skb)->frags[i];
frag 1381 net/ipv6/ip6_output.c frag = &skb_shinfo(skb)->frags[i];
frag 1386 net/ipv6/ip6_output.c if (getfrag(from, page_address(frag->page)+frag->page_offset+frag->size, offset, copy, skb->len, skb) < 0) {
frag 1391 net/ipv6/ip6_output.c frag->size += copy;
frag 235 net/irda/irttp.c struct sk_buff *skb, *frag;
frag 258 net/irda/irttp.c while ((frag = skb_dequeue(&self->rx_fragments)) != NULL) {
frag 259 net/irda/irttp.c skb_copy_to_linear_data_offset(skb, n, frag->data, frag->len);
frag 260 net/irda/irttp.c n += frag->len;
frag 262 net/irda/irttp.c dev_kfree_skb(frag);
frag 293 net/irda/irttp.c struct sk_buff *frag;
frag 309 net/irda/irttp.c frag = alloc_skb(self->max_seg_size+self->max_header_size,
frag 311 net/irda/irttp.c if (!frag)
frag 314 net/irda/irttp.c skb_reserve(frag, self->max_header_size);
frag 317 net/irda/irttp.c skb_copy_from_linear_data(skb, skb_put(frag, self->max_seg_size),
frag 321 net/irda/irttp.c frame = skb_push(frag, TTP_HEADER);
frag 328 net/irda/irttp.c skb_queue_tail(&self->tx_queue, frag);
frag 543 net/mac80211/main.c u16 frag, type;
frag 605 net/mac80211/main.c frag = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
frag 609 net/mac80211/main.c if (frag == 0) {
frag 628 net/mac80211/main.c if (frag == 0)
frag 810 net/mac80211/rx.c entry->last_frag = frag;
frag 836 net/mac80211/rx.c entry->last_frag + 1 != frag)
frag 866 net/mac80211/rx.c unsigned int frag, seq;
frag 874 net/mac80211/rx.c frag = sc & IEEE80211_SCTL_FRAG;
frag 876 net/mac80211/rx.c if (likely((!ieee80211_has_morefrags(fc) && frag == 0) ||
frag 886 net/mac80211/rx.c if (frag == 0) {
frag 888 net/mac80211/rx.c entry = ieee80211_reassemble_add(rx->sdata, frag, seq,
frag 905 net/mac80211/rx.c entry = ieee80211_reassemble_find(rx->sdata, frag, seq, rx->queue, hdr);
frag 932 net/mac80211/rx.c entry->last_frag = frag;
frag 637 net/mac80211/tx.c struct sk_buff **frags, *first, *frag;
frag 680 net/mac80211/tx.c frag = frags[i] =
frag 685 net/mac80211/tx.c if (!frag)
frag 689 net/mac80211/tx.c frag->priority = first->priority;
frag 690 net/mac80211/tx.c skb_reserve(frag, tx->local->tx_headroom +
frag 692 net/mac80211/tx.c fhdr = (struct ieee80211_hdr *) skb_put(frag, hdrlen);
frag 698 net/mac80211/tx.c memcpy(skb_put(frag, copylen), pos, copylen);
frag 699 net/mac80211/tx.c memcpy(frag->cb, first->cb, sizeof(frag->cb));
frag 700 net/mac80211/tx.c skb_copy_queue_mapping(frag, first);
frag 701 net/mac80211/tx.c frag->do_not_encrypt = first->do_not_encrypt;
frag 762 net/mac80211/wext.c if (frag->disabled)
frag 764 net/mac80211/wext.c else if (!frag->fixed)
frag 766 net/mac80211/wext.c else if (frag->value < 256 ||
frag 767 net/mac80211/wext.c frag->value > IEEE80211_MAX_FRAG_THRESHOLD)
frag 771 net/mac80211/wext.c local->fragmentation_threshold = frag->value & ~0x1;
frag 791 net/mac80211/wext.c frag->value = local->fragmentation_threshold;
frag 792 net/mac80211/wext.c frag->disabled = (frag->value >= IEEE80211_MAX_RTS_THRESHOLD);
frag 793 net/mac80211/wext.c frag->fixed = 1;
frag 87 net/rxrpc/ar-error.c mtu = ntohs(icmp_hdr(skb)->un.frag.mtu);
frag 164 net/sctp/chunk.c __u8 frag;
frag 221 net/sctp/chunk.c frag = SCTP_DATA_MIDDLE_FRAG;
frag 224 net/sctp/chunk.c frag |= SCTP_DATA_FIRST_FRAG;
frag 227 net/sctp/chunk.c frag |= SCTP_DATA_LAST_FRAG;
frag 229 net/sctp/chunk.c chunk = sctp_make_datafrag_empty(asoc, sinfo, len, frag, 0);
frag 256 net/sctp/chunk.c frag = SCTP_DATA_NOT_FRAG;
frag 258 net/sctp/chunk.c frag = SCTP_DATA_LAST_FRAG;
frag 260 net/sctp/chunk.c chunk = sctp_make_datafrag_empty(asoc, sinfo, over, frag, 0);
frag 6535 net/sctp/socket.c struct sk_buff *frag;
frag 6541 net/sctp/socket.c for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next)
frag 6542 net/sctp/socket.c sctp_sock_rfree_frag(frag);
frag 6550 net/sctp/socket.c struct sk_buff *frag;
frag 6556 net/sctp/socket.c for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next)
frag 6557 net/sctp/socket.c sctp_skb_set_owner_r_frag(frag, sk);
frag 963 net/sctp/ulpevent.c struct sk_buff *skb, *frag;
frag 979 net/sctp/ulpevent.c for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) {
frag 980 net/sctp/ulpevent.c sctp_ulpevent_receive_data(sctp_skb2event(frag), asoc);
frag 989 net/sctp/ulpevent.c struct sk_buff *skb, *frag;
frag 1006 net/sctp/ulpevent.c for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) {
frag 1011 net/sctp/ulpevent.c sctp_ulpevent_release_frag_data(sctp_skb2event(frag));
frag 1021 net/sctp/ulpevent.c struct sk_buff *skb, *frag;
frag 1029 net/sctp/ulpevent.c for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) {
frag 1034 net/sctp/ulpevent.c sctp_ulpevent_release_frag_data(sctp_skb2event(frag));
frag 725 net/xfrm/xfrm_algo.c skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
frag 731 net/xfrm/xfrm_algo.c sg_set_page(&sg, frag->page, copy,
frag 732 net/xfrm/xfrm_algo.c frag->page_offset + offset-start);
frag 73 net/xfrm/xfrm_ipcomp.c skb_frag_t *frag;
frag 79 net/xfrm/xfrm_ipcomp.c frag = skb_shinfo(skb)->frags + skb_shinfo(skb)->nr_frags;
frag 80 net/xfrm/xfrm_ipcomp.c frag->page = alloc_page(GFP_ATOMIC);
frag 83 net/xfrm/xfrm_ipcomp.c if (!frag->page)
frag 90 net/xfrm/xfrm_ipcomp.c memcpy(page_address(frag->page), scratch, len);
frag 92 net/xfrm/xfrm_ipcomp.c frag->page_offset = 0;
frag 93 net/xfrm/xfrm_ipcomp.c frag->size = len;
frag 447 sound/pci/cs4281.c int frag; /* period number */
frag 752 sound/pci/cs4281.c dma->frag = 0; /* for workaround */
frag 1838 sound/pci/cs4281.c cdma->frag++;
frag 1839 sound/pci/cs4281.c if ((val & BA0_HDSR_DHTC) && !(cdma->frag & 1)) {
frag 1840 sound/pci/cs4281.c cdma->frag--;
frag 1845 sound/pci/cs4281.c if ((val & BA0_HDSR_DTC) && (cdma->frag & 1)) {
frag 1846 sound/pci/cs4281.c cdma->frag--;
frag 378 sound/pci/rme9652/rme9652.c unsigned int offset, frag;
frag 399 sound/pci/rme9652/rme9652.c frag = status & RME9652_buffer_id;
frag 403 sound/pci/rme9652/rme9652.c if (frag)
frag 405 sound/pci/rme9652/rme9652.c } else if (!frag)
frag 412 sound/pci/rme9652/rme9652.c if (!frag)
frag 414 sound/pci/rme9652/rme9652.c } else if (frag)
frag 621 sound/soc/blackfin/bf5xx-sport.c ++(*frag);
frag 622 sound/soc/blackfin/bf5xx-sport.c if (tx == 1 && *frag == sport->tx_frags)
frag 623 sound/soc/blackfin/bf5xx-sport.c *frag = 0;
frag 625 sound/soc/blackfin/bf5xx-sport.c if (tx == 0 && *frag == sport->rx_frags)
frag 626 sound/soc/blackfin/bf5xx-sport.c *frag = 0;
frag 632 sound/soc/blackfin/bf5xx-sport.c --(*frag);
frag 633 sound/soc/blackfin/bf5xx-sport.c if (tx == 1 && *frag == 0)
frag 634 sound/soc/blackfin/bf5xx-sport.c *frag = sport->tx_frags;
frag 636 sound/soc/blackfin/bf5xx-sport.c if (tx == 0 && *frag == 0)
frag 637 sound/soc/blackfin/bf5xx-sport.c *frag = sport->rx_frags;
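
Many of the networking entries above (net/core/skbuff.c, net/core/datagram.c, net/ipv4/ip_output.c, net/xfrm/xfrm_algo.c) follow the same pattern: index into skb_shinfo(skb)->frags[] and use the frag's page, page_offset and size members. The sketch below is not taken from any of the indexed files; it is a minimal illustration of that pattern for this kernel generation, where skb_frag_t still exposes those members directly (the hypothetical helper name skb_paged_len is mine).

	#include <linux/skbuff.h>

	/*
	 * Minimal sketch: walk the paged fragments of an skb the way the
	 * net/core code above does, summing frag->size.  Each fragment
	 * describes frag->size bytes stored in frag->page starting at
	 * frag->page_offset.
	 */
	static unsigned int skb_paged_len(const struct sk_buff *skb)
	{
		unsigned int i, len = 0;

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len += frag->size;
		}

		/* equals skb->data_len when the skb has no frag_list */
		return len;
	}

Note that the frag_list chain (used by the fragmentation code in ip_output.c/ip6_output.c and by SCTP above) is a separate mechanism: it links whole struct sk_buff fragments via ->next rather than page fragments.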
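
The fs/jffs2 entries use a different "frag": struct jffs2_node_frag, a node in the per-inode red-black fragtree, walked with the frag_first/frag_next/frag_prev macros defined in fs/jffs2/nodelist.h (lines 343-348 above). The following is a hedged sketch of the in-order walk seen in fs/jffs2/debug.c and gc.c, not a copy of either; the printk formatting is illustrative only.

	/*
	 * Sketch: iterate every fragment of an inode's fragtree in offset
	 * order.  A frag with a NULL ->node represents a hole; otherwise it
	 * maps [frag->ofs, frag->ofs + frag->size) onto part of frag->node.
	 */
	static void dump_fragtree(struct jffs2_inode_info *f)
	{
		struct jffs2_node_frag *frag;

		for (frag = frag_first(&f->fragtree); frag; frag = frag_next(frag)) {
			struct jffs2_full_dnode *fn = frag->node;

			printk(KERN_DEBUG "frag %08x-%08x: %s\n",
			       frag->ofs, frag->ofs + frag->size,
			       fn ? "data" : "hole");
		}
	}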