new 463 arch/x86/kernel/acpi/boot.c unsigned int old, new; new 473 arch/x86/kernel/acpi/boot.c new = acpi_noirq ? old : 0; new 481 arch/x86/kernel/acpi/boot.c new &= ~mask; new 484 arch/x86/kernel/acpi/boot.c new |= mask; new 488 arch/x86/kernel/acpi/boot.c if (old == new) new 491 arch/x86/kernel/acpi/boot.c printk(PREFIX "setting ELCR to %04x (from %04x)\n", new, old); new 492 arch/x86/kernel/acpi/boot.c outb(new, 0x4d0); new 493 arch/x86/kernel/acpi/boot.c outb(new >> 8, 0x4d1); new 1852 arch/x86/kernel/acpi/boot.c unsigned int old, new, val; new 1855 arch/x86/kernel/acpi/boot.c new = (((old & ~0x3) + 2) + ((old >> 1) & 0x1)); new 1856 arch/x86/kernel/acpi/boot.c val = cmpxchg(lock, old, new); new 1858 arch/x86/kernel/acpi/boot.c return (new < 3) ? -1 : 0; new 1863 arch/x86/kernel/acpi/boot.c unsigned int old, new, val; new 1866 arch/x86/kernel/acpi/boot.c new = old & ~0x3; new 1867 arch/x86/kernel/acpi/boot.c val = cmpxchg(lock, old, new); new 19 arch/x86/kernel/cpu/cmpxchg.c *(u8 *)ptr = new; new 34 arch/x86/kernel/cpu/cmpxchg.c *(u16 *)ptr = new; new 49 arch/x86/kernel/cpu/cmpxchg.c *(u32 *)ptr = new; new 66 arch/x86/kernel/cpu/cmpxchg.c *(u64 *)ptr = new; new 454 arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c freqs.new = data->freq_table[next_state].frequency; new 463 arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c if (!check_freqs(&cmd.mask, freqs.new, data)) { new 264 arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c freqs.new = target_fsb * fid * 100; new 267 arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c if (freqs.old == freqs.new) new 271 arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c freqs.old, freqs.new); new 62 arch/x86/kernel/cpu/cpufreq/e_powersaver.c freqs.new = centaur->fsb * ((dest_state >> 8) & 0xff); new 95 arch/x86/kernel/cpu/cpufreq/e_powersaver.c freqs.new = centaur->fsb * ((lo >> 8) & 0xff); new 125 arch/x86/kernel/cpu/cpufreq/elanfreq.c freqs.new = elan_multiplier[state].clock; new 264 arch/x86/kernel/cpu/cpufreq/gx-suspmod.c freqs.new = new_khz; new 314 arch/x86/kernel/cpu/cpufreq/gx-suspmod.c dprintk("suspend modulation w/ clock speed: %d kHz.\n", freqs.new); new 272 arch/x86/kernel/cpu/cpufreq/longhaul.c freqs.new = speed; new 350 arch/x86/kernel/cpu/cpufreq/longhaul.c freqs.new = calc_speed(longhaul_get_cpu_mult()); new 352 arch/x86/kernel/cpu/cpufreq/longhaul.c if (unlikely(freqs.new != speed)) { new 119 arch/x86/kernel/cpu/cpufreq/p4-clockmod.c freqs.new = stock_freq * p4clockmod_table[newstate].index / 8; new 121 arch/x86/kernel/cpu/cpufreq/p4-clockmod.c if (freqs.new == freqs.old) new 81 arch/x86/kernel/cpu/cpufreq/powernow-k6.c freqs.new = busfreq * clock_ratio[best_i].index; new 264 arch/x86/kernel/cpu/cpufreq/powernow-k7.c freqs.new = powernow_table[index].frequency; new 273 arch/x86/kernel/cpu/cpufreq/powernow-k7.c if (freqs.old > freqs.new) { new 964 arch/x86/kernel/cpu/cpufreq/powernow-k8.c freqs.new = find_khz_freq_from_fid(fid); new 972 arch/x86/kernel/cpu/cpufreq/powernow-k8.c freqs.new = find_khz_freq_from_fid(data->currfid); new 995 arch/x86/kernel/cpu/cpufreq/powernow-k8.c freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate); new 1003 arch/x86/kernel/cpu/cpufreq/powernow-k8.c freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate); new 61 arch/x86/kernel/cpu/cpufreq/sc520_freq.c freqs.new = sc520_freq_table[state].frequency; new 546 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c freqs.new = extract_clock(msr, cpu, 0); new 549 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c target_freq, freqs.old, freqs.new, msr); new 594 
arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c tmp = freqs.new; new 595 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c freqs.new = freqs.old; new 271 arch/x86/kernel/cpu/cpufreq/speedstep-ich.c freqs.new = speedstep_freqs[newstate].frequency; new 274 arch/x86/kernel/cpu/cpufreq/speedstep-ich.c dprintk("transiting from %u to %u kHz\n", freqs.old, freqs.new); new 277 arch/x86/kernel/cpu/cpufreq/speedstep-ich.c if (freqs.old == freqs.new) new 232 arch/x86/kernel/cpu/cpufreq/speedstep-smi.c freqs.new = speedstep_freqs[newstate].frequency; new 235 arch/x86/kernel/cpu/cpufreq/speedstep-smi.c if (freqs.old == freqs.new) new 775 arch/x86/kernel/cpu/mcheck/mce_64.c unsigned long new = simple_strtoul(buf, &end, 0); \ new 777 arch/x86/kernel/cpu/mcheck/mce_64.c var = new; \ new 281 arch/x86/kernel/cpu/mcheck/mce_amd_64.c unsigned long new = simple_strtoul(buf, &end, 0); new 284 arch/x86/kernel/cpu/mcheck/mce_amd_64.c b->interrupt_enable = !!new; new 299 arch/x86/kernel/cpu/mcheck/mce_amd_64.c unsigned long new = simple_strtoul(buf, &end, 0); new 302 arch/x86/kernel/cpu/mcheck/mce_amd_64.c if (new > THRESHOLD_MAX) new 303 arch/x86/kernel/cpu/mcheck/mce_amd_64.c new = THRESHOLD_MAX; new 304 arch/x86/kernel/cpu/mcheck/mce_amd_64.c if (new < 1) new 305 arch/x86/kernel/cpu/mcheck/mce_amd_64.c new = 1; new 307 arch/x86/kernel/cpu/mcheck/mce_amd_64.c b->threshold_limit = new; new 65 arch/x86/kernel/ftrace.c unsigned new = *(unsigned *)new_code; /* 4 bytes */ new 89 arch/x86/kernel/ftrace.c : "r"(ip), "r"(new), "c"(newch), new 94 arch/x86/kernel/ftrace.c if (replaced != old && replaced != new) new 103 arch/x86/kernel/ftrace.c unsigned char old[MCOUNT_INSN_SIZE], *new; new 107 arch/x86/kernel/ftrace.c new = ftrace_call_replace(ip, (unsigned long)func); new 108 arch/x86/kernel/ftrace.c ret = ftrace_modify_code(ip, old, new); new 117 arch/x86/kernel/ftrace.c unsigned char old[MCOUNT_INSN_SIZE], *new; new 124 arch/x86/kernel/ftrace.c new = ftrace_call_replace(ip, *addr); new 125 arch/x86/kernel/ftrace.c *addr = ftrace_modify_code(ip, old, new); new 2447 arch/x86/kernel/io_apic_32.c int irq, new, vector = 0; new 2452 arch/x86/kernel/io_apic_32.c for (new = (NR_IRQS - 1); new >= 0; new--) { new 2453 arch/x86/kernel/io_apic_32.c if (platform_legacy_irq(new)) new 2455 arch/x86/kernel/io_apic_32.c if (irq_vector[new] != 0) new 2457 arch/x86/kernel/io_apic_32.c vector = __assign_irq_vector(new); new 2459 arch/x86/kernel/io_apic_32.c irq = new; new 2277 arch/x86/kernel/io_apic_64.c int new; new 2282 arch/x86/kernel/io_apic_64.c for (new = (NR_IRQS - 1); new >= 0; new--) { new 2283 arch/x86/kernel/io_apic_64.c if (platform_legacy_irq(new)) new 2285 arch/x86/kernel/io_apic_64.c if (irq_cfg[new].vector != 0) new 2287 arch/x86/kernel/io_apic_64.c if (__assign_irq_vector(new, TARGET_CPUS) == 0) new 2288 arch/x86/kernel/io_apic_64.c irq = new; new 90 arch/x86/kernel/ldt.c int err = alloc_ldt(new, old->size, 0); new 97 arch/x86/kernel/ldt.c write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE); new 1051 arch/x86/kernel/mpparse.c unsigned char old, new; new 1056 arch/x86/kernel/mpparse.c new = mpf_checksum((unsigned char *)mpc, mpc->mpc_length); new 1057 arch/x86/kernel/mpparse.c if (old == new) { new 62 arch/x86/kernel/test_nx.c extable[0].insn = (unsigned long)new; new 617 arch/x86/kernel/tsc.c if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) || new 618 arch/x86/kernel/tsc.c (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) || new 620 arch/x86/kernel/tsc.c *lpj = cpufreq_scale(loops_per_jiffy_ref, 
ref_freq, freq->new); new 622 arch/x86/kernel/tsc.c tsc_khz = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new); new 86 arch/x86/kernel/vm86_32.c ((X) = ((X) & ~(mask)) | ((new) & (mask))) new 192 arch/x86/kernel/vmi_32.c if (gdt[nr].a != new->a || gdt[nr].b != new->b) new 193 arch/x86/kernel/vmi_32.c write_gdt_entry(gdt, nr, new, 0); new 1700 arch/x86/kvm/mmu.c paging32_update_pte(vcpu, sp, spte, new); new 1702 arch/x86/kvm/mmu.c paging64_update_pte(vcpu, sp, spte, new); new 1709 arch/x86/kvm/mmu.c if (!is_shadow_present_pte(new)) new 1711 arch/x86/kvm/mmu.c if ((old ^ new) & PT64_BASE_ADDR_MASK) new 1714 arch/x86/kvm/mmu.c new ^= PT64_NX_MASK; new 1715 arch/x86/kvm/mmu.c return (old & ~new & PT64_PERM_MASK) != 0; new 1720 arch/x86/kvm/mmu.c if (need_remote_flush(old, new)) new 1758 arch/x86/kvm/mmu.c memcpy((void *)&gpte + (gpa % 8), new, 4); new 1760 arch/x86/kvm/mmu.c memcpy((void *)&gpte, new, 8); new 1764 arch/x86/kvm/mmu.c memcpy((void *)&gpte, new, 4); new 1821 arch/x86/kvm/mmu.c mmu_guess_page_from_pte_write(vcpu, gpa, new, bytes); new 1888 arch/x86/kvm/mmu.c new = (const void *)&gentry; new 1890 arch/x86/kvm/mmu.c new = NULL; new 1895 arch/x86/kvm/mmu.c if (new) new 1896 arch/x86/kvm/mmu.c mmu_pte_write_new_pte(vcpu, sp, spte, new); new 2019 arch/x86/kvm/x86.c val = *(u64 *)new; new 2033 arch/x86/kvm/x86.c return emulator_write_emulated(addr, new, bytes, vcpu); new 1169 arch/x86/kvm/x86_emulate.c u64 old, new; new 1184 arch/x86/kvm/x86_emulate.c new = ((u64)c->regs[VCPU_REGS_RCX] << 32) | new 1187 arch/x86/kvm/x86_emulate.c rc = ops->cmpxchg_emulated(memop, &old, &new, 8, ctxt->vcpu); new 187 arch/x86/mm/pat.c if (new->type != entry->type) { new 189 arch/x86/mm/pat.c new->type = entry->type; new 197 arch/x86/mm/pat.c if (new->end <= entry->start) new 199 arch/x86/mm/pat.c else if (new->type != entry->type) new 206 arch/x86/mm/pat.c "%Lx-%Lx %s<->%s\n", current->comm, current->pid, new->start, new 207 arch/x86/mm/pat.c new->end, cattr_name(new->type), cattr_name(entry->type)); new 292 arch/x86/mm/pat.c struct memtype *new, *entry; new 342 arch/x86/mm/pat.c new = kmalloc(sizeof(struct memtype), GFP_KERNEL); new 343 arch/x86/mm/pat.c if (!new) new 346 arch/x86/mm/pat.c new->start = start; new 347 arch/x86/mm/pat.c new->end = end; new 348 arch/x86/mm/pat.c new->type = actual_type; new 368 arch/x86/mm/pat.c err = chk_conflict(new, entry, new_type); new 378 arch/x86/mm/pat.c err = chk_conflict(new, entry, new_type); new 404 arch/x86/mm/pat.c start, end, cattr_name(new->type), cattr_name(req_type)); new 405 arch/x86/mm/pat.c kfree(new); new 414 arch/x86/mm/pat.c list_add(&new->nd, where); new 416 arch/x86/mm/pat.c list_add_tail(&new->nd, &memtype_list); new 421 arch/x86/mm/pat.c start, end, cattr_name(new->type), cattr_name(req_type), new 49 arch/x86/pci/irq.c int new); new 236 crypto/async_tx/async_tx.c struct dma_chan_ref *new; new 238 crypto/async_tx/async_tx.c new = get_chan_ref_by_cap(cap, cpu_idx++); new 240 crypto/async_tx/async_tx.c new = get_chan_ref_by_cap(cap, -1); new 242 crypto/async_tx/async_tx.c per_cpu_ptr(channel_table[cap], cpu)->ref = new; new 188 fs/adfs/dir_f.c if (dir->dirhead.startmasseq != dir->dirtail.new.endmasseq || new 189 fs/adfs/dir_f.c memcmp(&dir->dirhead.startname, &dir->dirtail.new.endname, 4)) new 196 fs/adfs/dir_f.c if (adfs_dir_checkbyte(dir) != dir->dirtail.new.dircheckbyte) new 355 fs/adfs/dir_f.c dir->parent_id = adfs_readval(dir->dirtail.new.dirparent, 3); new 415 fs/adfs/dir_f.c if (dir->dirhead.startmasseq != dir->dirtail.new.endmasseq || new 416 
fs/adfs/dir_f.c memcmp(&dir->dirhead.startname, &dir->dirtail.new.endname, 4)) new 423 fs/adfs/dir_f.c if (adfs_dir_checkbyte(dir) != dir->dirtail.new.dircheckbyte) new 62 fs/adfs/dir_f.h } new; new 345 fs/affs/bitmap.c u32 old, new; new 349 fs/affs/bitmap.c new = old & mask; new 351 fs/affs/bitmap.c ((__be32 *)bh->b_data)[offset] = cpu_to_be32(new); new 578 fs/afs/flock.c list_add(&new->fl_u.afs.link, &fl->fl_u.afs.link); new 589 fs/autofs4/root.c struct dentry *new = d_lookup(parent, &dentry->d_name); new 590 fs/autofs4/root.c if (new != NULL) new 591 fs/autofs4/root.c dentry = new; new 172 fs/bfs/dir.c err = bfs_add_entry(dir, new->d_name.name, new->d_name.len, new 182 fs/bfs/dir.c d_instantiate(new, inode); new 38 fs/bfs/file.c struct buffer_head *bh, *new; new 43 fs/bfs/file.c new = sb_getblk(sb, to); new 44 fs/bfs/file.c memcpy(new->b_data, bh->b_data, bh->b_size); new 45 fs/bfs/file.c mark_buffer_dirty(new); new 47 fs/bfs/file.c brelse(new); new 362 fs/char_dev.c struct cdev *new = NULL; new 374 fs/char_dev.c new = container_of(kobj, struct cdev, kobj); new 380 fs/char_dev.c inode->i_cdev = p = new; new 383 fs/char_dev.c new = NULL; new 389 fs/char_dev.c cdev_put(new); new 732 fs/compat_ioctl.c void __user *new = compat_alloc_user_space(sizeof(sg_io_hdr_t) + new 734 fs/compat_ioctl.c if (new > top) new 737 fs/compat_ioctl.c sgio = new; new 124 fs/configfs/dir.c if (strcmp(existing, new)) new 1196 fs/dcache.c struct dentry *new = NULL; new 1200 fs/dcache.c new = __d_find_alias(inode, 1); new 1201 fs/dcache.c if (new) { new 1202 fs/dcache.c BUG_ON(!(new->d_flags & DCACHE_DISCONNECTED)); new 1203 fs/dcache.c fsnotify_d_instantiate(new, inode); new 1205 fs/dcache.c security_d_instantiate(new, inode); new 1207 fs/dcache.c d_move(new, dentry); new 1220 fs/dcache.c return new; new 1244 fs/dcache.c struct dentry *new; new 1250 fs/dcache.c new = d_alloc(dentry->d_parent, name); new 1251 fs/dcache.c if (!new) { new 1255 fs/dcache.c found = d_splice_alias(inode, new); new 1257 fs/dcache.c dput(new); new 1260 fs/dcache.c return new; new 1306 fs/dcache.c new = list_entry(inode->i_dentry.next, struct dentry, d_alias); new 1307 fs/dcache.c dget_locked(new); new 1312 fs/dcache.c d_move(new, found); new 1318 fs/dcache.c return new; new 295 fs/dlm/config.c int new; new 579 fs/dlm/config.c nd->new = 1; /* set to 0 once it's been read by dlm_nodeid_list() */ new 856 fs/dlm/config.c int *ids, *new; new 879 fs/dlm/config.c if (nd->new) new 889 fs/dlm/config.c new = kcalloc(new_count, sizeof(int), GFP_KERNEL); new 890 fs/dlm/config.c if (!new) { new 898 fs/dlm/config.c if (nd->new) { new 899 fs/dlm/config.c new[i++] = nd->nodeid; new 900 fs/dlm/config.c nd->new = 0; new 904 fs/dlm/config.c *new_out = new; new 135 fs/dlm/dlm_internal.h int *new; /* nodeids of new members */ new 734 fs/dlm/lock.c list_add_tail(new, head); new 736 fs/dlm/lock.c __list_add(new, lkb->lkb_statequeue.prev, &lkb->lkb_statequeue); new 25 fs/dlm/member.c struct list_head *newlist = &new->list; new 30 fs/dlm/member.c if (new->nodeid < memb->nodeid) new 218 fs/dlm/member.c if (!dlm_is_member(ls, rv->new[i])) new 220 fs/dlm/member.c log_debug(ls, "new nodeid %d is a re-added member", rv->new[i]); new 225 fs/dlm/member.c memb->nodeid = rv->new[i]; new 271 fs/dlm/member.c int new; new 294 fs/dlm/member.c new = test_and_clear_bit(LSFL_RUNNING, &ls->ls_flags); new 313 fs/dlm/member.c if (new) new 334 fs/dlm/member.c int *ids = NULL, *new = NULL; new 342 fs/dlm/member.c &new, &new_count); new 359 fs/dlm/member.c rv->new = new; new 370 
fs/dlm/member.c kfree(rv_old->new); new 380 fs/dlm/member.c kfree(new); new 260 fs/dlm/recoverd.c kfree(rv->new); new 1378 fs/exec.c if (new) { new 1379 fs/exec.c if (!try_module_get(new->module)) new 1382 fs/exec.c current->binfmt = new; new 767 fs/ext3/namei.c struct dx_entry *old = frame->at, *new = old + 1; new 772 fs/ext3/namei.c memmove(new + 1, new, (char *)(entries + count) - (char *)(new)); new 773 fs/ext3/namei.c dx_set_hash(new, hash); new 774 fs/ext3/namei.c dx_set_block(new, block); new 1845 fs/ext4/inode.c struct buffer_head new; new 1854 fs/ext4/inode.c new.b_state = lbh->b_state; new 1855 fs/ext4/inode.c new.b_blocknr = 0; new 1856 fs/ext4/inode.c new.b_size = lbh->b_size; new 1862 fs/ext4/inode.c if (!new.b_size) new 1864 fs/ext4/inode.c err = mpd->get_block(mpd->inode, next, &new, 1); new 1905 fs/ext4/inode.c BUG_ON(new.b_size == 0); new 1907 fs/ext4/inode.c if (buffer_new(&new)) new 1908 fs/ext4/inode.c __unmap_underlying_blocks(mpd->inode, &new); new 1915 fs/ext4/inode.c mpage_put_bnr_to_bhs(mpd, next, &new); new 769 fs/ext4/namei.c struct dx_entry *old = frame->at, *new = old + 1; new 774 fs/ext4/namei.c memmove(new + 1, new, (char *)(entries + count) - (char *)(new)); new 775 fs/ext4/namei.c dx_set_hash(new, hash); new 776 fs/ext4/namei.c dx_set_block(new, block); new 124 fs/fat/cache.c if (p->fcluster == new->fcluster) { new 125 fs/fat/cache.c BUG_ON(p->dcluster != new->dcluster); new 126 fs/fat/cache.c if (new->nr_contig > p->nr_contig) new 127 fs/fat/cache.c p->nr_contig = new->nr_contig; new 138 fs/fat/cache.c if (new->fcluster == -1) /* dummy cache */ new 142 fs/fat/cache.c if (new->id != FAT_CACHE_VALID && new 143 fs/fat/cache.c new->id != MSDOS_I(inode)->cache_valid_id) new 146 fs/fat/cache.c cache = fat_cache_merge(inode, new); new 154 fs/fat/cache.c cache = fat_cache_merge(inode, new); new 165 fs/fat/cache.c cache->fcluster = new->fcluster; new 166 fs/fat/cache.c cache->dcluster = new->dcluster; new 167 fs/fat/cache.c cache->nr_contig = new->nr_contig; new 157 fs/fat/fatent.c if (new == FAT_ENT_EOF) new 158 fs/fat/fatent.c new = EOF_FAT12; new 162 fs/fat/fatent.c *ent12_p[0] = (new << 4) | (*ent12_p[0] & 0x0f); new 163 fs/fat/fatent.c *ent12_p[1] = new >> 4; new 165 fs/fat/fatent.c *ent12_p[0] = new & 0xff; new 166 fs/fat/fatent.c *ent12_p[1] = (*ent12_p[1] & 0xf0) | (new >> 8); new 177 fs/fat/fatent.c if (new == FAT_ENT_EOF) new 178 fs/fat/fatent.c new = EOF_FAT16; new 180 fs/fat/fatent.c *fatent->u.ent16_p = cpu_to_le16(new); new 186 fs/fat/fatent.c if (new == FAT_ENT_EOF) new 187 fs/fat/fatent.c new = EOF_FAT32; new 189 fs/fat/fatent.c WARN_ON(new & 0xf0000000); new 190 fs/fat/fatent.c new |= le32_to_cpu(*fatent->u.ent32_p) & ~0x0fffffff; new 191 fs/fat/fatent.c *fatent->u.ent32_p = cpu_to_le32(new); new 393 fs/fat/fatent.c ops->ent_put(fatent, new); new 510 fs/fcntl.c struct fasync_struct *new = NULL; new 514 fs/fcntl.c new = kmem_cache_alloc(fasync_cache, GFP_KERNEL); new 515 fs/fcntl.c if (!new) new 523 fs/fcntl.c kmem_cache_free(fasync_cache, new); new 534 fs/fcntl.c new->magic = FASYNC_MAGIC; new 535 fs/fcntl.c new->fa_file = filp; new 536 fs/fcntl.c new->fa_fd = fd; new 537 fs/fcntl.c new->fa_next = *fapp; new 538 fs/fcntl.c *fapp = new; new 646 fs/gfs2/bmap.c int create = *new; new 650 fs/gfs2/bmap.c BUG_ON(!new); new 657 fs/gfs2/bmap.c *new = 1; new 659 fs/gfs2/bmap.c *new = 0; new 162 fs/gfs2/dir.c int new = 0; new 193 fs/gfs2/dir.c new = 1; new 194 fs/gfs2/dir.c error = gfs2_extent_map(&ip->i_inode, lblock, &new, new 203 fs/gfs2/dir.c if (amount 
== sdp->sd_jbsize || new) new 301 fs/gfs2/dir.c int new; new 308 fs/gfs2/dir.c new = 0; new 309 fs/gfs2/dir.c error = gfs2_extent_map(&ip->i_inode, lblock, &new, new 936 fs/gfs2/dir.c struct gfs2_dirent *dent = NULL, *prev = NULL, *next = NULL, *new; new 1016 fs/gfs2/dir.c new = gfs2_dirent_alloc(inode, nbh, &str); new 1017 fs/gfs2/dir.c if (IS_ERR(new)) { new 1018 fs/gfs2/dir.c error = PTR_ERR(new); new 1022 fs/gfs2/dir.c new->de_inum = dent->de_inum; /* No endian worries */ new 1023 fs/gfs2/dir.c new->de_type = dent->de_type; /* No endian worries */ new 774 fs/gfs2/eattr.c struct gfs2_ea_header *new = (struct gfs2_ea_header *)((char *)ea + new 782 fs/gfs2/eattr.c new->ea_rec_len = cpu_to_be32(new_size); new 783 fs/gfs2/eattr.c new->ea_flags = last; new 785 fs/gfs2/eattr.c return new; new 383 fs/gfs2/meta_io.c if (new) { new 1136 fs/gfs2/quota.c int new = 0; new 1137 fs/gfs2/quota.c error = gfs2_extent_map(&ip->i_inode, x, &new, &dblock, &extlen); new 35 fs/gfs2/recovery.c int new = 0; new 40 fs/gfs2/recovery.c error = gfs2_extent_map(&ip->i_inode, blk, &new, &dblock, &extlen); new 141 fs/hppfs/hppfs.c struct dentry *proc_dentry, *new, *parent; new 161 fs/hppfs/hppfs.c new = (*parent->d_inode->i_op->lookup)(parent->d_inode, new 163 fs/hppfs/hppfs.c if (new) { new 165 fs/hppfs/hppfs.c proc_dentry = new; new 341 fs/hppfs/hppfs.c struct hppfs_data *data, *new, *head; new 382 fs/hppfs/hppfs.c new = kmalloc(sizeof(*data), GFP_KERNEL); new 383 fs/hppfs/hppfs.c if (new == 0) { new 390 fs/hppfs/hppfs.c INIT_LIST_HEAD(&new->list); new 391 fs/hppfs/hppfs.c list_add(&new->list, &data->list); new 392 fs/hppfs/hppfs.c data = new; new 682 fs/inotify.c new->mask = old->mask; new 683 fs/inotify.c new->ih = ih; new 688 fs/inotify.c ret = inotify_handle_get_wd(ih, new); new 691 fs/inotify.c ret = new->wd; new 695 fs/inotify.c new->inode = igrab(old->inode); new 697 fs/inotify.c list_add(&new->h_list, &ih->watches); new 698 fs/inotify.c list_add(&new->i_list, &old->inode->inotify_watches); new 43 fs/isofs/dir.c new[i] = c; new 314 fs/jffs2/fs.c f->metadata->size != sizeof(jdev.new)) { new 328 fs/jffs2/fs.c rdev = new_decode_dev(je32_to_cpu(jdev.new)); new 29 fs/jffs2/nodelist.c dbg_dentlist("add dirent \"%s\", ino #%u\n", new->name, new->ino); new 31 fs/jffs2/nodelist.c while ((*prev) && (*prev)->nhash <= new->nhash) { new 32 fs/jffs2/nodelist.c if ((*prev)->nhash == new->nhash && !strcmp((*prev)->name, new->name)) { new 34 fs/jffs2/nodelist.c if (new->version < (*prev)->version) { new 37 fs/jffs2/nodelist.c jffs2_mark_node_obsolete(c, new->raw); new 38 fs/jffs2/nodelist.c jffs2_free_full_dirent(new); new 42 fs/jffs2/nodelist.c new->next = (*prev)->next; new 48 fs/jffs2/nodelist.c *prev = new; new 54 fs/jffs2/nodelist.c new->next = *prev; new 55 fs/jffs2/nodelist.c *prev = new; new 440 fs/jffs2/nodelist.c if (!new->ino) new 441 fs/jffs2/nodelist.c new->ino = ++c->highest_ino; new 443 fs/jffs2/nodelist.c dbg_inocache("add %p (ino #%u)\n", new, new->ino); new 445 fs/jffs2/nodelist.c prev = &c->inocache_list[new->ino % INOCACHE_HASHSIZE]; new 447 fs/jffs2/nodelist.c while ((*prev) && (*prev)->ino < new->ino) { new 450 fs/jffs2/nodelist.c new->next = *prev; new 451 fs/jffs2/nodelist.c *prev = new; new 318 fs/jffs2/nodelist.h jdev->new = cpu_to_je32(new_encode_dev(rdev)); new 319 fs/jffs2/nodelist.h return sizeof(jdev->new); new 416 fs/jffs2/nodemgmt.c struct jffs2_raw_node_ref *new; new 439 fs/jffs2/nodemgmt.c new = jffs2_link_node_ref(c, jeb, ofs, len, ic); new 460 fs/jffs2/nodemgmt.c return new; new 84 
fs/jffs2/wbuf.c struct jffs2_inodirty *new; new 92 fs/jffs2/wbuf.c new = kmalloc(sizeof(*new), GFP_KERNEL); new 93 fs/jffs2/wbuf.c if (!new) { new 99 fs/jffs2/wbuf.c new->ino = ino; new 100 fs/jffs2/wbuf.c new->next = c->wbuf_inodes; new 101 fs/jffs2/wbuf.c c->wbuf_inodes = new; new 621 fs/jfs/jfs_metapage.c if (new && (PSIZE == PAGE_CACHE_SIZE)) { new 650 fs/jfs/jfs_metapage.c if (!new) { new 677 fs/jfs/jfs_metapage.c if (new) { new 97 fs/lockd/clntproc.c struct nlm_lockowner *res, *new = NULL; new 103 fs/lockd/clntproc.c new = kmalloc(sizeof(*new), GFP_KERNEL); new 106 fs/lockd/clntproc.c if (res == NULL && new != NULL) { new 107 fs/lockd/clntproc.c res = new; new 108 fs/lockd/clntproc.c atomic_set(&new->count, 1); new 109 fs/lockd/clntproc.c new->owner = owner; new 110 fs/lockd/clntproc.c new->pid = __nlm_alloc_pid(host); new 111 fs/lockd/clntproc.c new->host = nlm_get_host(host); new 112 fs/lockd/clntproc.c list_add(&new->list, &host->h_lockowners); new 113 fs/lockd/clntproc.c new = NULL; new 117 fs/lockd/clntproc.c kfree(new); new 449 fs/lockd/clntproc.c new->fl_u.nfs_fl.state = fl->fl_u.nfs_fl.state; new 450 fs/lockd/clntproc.c new->fl_u.nfs_fl.owner = nlm_get_lockowner(fl->fl_u.nfs_fl.owner); new 451 fs/lockd/clntproc.c list_add_tail(&new->fl_u.nfs_fl.list, &fl->fl_u.nfs_fl.owner->host->h_granted); new 215 fs/locks.c fl->fl_ops->fl_copy_lock(new, fl); new 216 fs/locks.c new->fl_ops = fl->fl_ops; new 220 fs/locks.c fl->fl_lmops->fl_copy_lock(new, fl); new 221 fs/locks.c new->fl_lmops = fl->fl_lmops; new 230 fs/locks.c new->fl_owner = fl->fl_owner; new 231 fs/locks.c new->fl_pid = fl->fl_pid; new 232 fs/locks.c new->fl_file = NULL; new 233 fs/locks.c new->fl_flags = fl->fl_flags; new 234 fs/locks.c new->fl_type = fl->fl_type; new 235 fs/locks.c new->fl_start = fl->fl_start; new 236 fs/locks.c new->fl_end = fl->fl_end; new 237 fs/locks.c new->fl_ops = NULL; new 238 fs/locks.c new->fl_lmops = NULL; new 244 fs/locks.c locks_release_private(new); new 246 fs/locks.c __locks_copy_lock(new, fl); new 247 fs/locks.c new->fl_file = fl->fl_file; new 248 fs/locks.c new->fl_ops = fl->fl_ops; new 249 fs/locks.c new->fl_lmops = fl->fl_lmops; new 251 fs/locks.c locks_copy_private(new, fl); new 1218 fs/namei.c struct dentry *new; new 1225 fs/namei.c new = d_alloc(base, name); new 1227 fs/namei.c if (!new) new 1229 fs/namei.c dentry = inode->i_op->lookup(inode, new, nd); new 1231 fs/namei.c dentry = new; new 1233 fs/namei.c dput(new); new 2182 fs/namespace.c struct path new, old, parent_path, root_parent, root; new 2188 fs/namespace.c error = user_path_dir(new_root, &new); new 2192 fs/namespace.c if (!check_mnt(new.mnt)) new 2199 fs/namespace.c error = security_sb_pivotroot(&old, &new); new 2213 fs/namespace.c IS_MNT_SHARED(new.mnt->mnt_parent) || new 2219 fs/namespace.c if (IS_DEADDIR(new.dentry->d_inode)) new 2221 fs/namespace.c if (d_unhashed(new.dentry) && !IS_ROOT(new.dentry)) new 2226 fs/namespace.c if (new.mnt == root.mnt || new 2234 fs/namespace.c if (new.mnt->mnt_root != new.dentry) new 2236 fs/namespace.c if (new.mnt->mnt_parent == new.mnt) new 2241 fs/namespace.c if (tmp != new.mnt) { new 2245 fs/namespace.c if (tmp->mnt_parent == new.mnt) new 2249 fs/namespace.c if (!is_subdir(tmp->mnt_mountpoint, new.dentry)) new 2251 fs/namespace.c } else if (!is_subdir(old.dentry, new.dentry)) new 2253 fs/namespace.c detach_mnt(new.mnt, &parent_path); new 2258 fs/namespace.c attach_mnt(new.mnt, &root_parent); new 2261 fs/namespace.c chroot_fs_refs(&root, &new); new 2262 fs/namespace.c 
security_sb_post_pivotroot(&root, &new); new 2272 fs/namespace.c path_put(&new); new 748 fs/ncpfs/ioctl.c void* new; new 767 fs/ncpfs/ioctl.c new = kmalloc(user.len, GFP_USER); new 768 fs/ncpfs/ioctl.c if (!new) new 770 fs/ncpfs/ioctl.c if (copy_from_user(new, user.data, user.len)) { new 771 fs/ncpfs/ioctl.c kfree(new); new 775 fs/ncpfs/ioctl.c new = NULL; new 781 fs/ncpfs/ioctl.c server->priv.data = new; new 358 fs/nfs/client.c struct nfs_client *clp, *new = NULL; new 371 fs/nfs/client.c if (new) new 376 fs/nfs/client.c new = nfs_alloc_client(cl_init); new 377 fs/nfs/client.c } while (new); new 383 fs/nfs/client.c clp = new; new 395 fs/nfs/client.c if (new) new 396 fs/nfs/client.c nfs_free_client(new); new 142 fs/nfs/nfs4state.c get_random_bytes(&new->id, sizeof(new->id)); new 143 fs/nfs/nfs4state.c new->id &= mask; new 144 fs/nfs/nfs4state.c if (new->id < minval) new 145 fs/nfs/nfs4state.c new->id += minval; new 154 fs/nfs/nfs4state.c if (new->id < pos->id) new 156 fs/nfs/nfs4state.c else if (new->id > pos->id) new 161 fs/nfs/nfs4state.c rb_link_node(&new->rb_node, parent, p); new 162 fs/nfs/nfs4state.c rb_insert_color(&new->rb_node, root); new 166 fs/nfs/nfs4state.c new->id++; new 167 fs/nfs/nfs4state.c if (new->id < minval || (new->id & mask) != new->id) { new 168 fs/nfs/nfs4state.c new->id = minval; new 175 fs/nfs/nfs4state.c if (new->id < pos->id) new 230 fs/nfs/nfs4state.c if (new->so_server < sp->so_server) { new 234 fs/nfs/nfs4state.c if (new->so_server > sp->so_server) { new 238 fs/nfs/nfs4state.c if (new->so_cred < sp->so_cred) new 240 fs/nfs/nfs4state.c else if (new->so_cred > sp->so_cred) new 247 fs/nfs/nfs4state.c nfs_alloc_unique_id(&clp->cl_openowner_id, &new->so_owner_id, 1, 64); new 248 fs/nfs/nfs4state.c rb_link_node(&new->so_client_node, parent, p); new 249 fs/nfs/nfs4state.c rb_insert_color(&new->so_client_node, &clp->cl_state_owners); new 250 fs/nfs/nfs4state.c return new; new 305 fs/nfs/nfs4state.c struct nfs4_state_owner *sp, *new; new 312 fs/nfs/nfs4state.c new = nfs4_alloc_state_owner(); new 313 fs/nfs/nfs4state.c if (new == NULL) new 315 fs/nfs/nfs4state.c new->so_client = clp; new 316 fs/nfs/nfs4state.c new->so_server = server; new 317 fs/nfs/nfs4state.c new->so_cred = cred; new 319 fs/nfs/nfs4state.c sp = nfs4_insert_state_owner(clp, new); new 321 fs/nfs/nfs4state.c if (sp == new) new 324 fs/nfs/nfs4state.c rpc_destroy_wait_queue(&new->so_sequence.wait); new 325 fs/nfs/nfs4state.c kfree(new); new 402 fs/nfs/nfs4state.c struct nfs4_state *state, *new; new 410 fs/nfs/nfs4state.c new = nfs4_alloc_open_state(); new 414 fs/nfs/nfs4state.c if (state == NULL && new != NULL) { new 415 fs/nfs/nfs4state.c state = new; new 428 fs/nfs/nfs4state.c if (new) new 429 fs/nfs/nfs4state.c nfs4_free_open_state(new); new 575 fs/nfs/nfs4state.c struct nfs4_lock_state *lsp, *new = NULL; new 582 fs/nfs/nfs4state.c if (new != NULL) { new 583 fs/nfs/nfs4state.c new->ls_state = state; new 584 fs/nfs/nfs4state.c list_add(&new->ls_locks, &state->lock_states); new 586 fs/nfs/nfs4state.c lsp = new; new 587 fs/nfs/nfs4state.c new = NULL; new 591 fs/nfs/nfs4state.c new = nfs4_alloc_lock_state(state, owner); new 592 fs/nfs/nfs4state.c if (new == NULL) new 596 fs/nfs/nfs4state.c if (new != NULL) new 597 fs/nfs/nfs4state.c nfs4_free_lock_state(new); new 679 fs/nfs/nfs4state.c struct nfs_seqid *new; new 681 fs/nfs/nfs4state.c new = kmalloc(sizeof(*new), GFP_KERNEL); new 682 fs/nfs/nfs4state.c if (new != NULL) { new 683 fs/nfs/nfs4state.c new->sequence = counter; new 684 fs/nfs/nfs4state.c 
INIT_LIST_HEAD(&new->list); new 686 fs/nfs/nfs4state.c return new; new 118 fs/nfs/read.c struct nfs_page *new; new 124 fs/nfs/read.c new = nfs_create_request(ctx, inode, page, 0, len); new 125 fs/nfs/read.c if (IS_ERR(new)) { new 127 fs/nfs/read.c return PTR_ERR(new); new 132 fs/nfs/read.c nfs_list_add_request(new, &one_request); new 532 fs/nfs/read.c struct nfs_page *new; new 546 fs/nfs/read.c new = nfs_create_request(desc->ctx, inode, page, 0, len); new 547 fs/nfs/read.c if (IS_ERR(new)) new 552 fs/nfs/read.c if (!nfs_pageio_add_request(desc->pgio, new)) { new 558 fs/nfs/read.c error = PTR_ERR(new); new 216 fs/nfsd/export.c struct svc_expkey *new = container_of(b, struct svc_expkey, h); new 218 fs/nfsd/export.c if (orig->ek_fsidtype != new->ek_fsidtype || new 219 fs/nfsd/export.c orig->ek_client != new->ek_client || new 220 fs/nfsd/export.c memcmp(orig->ek_fsid, new->ek_fsid, key_len(orig->ek_fsidtype)) != 0) new 228 fs/nfsd/export.c struct svc_expkey *new = container_of(cnew, struct svc_expkey, h); new 232 fs/nfsd/export.c new->ek_client = item->ek_client; new 233 fs/nfsd/export.c new->ek_fsidtype = item->ek_fsidtype; new 235 fs/nfsd/export.c memcpy(new->ek_fsid, item->ek_fsid, sizeof(new->ek_fsid)); new 241 fs/nfsd/export.c struct svc_expkey *new = container_of(cnew, struct svc_expkey, h); new 244 fs/nfsd/export.c new->ek_path = item->ek_path; new 296 fs/nfsd/export.c int hash = new->ek_fsidtype; new 297 fs/nfsd/export.c char * cp = (char*)new->ek_fsid; new 298 fs/nfsd/export.c int len = key_len(new->ek_fsidtype); new 301 fs/nfsd/export.c hash ^= hash_ptr(new->ek_client, EXPKEY_HASHBITS); new 304 fs/nfsd/export.c ch = sunrpc_cache_update(&svc_expkey_cache, &new->h, new 678 fs/nfsd/export.c struct svc_export *new = container_of(b, struct svc_export, h); new 679 fs/nfsd/export.c return orig->ex_client == new->ex_client && new 680 fs/nfsd/export.c orig->ex_path.dentry == new->ex_path.dentry && new 681 fs/nfsd/export.c orig->ex_path.mnt == new->ex_path.mnt; new 686 fs/nfsd/export.c struct svc_export *new = container_of(cnew, struct svc_export, h); new 690 fs/nfsd/export.c new->ex_client = item->ex_client; new 691 fs/nfsd/export.c new->ex_path.dentry = dget(item->ex_path.dentry); new 692 fs/nfsd/export.c new->ex_path.mnt = mntget(item->ex_path.mnt); new 693 fs/nfsd/export.c new->ex_pathname = NULL; new 694 fs/nfsd/export.c new->ex_fslocs.locations = NULL; new 695 fs/nfsd/export.c new->ex_fslocs.locations_count = 0; new 696 fs/nfsd/export.c new->ex_fslocs.migrated = 0; new 701 fs/nfsd/export.c struct svc_export *new = container_of(cnew, struct svc_export, h); new 705 fs/nfsd/export.c new->ex_flags = item->ex_flags; new 706 fs/nfsd/export.c new->ex_anon_uid = item->ex_anon_uid; new 707 fs/nfsd/export.c new->ex_anon_gid = item->ex_anon_gid; new 708 fs/nfsd/export.c new->ex_fsid = item->ex_fsid; new 709 fs/nfsd/export.c new->ex_uuid = item->ex_uuid; new 711 fs/nfsd/export.c new->ex_pathname = item->ex_pathname; new 713 fs/nfsd/export.c new->ex_fslocs.locations = item->ex_fslocs.locations; new 715 fs/nfsd/export.c new->ex_fslocs.locations_count = item->ex_fslocs.locations_count; new 717 fs/nfsd/export.c new->ex_fslocs.migrated = item->ex_fslocs.migrated; new 719 fs/nfsd/export.c new->ex_nflavors = item->ex_nflavors; new 721 fs/nfsd/export.c new->ex_flavors[i] = item->ex_flavors[i]; new 775 fs/nfsd/export.c ch = sunrpc_cache_update(&svc_export_cache, &new->h, new 999 fs/nfsd/export.c struct svc_export new; new 1031 fs/nfsd/export.c memset(&new, 0, sizeof(new)); new 1061 fs/nfsd/export.c 
new.h.expiry_time = NEVER; new 1062 fs/nfsd/export.c new.h.flags = 0; new 1063 fs/nfsd/export.c new.ex_pathname = kstrdup(nxp->ex_path, GFP_KERNEL); new 1064 fs/nfsd/export.c if (!new.ex_pathname) new 1066 fs/nfsd/export.c new.ex_client = clp; new 1067 fs/nfsd/export.c new.ex_path = nd.path; new 1068 fs/nfsd/export.c new.ex_flags = nxp->ex_flags; new 1069 fs/nfsd/export.c new.ex_anon_uid = nxp->ex_anon_uid; new 1070 fs/nfsd/export.c new.ex_anon_gid = nxp->ex_anon_gid; new 1071 fs/nfsd/export.c new.ex_fsid = nxp->ex_dev; new 1073 fs/nfsd/export.c exp = svc_export_lookup(&new); new 1075 fs/nfsd/export.c exp = svc_export_update(&new, exp); new 1088 fs/nfsd/export.c kfree(new.ex_pathname); new 457 fs/nfsd/nfs3xdr.c char *old, *new; new 474 fs/nfsd/nfs3xdr.c args->tname = new = new 482 fs/nfsd/nfs3xdr.c *new++ = *old++; new 494 fs/nfsd/nfs3xdr.c *new++ = *old++; new 498 fs/nfsd/nfs3xdr.c *new = '\0'; new 85 fs/nfsd/nfs4idmap.c struct ent *new = container_of(cnew, struct ent, h); new 88 fs/nfsd/nfs4idmap.c new->id = itm->id; new 89 fs/nfsd/nfs4idmap.c new->type = itm->type; new 91 fs/nfsd/nfs4idmap.c strlcpy(new->name, itm->name, sizeof(new->name)); new 92 fs/nfsd/nfs4idmap.c strlcpy(new->authname, itm->authname, sizeof(new->name)); new 293 fs/nfsd/nfs4idmap.c &new->h, &old->h, new 294 fs/nfsd/nfs4idmap.c idtoname_hash(new)); new 455 fs/nfsd/nfs4idmap.c &new->h, &old->h, new 456 fs/nfsd/nfs4idmap.c nametoid_hash(new)); new 697 fs/nfsd/nfs4state.c struct nfs4_client *conf, *unconf, *new; new 741 fs/nfsd/nfs4state.c new = create_client(clname, dname); new 742 fs/nfsd/nfs4state.c if (new == NULL) new 744 fs/nfsd/nfs4state.c gen_clid(new); new 758 fs/nfsd/nfs4state.c new = create_client(clname, dname); new 759 fs/nfsd/nfs4state.c if (new == NULL) new 761 fs/nfsd/nfs4state.c copy_clid(new, conf); new 768 fs/nfsd/nfs4state.c new = create_client(clname, dname); new 769 fs/nfsd/nfs4state.c if (new == NULL) new 771 fs/nfsd/nfs4state.c gen_clid(new); new 779 fs/nfsd/nfs4state.c new = create_client(clname, dname); new 780 fs/nfsd/nfs4state.c if (new == NULL) new 782 fs/nfsd/nfs4state.c gen_clid(new); new 784 fs/nfsd/nfs4state.c copy_verf(new, &clverifier); new 785 fs/nfsd/nfs4state.c new->cl_addr = sin->sin_addr.s_addr; new 786 fs/nfsd/nfs4state.c copy_cred(&new->cl_cred, &rqstp->rq_cred); new 787 fs/nfsd/nfs4state.c gen_confirm(new); new 788 fs/nfsd/nfs4state.c gen_callback(new, setclid); new 789 fs/nfsd/nfs4state.c add_to_unconfirmed(new, strhashval); new 790 fs/nfsd/nfs4state.c setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot; new 791 fs/nfsd/nfs4state.c setclid->se_clientid.cl_id = new->cl_clientid.cl_id; new 792 fs/nfsd/nfs4state.c memcpy(setclid->se_confirm.data, new->cl_confirm.data, sizeof(setclid->se_confirm.data)); new 1355 fs/nfsd/nfs4state.c struct nfs4_delegation *dp = (struct nfs4_delegation *)new->fl_owner; new 1357 fs/nfsd/nfs4state.c dprintk("NFSD: nfsd_copy_lock_deleg_cb: new fl %p dp %p\n", new, dp); new 1360 fs/nfsd/nfs4state.c dp->dl_flock = new; new 2147 fs/nfsd/nfs4state.c clientid_t *lockclid = &lock->v.new.clientid; new 2671 fs/nfsd/nfs4state.c &lock->v.new.owner); new 308 fs/nfsd/nfssvc.c int new = nthreads[i] * NFSD_MAXSERVS / tot; new 309 fs/nfsd/nfssvc.c tot -= (nthreads[i] - new); new 310 fs/nfsd/nfssvc.c nthreads[i] = new; new 830 fs/ocfs2/aops.c if (new) new 861 fs/ocfs2/aops.c if (ret == 0 || !new) new 1108 fs/ocfs2/aops.c if (new) new 1111 fs/ocfs2/aops.c new); new 1114 fs/ocfs2/aops.c map_from, map_to, new); new 1122 fs/ocfs2/aops.c if (new) { new 1132 
fs/ocfs2/aops.c BUG_ON(!new); new 1138 fs/ocfs2/aops.c cluster_start, cluster_end, new); new 1155 fs/ocfs2/aops.c if (new && !PageUptodate(page)) new 1184 fs/ocfs2/aops.c if (new) { new 1244 fs/ocfs2/aops.c int ret, i, new, should_zero = 0; new 1249 fs/ocfs2/aops.c new = phys == 0 ? 1 : 0; new 1250 fs/ocfs2/aops.c if (new || unwritten) new 1253 fs/ocfs2/aops.c if (new) { new 1330 fs/ocfs2/aops.c if (ret && new) new 414 fs/ocfs2/dlm/dlmdebug.c loff_t new = -1; new 418 fs/ocfs2/dlm/dlmdebug.c new = off; new 421 fs/ocfs2/dlm/dlmdebug.c new = file->f_pos + off; new 425 fs/ocfs2/dlm/dlmdebug.c if (new < 0 || new > db->len) new 428 fs/ocfs2/dlm/dlmdebug.c return (file->f_pos = new); new 226 fs/ocfs2/super.c struct inode *new = NULL; new 232 fs/ocfs2/super.c new = ocfs2_iget(osb, osb->root_blkno, OCFS2_FI_FLAG_SYSFILE, 0); new 233 fs/ocfs2/super.c if (IS_ERR(new)) { new 234 fs/ocfs2/super.c status = PTR_ERR(new); new 238 fs/ocfs2/super.c osb->root_inode = new; new 240 fs/ocfs2/super.c new = ocfs2_iget(osb, osb->system_dir_blkno, OCFS2_FI_FLAG_SYSFILE, 0); new 241 fs/ocfs2/super.c if (IS_ERR(new)) { new 242 fs/ocfs2/super.c status = PTR_ERR(new); new 246 fs/ocfs2/super.c osb->sys_root_inode = new; new 250 fs/ocfs2/super.c new = ocfs2_get_system_file_inode(osb, i, osb->slot_num); new 251 fs/ocfs2/super.c if (!new) { new 261 fs/ocfs2/super.c iput(new); new 271 fs/ocfs2/super.c struct inode *new = NULL; new 280 fs/ocfs2/super.c new = ocfs2_get_system_file_inode(osb, i, osb->slot_num); new 281 fs/ocfs2/super.c if (!new) { new 289 fs/ocfs2/super.c iput(new); new 265 fs/ocfs2/uptodate.c sector_t block = new->c_block; new 290 fs/ocfs2/uptodate.c rb_link_node(&new->c_node, parent, p); new 291 fs/ocfs2/uptodate.c rb_insert_color(&new->c_node, &ci->ci_cache.ci_tree); new 349 fs/ocfs2/uptodate.c struct ocfs2_meta_cache_item *new = NULL; new 357 fs/ocfs2/uptodate.c new = kmem_cache_alloc(ocfs2_uptodate_cachep, GFP_NOFS); new 358 fs/ocfs2/uptodate.c if (!new) { new 362 fs/ocfs2/uptodate.c new->c_block = block; new 392 fs/ocfs2/uptodate.c __ocfs2_insert_cache_tree(ci, new); new 395 fs/ocfs2/uptodate.c new = NULL; new 397 fs/ocfs2/uptodate.c if (new) new 398 fs/ocfs2/uptodate.c kmem_cache_free(ocfs2_uptodate_cachep, new); new 3153 fs/ocfs2/xattr.c if (!new) new 1552 fs/proc/base.c struct dentry *new; new 1553 fs/proc/base.c new = d_alloc(dir, &qname); new 1554 fs/proc/base.c if (new) { new 1555 fs/proc/base.c child = instantiate(dir->d_inode, new, task, ptr); new 1557 fs/proc/base.c dput(new); new 1559 fs/proc/base.c child = new; new 64 fs/proc/kcore.c new->addr = (unsigned long)addr; new 65 fs/proc/kcore.c new->size = size; new 68 fs/proc/kcore.c new->next = kclist; new 69 fs/proc/kcore.c kclist = new; new 232 fs/proc/vmcore.c struct vmcore *new; new 259 fs/proc/vmcore.c new = get_new_element(); new 260 fs/proc/vmcore.c if (!new) { new 264 fs/proc/vmcore.c new->paddr = phdr_ptr->p_offset; new 265 fs/proc/vmcore.c new->size = real_sz; new 266 fs/proc/vmcore.c list_add_tail(&new->list, vc_list); new 313 fs/proc/vmcore.c struct vmcore *new; new 340 fs/proc/vmcore.c new = get_new_element(); new 341 fs/proc/vmcore.c if (!new) { new 345 fs/proc/vmcore.c new->paddr = phdr_ptr->p_offset; new 346 fs/proc/vmcore.c new->size = real_sz; new 347 fs/proc/vmcore.c list_add_tail(&new->list, vc_list); new 388 fs/proc/vmcore.c struct vmcore *new; new 403 fs/proc/vmcore.c new = get_new_element(); new 404 fs/proc/vmcore.c if (!new) new 406 fs/proc/vmcore.c new->paddr = phdr_ptr->p_offset; new 407 fs/proc/vmcore.c new->size = 
phdr_ptr->p_memsz; new 408 fs/proc/vmcore.c list_add_tail(&new->list, vc_list); new 425 fs/proc/vmcore.c struct vmcore *new; new 440 fs/proc/vmcore.c new = get_new_element(); new 441 fs/proc/vmcore.c if (!new) new 443 fs/proc/vmcore.c new->paddr = phdr_ptr->p_offset; new 444 fs/proc/vmcore.c new->size = phdr_ptr->p_memsz; new 445 fs/proc/vmcore.c list_add_tail(&new->list, vc_list); new 165 fs/reiserfs/file.c int new; new 186 fs/reiserfs/file.c new = buffer_new(bh); new 203 fs/reiserfs/file.c (new || page->index >= i_size_index)) { new 240 fs/reiserfs/item_ops.c *start = new; new 249 fs/reiserfs/item_ops.c if (start == 0 && new == 0) { new 253 fs/reiserfs/item_ops.c if (start != 0 && (start + *len) == new) { new 213 fs/ubifs/misc.h dev->new = cpu_to_le32(new_encode_dev(rdev)); new 214 fs/ubifs/misc.h return sizeof(dev->new); new 78 fs/ubifs/orphan.c orphan->new = 1; new 148 fs/ubifs/orphan.c if (o->new) { new 176 fs/ubifs/orphan.c ubifs_assert(orphan->new); new 177 fs/ubifs/orphan.c orphan->new = 0; new 382 fs/ubifs/orphan.c if (orphan->new) new 447 fs/ubifs/orphan.c ubifs_assert(!orphan->new); new 207 fs/ubifs/super.c if (ui->data_len == sizeof(dev->new)) new 208 fs/ubifs/super.c rdev = new_decode_dev(le32_to_cpu(dev->new)); new 414 fs/ubifs/ubifs-media.h __le32 new; new 859 fs/ubifs/ubifs.h int new; new 294 fs/udf/inode.c int err, new; new 307 fs/udf/inode.c new = 0; new 320 fs/udf/inode.c bh = inode_getblk(inode, block, &err, &phys, &new); new 326 fs/udf/inode.c if (new) new 686 fs/udf/inode.c *new = 1; new 291 fs/ufs/inode.c *new = 1; new 396 fs/ufs/inode.c *new = 1; new 421 fs/ufs/inode.c int ret, err, new; new 436 fs/ufs/inode.c new = 0; new 458 fs/ufs/inode.c ufs_inode_getfrag(inode, x, fragment, 1, &err, &phys, &new,\ new 465 fs/ufs/inode.c &err, &phys, &new, bh_result->b_page) new 500 fs/ufs/inode.c if (new) new 106 fs/xfs/linux-2.6/kmem.c void *new; new 108 fs/xfs/linux-2.6/kmem.c new = kmem_alloc(newsize, flags); new 110 fs/xfs/linux-2.6/kmem.c if (new) new 111 fs/xfs/linux-2.6/kmem.c memcpy(new, ptr, new 115 fs/xfs/linux-2.6/kmem.c return new; new 1313 fs/xfs/xfs_alloc_btree.c xfs_alloc_block_t *new; /* new (root) btree block */ new 1339 fs/xfs/xfs_alloc_btree.c new = XFS_BUF_TO_ALLOC_BLOCK(nbp); new 1403 fs/xfs/xfs_alloc_btree.c new->bb_magic = cpu_to_be32(xfs_magics[cur->bc_btnum]); new 1404 fs/xfs/xfs_alloc_btree.c new->bb_level = cpu_to_be16(cur->bc_nlevels); new 1405 fs/xfs/xfs_alloc_btree.c new->bb_numrecs = cpu_to_be16(2); new 1406 fs/xfs/xfs_alloc_btree.c new->bb_leftsib = cpu_to_be32(NULLAGBLOCK); new 1407 fs/xfs/xfs_alloc_btree.c new->bb_rightsib = cpu_to_be32(NULLAGBLOCK); new 1416 fs/xfs/xfs_alloc_btree.c kp = XFS_ALLOC_KEY_ADDR(new, 1, cur); new 1438 fs/xfs/xfs_alloc_btree.c pp = XFS_ALLOC_PTR_ADDR(new, 1, cur); new 547 fs/xfs/xfs_bmap.c XFS_BMAP_TRACE_INSERT("insert empty", ip, 0, 1, new, NULL, new 549 fs/xfs/xfs_bmap.c xfs_iext_insert(ifp, 0, 1, new); new 552 fs/xfs/xfs_bmap.c if (!ISNULLSTARTBLOCK(new->br_startblock)) { new 559 fs/xfs/xfs_bmap.c if (delta->xed_startoff > new->br_startoff) new 560 fs/xfs/xfs_bmap.c delta->xed_startoff = new->br_startoff; new 562 fs/xfs/xfs_bmap.c new->br_startoff + new->br_blockcount) new 563 fs/xfs/xfs_bmap.c delta->xed_blockcount = new->br_startoff + new 564 fs/xfs/xfs_bmap.c new->br_blockcount; new 570 fs/xfs/xfs_bmap.c else if (ISNULLSTARTBLOCK(new->br_startblock)) { new 574 fs/xfs/xfs_bmap.c if ((error = xfs_bmap_add_extent_hole_delay(ip, idx, new, new 585 fs/xfs/xfs_bmap.c if ((error = xfs_bmap_add_extent_hole_real(ip, idx, 
cur, new, new 601 fs/xfs/xfs_bmap.c if (!ISNULLSTARTBLOCK(new->br_startblock) && new 602 fs/xfs/xfs_bmap.c new->br_startoff + new->br_blockcount > prev.br_startoff) { new 610 fs/xfs/xfs_bmap.c idx, &cur, new, &da_new, first, flist, new 613 fs/xfs/xfs_bmap.c } else if (new->br_state == XFS_EXT_NORM) { new 614 fs/xfs/xfs_bmap.c ASSERT(new->br_state == XFS_EXT_NORM); new 616 fs/xfs/xfs_bmap.c ip, idx, &cur, new, &logflags, delta))) new 619 fs/xfs/xfs_bmap.c ASSERT(new->br_state == XFS_EXT_UNWRITTEN); new 621 fs/xfs/xfs_bmap.c ip, idx, &cur, new, &logflags, delta))) new 634 fs/xfs/xfs_bmap.c new, &logflags, delta, whichfork))) new 744 fs/xfs/xfs_bmap.c new_endoff = new->br_startoff + new->br_blockcount; new 745 fs/xfs/xfs_bmap.c ASSERT(PREV.br_startoff <= new->br_startoff); new 751 fs/xfs/xfs_bmap.c STATE_SET(LEFT_FILLING, PREV.br_startoff == new->br_startoff); new 764 fs/xfs/xfs_bmap.c LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff && new 765 fs/xfs/xfs_bmap.c LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock && new 766 fs/xfs/xfs_bmap.c LEFT.br_state == new->br_state && new 767 fs/xfs/xfs_bmap.c LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN); new 782 fs/xfs/xfs_bmap.c new->br_startblock + new->br_blockcount == new 784 fs/xfs/xfs_bmap.c new->br_state == RIGHT.br_state && new 785 fs/xfs/xfs_bmap.c new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN && new 788 fs/xfs/xfs_bmap.c LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount new 884 fs/xfs/xfs_bmap.c xfs_bmbt_set_startblock(ep, new->br_startblock); new 901 fs/xfs/xfs_bmap.c new->br_startblock, new 920 fs/xfs/xfs_bmap.c xfs_bmbt_set_startblock(ep, new->br_startblock); new 928 fs/xfs/xfs_bmap.c if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff, new 929 fs/xfs/xfs_bmap.c new->br_startblock, new->br_blockcount, new 940 fs/xfs/xfs_bmap.c temp = new->br_startoff; new 941 fs/xfs/xfs_bmap.c temp2 = new->br_blockcount; new 951 fs/xfs/xfs_bmap.c LEFT.br_blockcount + new->br_blockcount); new 953 fs/xfs/xfs_bmap.c PREV.br_startoff + new->br_blockcount); new 955 fs/xfs/xfs_bmap.c temp = PREV.br_blockcount - new->br_blockcount; new 971 fs/xfs/xfs_bmap.c new->br_blockcount, new 993 fs/xfs/xfs_bmap.c temp = PREV.br_blockcount - new->br_blockcount; new 995 fs/xfs/xfs_bmap.c XFS_BMAP_TRACE_INSERT("LF", ip, idx, 1, new, NULL, new 997 fs/xfs/xfs_bmap.c xfs_iext_insert(ifp, idx, 1, new); new 1004 fs/xfs/xfs_bmap.c if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff, new 1005 fs/xfs/xfs_bmap.c new->br_startblock, new->br_blockcount, new 1040 fs/xfs/xfs_bmap.c temp = PREV.br_blockcount - new->br_blockcount; new 1045 fs/xfs/xfs_bmap.c new->br_startoff, new->br_startblock, new 1046 fs/xfs/xfs_bmap.c new->br_blockcount + RIGHT.br_blockcount, new 1059 fs/xfs/xfs_bmap.c if ((error = xfs_bmbt_update(cur, new->br_startoff, new 1060 fs/xfs/xfs_bmap.c new->br_startblock, new 1061 fs/xfs/xfs_bmap.c new->br_blockcount + new 1082 fs/xfs/xfs_bmap.c temp = PREV.br_blockcount - new->br_blockcount; new 1085 fs/xfs/xfs_bmap.c XFS_BMAP_TRACE_INSERT("RF", ip, idx + 1, 1, new, NULL, new 1087 fs/xfs/xfs_bmap.c xfs_iext_insert(ifp, idx + 1, 1, new); new 1094 fs/xfs/xfs_bmap.c if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff, new 1095 fs/xfs/xfs_bmap.c new->br_startblock, new->br_blockcount, new 1131 fs/xfs/xfs_bmap.c temp = new->br_startoff - PREV.br_startoff; new 1134 fs/xfs/xfs_bmap.c r[0] = *new; new 1149 fs/xfs/xfs_bmap.c if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff, new 1150 fs/xfs/xfs_bmap.c new->br_startblock, 
new->br_blockcount, new 1302 fs/xfs/xfs_bmap.c newext = new->br_state; new 1306 fs/xfs/xfs_bmap.c new_endoff = new->br_startoff + new->br_blockcount; new 1307 fs/xfs/xfs_bmap.c ASSERT(PREV.br_startoff <= new->br_startoff); new 1313 fs/xfs/xfs_bmap.c STATE_SET(LEFT_FILLING, PREV.br_startoff == new->br_startoff); new 1326 fs/xfs/xfs_bmap.c LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff && new 1327 fs/xfs/xfs_bmap.c LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock && new 1329 fs/xfs/xfs_bmap.c LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN); new 1344 fs/xfs/xfs_bmap.c new->br_startblock + new->br_blockcount == new 1347 fs/xfs/xfs_bmap.c new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN && new 1350 fs/xfs/xfs_bmap.c LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount new 1480 fs/xfs/xfs_bmap.c if ((error = xfs_bmbt_update(cur, new->br_startoff, new 1481 fs/xfs/xfs_bmap.c new->br_startblock, new 1482 fs/xfs/xfs_bmap.c new->br_blockcount + RIGHT.br_blockcount, new 1508 fs/xfs/xfs_bmap.c if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff, new 1509 fs/xfs/xfs_bmap.c new->br_startblock, new->br_blockcount, new 1513 fs/xfs/xfs_bmap.c if ((error = xfs_bmbt_update(cur, new->br_startoff, new 1514 fs/xfs/xfs_bmap.c new->br_startblock, new->br_blockcount, new 1519 fs/xfs/xfs_bmap.c temp = new->br_startoff; new 1520 fs/xfs/xfs_bmap.c temp2 = new->br_blockcount; new 1531 fs/xfs/xfs_bmap.c LEFT.br_blockcount + new->br_blockcount); new 1533 fs/xfs/xfs_bmap.c PREV.br_startoff + new->br_blockcount); new 1539 fs/xfs/xfs_bmap.c new->br_startblock + new->br_blockcount); new 1541 fs/xfs/xfs_bmap.c PREV.br_blockcount - new->br_blockcount); new 1555 fs/xfs/xfs_bmap.c PREV.br_startoff + new->br_blockcount, new 1556 fs/xfs/xfs_bmap.c PREV.br_startblock + new->br_blockcount, new 1557 fs/xfs/xfs_bmap.c PREV.br_blockcount - new->br_blockcount, new 1564 fs/xfs/xfs_bmap.c LEFT.br_blockcount + new->br_blockcount, new 1583 fs/xfs/xfs_bmap.c PREV.br_blockcount - new->br_blockcount); new 1585 fs/xfs/xfs_bmap.c new->br_startblock + new->br_blockcount); new 1587 fs/xfs/xfs_bmap.c XFS_BMAP_TRACE_INSERT("LF", ip, idx, 1, new, NULL, new 1589 fs/xfs/xfs_bmap.c xfs_iext_insert(ifp, idx, 1, new); new 1602 fs/xfs/xfs_bmap.c PREV.br_startoff + new->br_blockcount, new 1603 fs/xfs/xfs_bmap.c PREV.br_startblock + new->br_blockcount, new 1604 fs/xfs/xfs_bmap.c PREV.br_blockcount - new->br_blockcount, new 1607 fs/xfs/xfs_bmap.c cur->bc_rec.b = *new; new 1627 fs/xfs/xfs_bmap.c PREV.br_blockcount - new->br_blockcount); new 1631 fs/xfs/xfs_bmap.c new->br_startoff, new->br_startblock, new 1632 fs/xfs/xfs_bmap.c new->br_blockcount + RIGHT.br_blockcount, newext); new 1647 fs/xfs/xfs_bmap.c PREV.br_blockcount - new->br_blockcount, new 1652 fs/xfs/xfs_bmap.c if ((error = xfs_bmbt_update(cur, new->br_startoff, new 1653 fs/xfs/xfs_bmap.c new->br_startblock, new 1654 fs/xfs/xfs_bmap.c new->br_blockcount + RIGHT.br_blockcount, new 1671 fs/xfs/xfs_bmap.c PREV.br_blockcount - new->br_blockcount); new 1673 fs/xfs/xfs_bmap.c XFS_BMAP_TRACE_INSERT("RF", ip, idx + 1, 1, new, NULL, new 1675 fs/xfs/xfs_bmap.c xfs_iext_insert(ifp, idx + 1, 1, new); new 1689 fs/xfs/xfs_bmap.c PREV.br_blockcount - new->br_blockcount, new 1692 fs/xfs/xfs_bmap.c if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff, new 1693 fs/xfs/xfs_bmap.c new->br_startblock, new->br_blockcount, new 1715 fs/xfs/xfs_bmap.c new->br_startoff - PREV.br_startoff); new 1717 fs/xfs/xfs_bmap.c r[0] = *new; new 1721 fs/xfs/xfs_bmap.c r[1].br_startblock = 
new->br_startblock + new->br_blockcount; new 1745 fs/xfs/xfs_bmap.c new->br_startoff - PREV.br_startoff; new 1754 fs/xfs/xfs_bmap.c if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff, new 1755 fs/xfs/xfs_bmap.c new->br_startblock, new->br_blockcount, new 1760 fs/xfs/xfs_bmap.c cur->bc_rec.b.br_state = new->br_state; new 1846 fs/xfs/xfs_bmap.c ASSERT(ISNULLSTARTBLOCK(new->br_startblock)); new 1870 fs/xfs/xfs_bmap.c left.br_startoff + left.br_blockcount == new->br_startoff && new 1871 fs/xfs/xfs_bmap.c left.br_blockcount + new->br_blockcount <= MAXEXTLEN); new 1874 fs/xfs/xfs_bmap.c new->br_startoff + new->br_blockcount == right.br_startoff && new 1875 fs/xfs/xfs_bmap.c new->br_blockcount + right.br_blockcount <= MAXEXTLEN && new 1877 fs/xfs/xfs_bmap.c (left.br_blockcount + new->br_blockcount + new 1890 fs/xfs/xfs_bmap.c temp = left.br_blockcount + new->br_blockcount + new 1896 fs/xfs/xfs_bmap.c STARTBLOCKVAL(new->br_startblock) + new 1917 fs/xfs/xfs_bmap.c temp = left.br_blockcount + new->br_blockcount; new 1922 fs/xfs/xfs_bmap.c STARTBLOCKVAL(new->br_startblock); new 1941 fs/xfs/xfs_bmap.c temp = new->br_blockcount + right.br_blockcount; new 1942 fs/xfs/xfs_bmap.c oldlen = STARTBLOCKVAL(new->br_startblock) + new 1945 fs/xfs/xfs_bmap.c xfs_bmbt_set_allf(ep, new->br_startoff, new 1951 fs/xfs/xfs_bmap.c temp = new->br_startoff; new 1961 fs/xfs/xfs_bmap.c XFS_BMAP_TRACE_INSERT("0", ip, idx, 1, new, NULL, new 1963 fs/xfs/xfs_bmap.c xfs_iext_insert(ifp, idx, 1, new); new 1966 fs/xfs/xfs_bmap.c temp2 = new->br_blockcount; new 1967 fs/xfs/xfs_bmap.c temp = new->br_startoff; new 2060 fs/xfs/xfs_bmap.c left.br_startoff + left.br_blockcount == new->br_startoff && new 2061 fs/xfs/xfs_bmap.c left.br_startblock + left.br_blockcount == new->br_startblock && new 2062 fs/xfs/xfs_bmap.c left.br_state == new->br_state && new 2063 fs/xfs/xfs_bmap.c left.br_blockcount + new->br_blockcount <= MAXEXTLEN); new 2066 fs/xfs/xfs_bmap.c new->br_startoff + new->br_blockcount == right.br_startoff && new 2067 fs/xfs/xfs_bmap.c new->br_startblock + new->br_blockcount == new 2069 fs/xfs/xfs_bmap.c new->br_state == right.br_state && new 2070 fs/xfs/xfs_bmap.c new->br_blockcount + right.br_blockcount <= MAXEXTLEN && new 2072 fs/xfs/xfs_bmap.c left.br_blockcount + new->br_blockcount + new 2090 fs/xfs/xfs_bmap.c left.br_blockcount + new->br_blockcount + new 2118 fs/xfs/xfs_bmap.c new->br_blockcount + new 2126 fs/xfs/xfs_bmap.c new->br_blockcount + new 2138 fs/xfs/xfs_bmap.c left.br_blockcount + new->br_blockcount); new 2154 fs/xfs/xfs_bmap.c new->br_blockcount, new 2161 fs/xfs/xfs_bmap.c new->br_blockcount; new 2171 fs/xfs/xfs_bmap.c xfs_bmbt_set_allf(ep, new->br_startoff, new->br_startblock, new 2172 fs/xfs/xfs_bmap.c new->br_blockcount + right.br_blockcount, new 2186 fs/xfs/xfs_bmap.c if ((error = xfs_bmbt_update(cur, new->br_startoff, new 2187 fs/xfs/xfs_bmap.c new->br_startblock, new 2188 fs/xfs/xfs_bmap.c new->br_blockcount + new 2194 fs/xfs/xfs_bmap.c temp = new->br_startoff; new 2195 fs/xfs/xfs_bmap.c temp2 = new->br_blockcount + new 2205 fs/xfs/xfs_bmap.c XFS_BMAP_TRACE_INSERT("0", ip, idx, 1, new, NULL, whichfork); new 2206 fs/xfs/xfs_bmap.c xfs_iext_insert(ifp, idx, 1, new); new 2215 fs/xfs/xfs_bmap.c new->br_startoff, new 2216 fs/xfs/xfs_bmap.c new->br_startblock, new 2217 fs/xfs/xfs_bmap.c new->br_blockcount, &i))) new 2220 fs/xfs/xfs_bmap.c cur->bc_rec.b.br_state = new->br_state; new 2226 fs/xfs/xfs_bmap.c temp = new->br_startoff; new 2227 fs/xfs/xfs_bmap.c temp2 = new->br_blockcount; new 3075 
fs/xfs/xfs_bmap.c xfs_bmbt_irec_t new; /* new record to be inserted */ new 3244 fs/xfs/xfs_bmap.c new.br_startoff = del_endoff; new 3246 fs/xfs/xfs_bmap.c new.br_blockcount = temp2; new 3247 fs/xfs/xfs_bmap.c new.br_state = got.br_state; new 3249 fs/xfs/xfs_bmap.c new.br_startblock = del_endblock; new 3259 fs/xfs/xfs_bmap.c cur->bc_rec.b = new; new 3310 fs/xfs/xfs_bmap.c new.br_startblock = NULLSTARTBLOCK((int)temp2); new 3324 fs/xfs/xfs_bmap.c new.br_startblock = new 3330 fs/xfs/xfs_bmap.c XFS_BMAP_TRACE_INSERT("0", ip, idx + 1, 1, &new, NULL, new 3332 fs/xfs/xfs_bmap.c xfs_iext_insert(ifp, idx + 1, 1, &new); new 4106 fs/xfs/xfs_bmap.c xfs_bmap_free_item_t *new; /* new element */ new 4124 fs/xfs/xfs_bmap.c new = kmem_zone_alloc(xfs_bmap_free_item_zone, KM_SLEEP); new 4125 fs/xfs/xfs_bmap.c new->xbfi_startblock = bno; new 4126 fs/xfs/xfs_bmap.c new->xbfi_blockcount = (xfs_extlen_t)len; new 4134 fs/xfs/xfs_bmap.c prev->xbfi_next = new; new 4136 fs/xfs/xfs_bmap.c flist->xbf_first = new; new 4137 fs/xfs/xfs_bmap.c new->xbfi_next = cur; new 385 fs/xfs/xfs_btree.c xfs_btree_cur_t *new; /* new cursor value */ new 393 fs/xfs/xfs_btree.c new = xfs_btree_init_cursor(mp, tp, cur->bc_private.a.agbp, new 399 fs/xfs/xfs_btree.c new->bc_rec = cur->bc_rec; new 403 fs/xfs/xfs_btree.c for (i = 0; i < new->bc_nlevels; i++) { new 404 fs/xfs/xfs_btree.c new->bc_ptrs[i] = cur->bc_ptrs[i]; new 405 fs/xfs/xfs_btree.c new->bc_ra[i] = cur->bc_ra[i]; new 409 fs/xfs/xfs_btree.c xfs_btree_del_cursor(new, error); new 413 fs/xfs/xfs_btree.c new->bc_bufs[i] = bp; new 417 fs/xfs/xfs_btree.c new->bc_bufs[i] = NULL; new 423 fs/xfs/xfs_btree.c if (new->bc_btnum == XFS_BTNUM_BMAP) { new 424 fs/xfs/xfs_btree.c new->bc_private.b.firstblock = cur->bc_private.b.firstblock; new 425 fs/xfs/xfs_btree.c new->bc_private.b.flist = cur->bc_private.b.flist; new 426 fs/xfs/xfs_btree.c new->bc_private.b.flags = cur->bc_private.b.flags; new 428 fs/xfs/xfs_btree.c *ncur = new; new 253 fs/xfs/xfs_dir2_data.c xfs_dir2_data_free_t new; /* new bestfree entry */ new 260 fs/xfs/xfs_dir2_data.c new.length = dup->length; new 261 fs/xfs/xfs_dir2_data.c new.offset = cpu_to_be16((char *)dup - (char *)d); new 265 fs/xfs/xfs_dir2_data.c if (be16_to_cpu(new.length) > be16_to_cpu(dfp[0].length)) { new 268 fs/xfs/xfs_dir2_data.c dfp[0] = new; new 272 fs/xfs/xfs_dir2_data.c if (be16_to_cpu(new.length) > be16_to_cpu(dfp[1].length)) { new 274 fs/xfs/xfs_dir2_data.c dfp[1] = new; new 278 fs/xfs/xfs_dir2_data.c if (be16_to_cpu(new.length) > be16_to_cpu(dfp[2].length)) { new 279 fs/xfs/xfs_dir2_data.c dfp[2] = new; new 137 fs/xfs/xfs_fsops.c xfs_rfsblock_t new; new 158 fs/xfs/xfs_fsops.c new = nb; /* use new as a temporary here */ new 159 fs/xfs/xfs_fsops.c nb_mod = do_div(new, mp->m_sb.sb_agblocks); new 160 fs/xfs/xfs_fsops.c nagcount = new + (nb_mod != 0); new 167 fs/xfs/xfs_fsops.c new = nb - mp->m_sb.sb_dblocks; new 191 fs/xfs/xfs_fsops.c for (agno = nagcount - 1; agno >= oagcount; agno--, new -= agsize) { new 313 fs/xfs/xfs_fsops.c if (new) { new 323 fs/xfs/xfs_fsops.c be32_add_cpu(&agi->agi_length, new); new 336 fs/xfs/xfs_fsops.c be32_add_cpu(&agf->agf_length, new); new 344 fs/xfs/xfs_fsops.c be32_to_cpu(agf->agf_length) - new), new); new 1197 fs/xfs/xfs_ialloc_btree.c xfs_inobt_block_t *new; /* new (root) btree block */ new 1230 fs/xfs/xfs_ialloc_btree.c new = XFS_BUF_TO_INOBT_BLOCK(nbp); new 1288 fs/xfs/xfs_ialloc_btree.c new->bb_magic = cpu_to_be32(xfs_magics[cur->bc_btnum]); new 1289 fs/xfs/xfs_ialloc_btree.c new->bb_level = 
cpu_to_be16(cur->bc_nlevels); new 1290 fs/xfs/xfs_ialloc_btree.c new->bb_numrecs = cpu_to_be16(2); new 1291 fs/xfs/xfs_ialloc_btree.c new->bb_leftsib = cpu_to_be32(NULLAGBLOCK); new 1292 fs/xfs/xfs_ialloc_btree.c new->bb_rightsib = cpu_to_be32(NULLAGBLOCK); new 1298 fs/xfs/xfs_ialloc_btree.c kp = XFS_INOBT_KEY_ADDR(new, 1, cur); new 1312 fs/xfs/xfs_ialloc_btree.c pp = XFS_INOBT_PTR_ADDR(new, 1, cur); new 3555 fs/xfs/xfs_inode.c for (i = idx; i < idx + count; i++, new++) new 3556 fs/xfs/xfs_inode.c xfs_bmbt_set_all(xfs_iext_get_ext(ifp, i), new); new 910 fs/xfs/xfs_rtalloc.c xfs_rtblock_t new; /* dummy for xfs_rtcheck_range */ new 912 fs/xfs/xfs_rtalloc.c return xfs_rtcheck_range(mp, tp, bno, len, 0, &new, stat); new 986 fs/xfs/xfs_rtalloc.c *new = start + i; new 1032 fs/xfs/xfs_rtalloc.c *new = start + i; new 1077 fs/xfs/xfs_rtalloc.c *new = start + i; new 1087 fs/xfs/xfs_rtalloc.c *new = start + i; new 136 include/asm-cris/atomic.h v->counter = new; new 141 include/asm-cris/atomic.h #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) new 181 include/asm-frv/atomic.h #define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new)) new 182 include/asm-frv/atomic.h #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) new 218 include/asm-frv/system.h __typeof__(*(ptr)) __xg_new = (new); \ new 258 include/asm-frv/system.h __typeof__(*(ptr)) __xg_new = (new); \ new 284 include/asm-frv/system.h return cmpxchg((unsigned long *)ptr, old, new); new 286 include/asm-frv/system.h return __cmpxchg_local_generic(ptr, old, new, size); new 135 include/asm-generic/atomic.h (atomic_cmpxchg((atomic64_t *)(l), (old), (new))) new 137 include/asm-generic/atomic.h (atomic_xchg((atomic64_t *)(l), (new))) new 252 include/asm-generic/atomic.h (atomic_cmpxchg((atomic_t *)(l), (old), (new))) new 254 include/asm-generic/atomic.h (atomic_xchg((atomic_t *)(l), (new))) new 27 include/asm-generic/cmpxchg-local.h *(u8 *)ptr = (u8)new; new 31 include/asm-generic/cmpxchg-local.h *(u16 *)ptr = (u16)new; new 35 include/asm-generic/cmpxchg-local.h *(u32 *)ptr = (u32)new; new 39 include/asm-generic/cmpxchg-local.h *(u64 *)ptr = (u64)new; new 60 include/asm-generic/cmpxchg-local.h *(u64 *)ptr = new; new 245 include/asm-m32r/atomic.h #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) new 248 include/asm-m32r/local.h #define local_xchg(v, new) (xchg_local(&((l)->counter), new)) new 257 include/asm-m32r/system.h : "r" (p), "r" (old), "r" (new) new 288 include/asm-m32r/system.h : "r" (p), "r" (old), "r" (new) new 308 include/asm-m32r/system.h return __cmpxchg_u32(ptr, old, new); new 311 include/asm-m32r/system.h return __cmpxchg_u64(ptr, old, new); new 330 include/asm-m32r/system.h return __cmpxchg_local_u32(ptr, old, new); new 332 include/asm-m32r/system.h return __cmpxchg_local_generic(ptr, old, new, size); new 87 include/asm-m68k/atomic.h #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) new 127 include/asm-m68k/atomic.h atomic_set(v, new); new 139 include/asm-m68k/atomic.h atomic_set(v, new); new 176 include/asm-m68k/system.h : "d" (new), "0" (old), "m" (*(char *)p)); new 181 include/asm-m68k/system.h : "d" (new), "0" (old), "m" (*(short *)p)); new 186 include/asm-m68k/system.h : "d" (new), "0" (old), "m" (*(int *)p)); new 155 include/asm-mn10300/atomic.h #define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new))) new 224 include/asm-mn10300/system.h *m = new; new 200 include/asm-parisc/atomic.h #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) new 316 
include/asm-parisc/atomic.h #define atomic64_xchg(v, new) (xchg(&((v)->counter), new)) new 457 include/asm-parisc/pgtable.h unsigned long new, old; new 461 include/asm-parisc/pgtable.h new = pte_val(pte_wrprotect(__pte (old))); new 462 include/asm-parisc/pgtable.h } while (cmpxchg((unsigned long *) ptep, old, new) != old); new 28 include/asm-um/mmu_context.h __switch_mm(&new->context.id); new 29 include/asm-um/mmu_context.h arch_dup_mmap(old, new); new 211 include/asm-x86/atomic_32.h #define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new))) new 212 include/asm-x86/atomic_32.h #define atomic_xchg(v, new) (xchg(&((v)->counter), (new))) new 372 include/asm-x86/atomic_64.h #define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new))) new 373 include/asm-x86/atomic_64.h #define atomic64_xchg(v, new) (xchg(&((v)->counter), new)) new 375 include/asm-x86/atomic_64.h #define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new))) new 376 include/asm-x86/atomic_64.h #define atomic_xchg(v, new) (xchg(&((v)->counter), (new))) new 144 include/asm-x86/cmpxchg_32.h : "q"(new), "m"(*__xg(ptr)), "0"(old) new 150 include/asm-x86/cmpxchg_32.h : "r"(new), "m"(*__xg(ptr)), "0"(old) new 156 include/asm-x86/cmpxchg_32.h : "r"(new), "m"(*__xg(ptr)), "0"(old) new 177 include/asm-x86/cmpxchg_32.h : "q"(new), "m"(*__xg(ptr)), "0"(old) new 183 include/asm-x86/cmpxchg_32.h : "r"(new), "m"(*__xg(ptr)), "0"(old) new 189 include/asm-x86/cmpxchg_32.h : "r"(new), "m"(*__xg(ptr)), "0"(old) new 205 include/asm-x86/cmpxchg_32.h : "q"(new), "m"(*__xg(ptr)), "0"(old) new 211 include/asm-x86/cmpxchg_32.h : "r"(new), "m"(*__xg(ptr)), "0"(old) new 217 include/asm-x86/cmpxchg_32.h : "r"(new), "m"(*__xg(ptr)), "0"(old) new 231 include/asm-x86/cmpxchg_32.h : "b"((unsigned long)new), new 232 include/asm-x86/cmpxchg_32.h "c"((unsigned long)(new >> 32)), new 246 include/asm-x86/cmpxchg_32.h : "b"((unsigned long)new), new 247 include/asm-x86/cmpxchg_32.h "c"((unsigned long)(new >> 32)), new 270 include/asm-x86/cmpxchg_32.h return cmpxchg_386_u8(ptr, old, new); new 272 include/asm-x86/cmpxchg_32.h return cmpxchg_386_u16(ptr, old, new); new 274 include/asm-x86/cmpxchg_32.h return cmpxchg_386_u32(ptr, old, new); new 71 include/asm-x86/cmpxchg_64.h : "q"(new), "m"(*__xg(ptr)), "0"(old) new 77 include/asm-x86/cmpxchg_64.h : "r"(new), "m"(*__xg(ptr)), "0"(old) new 83 include/asm-x86/cmpxchg_64.h : "r"(new), "m"(*__xg(ptr)), "0"(old) new 89 include/asm-x86/cmpxchg_64.h : "r"(new), "m"(*__xg(ptr)), "0"(old) new 110 include/asm-x86/cmpxchg_64.h : "q"(new), "m"(*__xg(ptr)), "0"(old) new 116 include/asm-x86/cmpxchg_64.h : "r"(new), "m"(*__xg(ptr)), "0"(old) new 122 include/asm-x86/cmpxchg_64.h : "r"(new), "m"(*__xg(ptr)), "0"(old) new 138 include/asm-x86/cmpxchg_64.h : "q"(new), "m"(*__xg(ptr)), "0"(old) new 144 include/asm-x86/cmpxchg_64.h : "r"(new), "m"(*__xg(ptr)), "0"(old) new 150 include/asm-x86/cmpxchg_64.h : "r"(new), "m"(*__xg(ptr)), "0"(old) new 156 include/asm-x86/cmpxchg_64.h : "r"(new), "m"(*__xg(ptr)), "0"(old) new 98 include/asm-x86/kvm_x86_emulate.h const void *new, new 44 include/asm-x86/mc146818rtc.h unsigned long new; new 45 include/asm-x86/mc146818rtc.h new = ((smp_processor_id() + 1) << 8) | reg; new 51 include/asm-x86/mc146818rtc.h if (__cmpxchg(&cmos_lock, 0, new, sizeof(cmos_lock)) == 0) new 82 include/asm-x86/spinlock.h int tmp, new; new 92 include/asm-x86/spinlock.h : "=&a" (tmp), "=&q" (new), "+m" (lock->slock) new 133 include/asm-x86/spinlock.h int new; new 145 
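The asm-*/atomic.h, asm-*/system.h and asm-x86/cmpxchg_*.h occurrences above, and especially the include/asm-parisc/pgtable.h loop (read the old pte, compute a write-protected new value, retry cmpxchg until it sticks), all follow the same read-modify-compare-and-swap idiom. Below is a minimal userspace sketch of that retry loop using the GCC/Clang __atomic builtins rather than the per-architecture kernel primitives listed here; the function name and bit layout are invented for illustration.

```c
#include <stdint.h>

/*
 * Atomically clear the low two bits of *word while preserving the rest.
 * Loosely modelled on the read/derive/cmpxchg loops visible in the listing
 * above; not kernel code.
 */
static inline void clear_low_bits(uint32_t *word)
{
	uint32_t old, new_val;

	old = __atomic_load_n(word, __ATOMIC_RELAXED);
	do {
		new_val = old & ~0x3u;	/* derive the new value from the old one */
	} while (!__atomic_compare_exchange_n(word, &old, new_val,
					      0 /* strong */,
					      __ATOMIC_ACQ_REL,
					      __ATOMIC_RELAXED));
	/* On failure the builtin reloads 'old' with the current memory value,
	 * so the next iteration recomputes new_val from what is really there. */
}
```

The point of the loop is that the "new" value is always derived from the "old" value that was actually observed; if another CPU changed the word in between, the compare-and-swap fails and the computation is redone.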
include/asm-x86/spinlock.h : "=&a" (tmp), "=&q" (new), "+m" (lock->slock) new 226 include/asm-xtensa/atomic.h #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) new 96 include/asm-xtensa/system.h : "a" (p), "a" (old), "r" (new) new 109 include/asm-xtensa/system.h case 4: return __cmpxchg_u32(ptr, old, new); new 130 include/asm-xtensa/system.h return __cmpxchg_u32(ptr, old, new); new 132 include/asm-xtensa/system.h return __cmpxchg_local_generic(ptr, old, new, size); new 581 include/drm/drmP.h int new); new 329 include/linux/cgroup.h struct cgroup *new); new 126 include/linux/cpufreq.h unsigned int new; new 370 include/linux/cpumask.h __cpu_remap((oldbit), &(old), &(new), NR_CPUS) new 378 include/linux/cpumask.h __cpus_remap(&(dst), &(src), &(old), &(new), NR_CPUS) new 138 include/linux/hdlcdrv.h unsigned char new; new 140 include/linux/hdlcdrv.h new = buf->shreg & 1; new 143 include/linux/hdlcdrv.h if (new) { new 218 include/linux/jffs2.h jint32_t new; new 45 include/linux/list.h next->prev = new; new 46 include/linux/list.h new->next = next; new 47 include/linux/list.h new->prev = prev; new 48 include/linux/list.h prev->next = new; new 66 include/linux/list.h __list_add(new, head, head->next); new 80 include/linux/list.h __list_add(new, head->prev, head); new 123 include/linux/list.h new->next = old->next; new 124 include/linux/list.h new->next->prev = new; new 125 include/linux/list.h new->prev = old->prev; new 126 include/linux/list.h new->prev->next = new; new 132 include/linux/list.h list_replace(old, new); new 629 include/linux/list.h new->first = old->first; new 630 include/linux/list.h if (new->first) new 631 include/linux/list.h new->first->pprev = &new->first; new 12 include/linux/mISDNdsp.h void *(*new)(const char *arg); new 192 include/linux/mm.h int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new); new 152 include/linux/netfilter/nf_conntrack_common.h unsigned int new; new 140 include/linux/nfsd/xdr4.h } new; new 158 include/linux/nfsd/xdr4.h #define lk_new_open_seqid v.new.open_seqid new 159 include/linux/nfsd/xdr4.h #define lk_new_open_stateid v.new.open_stateid new 160 include/linux/nfsd/xdr4.h #define lk_new_lock_seqid v.new.lock_seqid new 161 include/linux/nfsd/xdr4.h #define lk_new_clientid v.new.clientid new 162 include/linux/nfsd/xdr4.h #define lk_new_owner v.new.owner new 318 include/linux/nodemask.h __node_remap((oldbit), &(old), &(new), MAX_NUMNODES) new 326 include/linux/nodemask.h __nodes_remap(&(dst), &(src), &(old), &(new), MAX_NUMNODES) new 21 include/linux/rculist.h new->next = next; new 22 include/linux/rculist.h new->prev = prev; new 23 include/linux/rculist.h rcu_assign_pointer(prev->next, new); new 24 include/linux/rculist.h next->prev = new; new 45 include/linux/rculist.h __list_add_rcu(new, head, head->next); new 67 include/linux/rculist.h __list_add_rcu(new, head->prev, head); new 139 include/linux/rculist.h new->next = old->next; new 140 include/linux/rculist.h new->prev = old->prev; new 141 include/linux/rculist.h rcu_assign_pointer(new->prev->next, new); new 142 include/linux/rculist.h new->next->prev = new; new 275 include/linux/rculist.h new->next = next; new 276 include/linux/rculist.h new->pprev = old->pprev; new 277 include/linux/rculist.h rcu_assign_pointer(*new->pprev, new); new 279 include/linux/rculist.h new->next->pprev = &new->next; new 191 include/linux/serial_core.h void (*set_termios)(struct uart_port *, struct ktermios *new, new 84 include/linux/sunrpc/cache.h int (*match)(struct cache_head *orig, struct 
cache_head *new); new 85 include/linux/sunrpc/cache.h void (*init)(struct cache_head *orig, struct cache_head *new); new 86 include/linux/sunrpc/cache.h void (*update)(struct cache_head *orig, struct cache_head *new); new 126 include/linux/wait.h list_add(&new->task_list, &head->task_list); new 135 include/linux/wait.h list_add_tail(&new->task_list, &head->task_list); new 917 include/net/ip_vs.h __be32 diff[2] = { ~old, new }; new 927 include/net/ip_vs.h new[3], new[2], new[1], new[0] }; new 935 include/net/ip_vs.h __be16 diff[2] = { ~old, new }; new 19 include/net/netevent.h struct dst_entry *new; new 25 include/net/netfilter/nf_conntrack_expect.h void (*expectfn)(struct nf_conn *new, new 72 include/net/netfilter/nf_conntrack_extend.h void (*move)(void *new, void *old); new 47 include/net/netfilter/nf_conntrack_l4proto.h bool (*new)(struct nf_conn *ct, const struct sk_buff *skb, new 1273 include/net/tcp.h __skb_queue_before(&sk->sk_write_queue, skb, new); new 1276 include/net/tcp.h sk->sk_send_head = new; new 1325 include/net/tcp.h tcp_sk(sk)->highest_sack = new; new 318 include/sound/soc-dapm.h unsigned char new:1; /* cnew complete */ new 983 ipc/sem.c struct sem_undo *un, *new; new 1009 ipc/sem.c new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL); new 1010 ipc/sem.c if (!new) { new 1019 ipc/sem.c kfree(new); new 1030 ipc/sem.c kfree(new); new 1034 ipc/sem.c new->semadj = (short *) &new[1]; new 1035 ipc/sem.c new->ulp = ulp; new 1036 ipc/sem.c new->semid = semid; new 1038 ipc/sem.c list_add_rcu(&new->list_proc, &ulp->list_proc); new 1040 ipc/sem.c list_add(&new->list_id, &sma->list_id); new 1041 ipc/sem.c un = new; new 227 ipc/shm.c err = sfd->vm_ops->set_policy(vma, new); new 269 ipc/util.c err = idr_get_new(&ids->ipcs_idr, new, &id); new 275 ipc/util.c new->cuid = new->uid = current->euid; new 276 ipc/util.c new->gid = new->cgid = current->egid; new 278 ipc/util.c new->seq = ids->seq++; new 282 ipc/util.c new->id = ipc_buildid(id, new->seq); new 283 ipc/util.c spin_lock_init(&new->lock); new 284 ipc/util.c new->deleted = 0; new 286 ipc/util.c spin_lock(&new->lock); new 269 kernel/audit.c audit_log_format(ab, "%s=%d old=%d auid=%u ses=%u", function_name, new, new 302 kernel/audit.c rc = audit_log_config_change(function_name, new, old, loginuid, new 310 kernel/audit.c *to_change = new; new 823 kernel/audit.c char *old, *new; new 836 kernel/audit.c new = audit_unpack_string(&bufp, &msglen, sizes[1]); new 837 kernel/audit.c if (IS_ERR(new)) { new 838 kernel/audit.c err = PTR_ERR(new); new 843 kernel/audit.c err = audit_tag_tree(old, new); new 851 kernel/audit.c audit_log_untrustedstring(ab, new); new 855 kernel/audit.c kfree(new); new 199 kernel/audit_tree.c struct audit_chunk *new; new 227 kernel/audit_tree.c new = alloc_chunk(size); new 228 kernel/audit_tree.c if (!new) new 230 kernel/audit_tree.c if (inotify_clone_watch(&chunk->watch, &new->watch) < 0) { new 231 kernel/audit_tree.c free_chunk(new); new 237 kernel/audit_tree.c list_replace_init(&chunk->trees, &new->trees); new 251 kernel/audit_tree.c new->owners[i].owner = s; new 252 kernel/audit_tree.c new->owners[i].index = chunk->owners[j].index - j + i; new 256 kernel/audit_tree.c list_replace_init(&chunk->owners[i].list, &new->owners[j].list); new 259 kernel/audit_tree.c list_replace_rcu(&chunk->hash, &new->hash); new 260 kernel/audit_tree.c list_for_each_entry(owner, &new->trees, same_root) new 261 kernel/audit_tree.c owner->root = new; new 700 kernel/audit_tree.c err = path_lookup(new, 0, &nd); new 847 
kernel/auditfilter.c struct audit_watch *new; new 853 kernel/auditfilter.c new = audit_init_watch(path); new 854 kernel/auditfilter.c if (IS_ERR(new)) { new 859 kernel/auditfilter.c new->dev = old->dev; new 860 kernel/auditfilter.c new->ino = old->ino; new 862 kernel/auditfilter.c new->parent = old->parent; new 865 kernel/auditfilter.c return new; new 907 kernel/auditfilter.c struct audit_krule *new; new 915 kernel/auditfilter.c new = &entry->rule; new 916 kernel/auditfilter.c new->vers_ops = old->vers_ops; new 917 kernel/auditfilter.c new->flags = old->flags; new 918 kernel/auditfilter.c new->listnr = old->listnr; new 919 kernel/auditfilter.c new->action = old->action; new 921 kernel/auditfilter.c new->mask[i] = old->mask[i]; new 922 kernel/auditfilter.c new->buflen = old->buflen; new 923 kernel/auditfilter.c new->inode_f = old->inode_f; new 924 kernel/auditfilter.c new->watch = NULL; new 925 kernel/auditfilter.c new->field_count = old->field_count; new 933 kernel/auditfilter.c new->tree = old->tree; new 934 kernel/auditfilter.c memcpy(new->fields, old->fields, sizeof(struct audit_field) * fcount); new 939 kernel/auditfilter.c switch (new->fields[i].type) { new 950 kernel/auditfilter.c err = audit_dupe_lsm_field(&new->fields[i], new 958 kernel/auditfilter.c new->filterkey = fk; new 968 kernel/auditfilter.c new->watch = watch; new 889 kernel/cgroup.c struct cgroupfs_root *new = data; new 893 kernel/cgroup.c if (new->subsys_bits != root->subsys_bits) new 897 kernel/cgroup.c if (new->flags != root->flags) new 2748 kernel/cgroup.c if (new) new 2749 kernel/cgroup.c newcgrp = task_cgroup(new, ss->subsys_id); new 521 kernel/compat.c if (!new) new 523 kernel/compat.c if (get_compat_itimerspec(&newts, new)) new 992 kernel/compat.c unsigned long __user *new = NULL; new 1004 kernel/compat.c new = old + size / sizeof(unsigned long); new 1011 kernel/compat.c if (new == NULL) new 1012 kernel/compat.c new = compat_alloc_user_space(size); new 1013 kernel/compat.c if (copy_to_user(new, nodes_addr(tmp_mask), size)) new 1016 kernel/compat.c return sys_migrate_pages(pid, nr_bits + 1, old, new); new 363 kernel/irq/manage.c if (new->flags & IRQF_SAMPLE_RANDOM) { new 388 kernel/irq/manage.c if (!((old->flags & new->flags) & IRQF_SHARED) || new 389 kernel/irq/manage.c ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK)) { new 397 kernel/irq/manage.c (new->flags & IRQF_PERCPU)) new 413 kernel/irq/manage.c if (new->flags & IRQF_TRIGGER_MASK) { new 414 kernel/irq/manage.c ret = __irq_set_trigger(desc->chip, irq, new->flags); new 423 kernel/irq/manage.c if (new->flags & IRQF_PERCPU) new 445 kernel/irq/manage.c *p = new; new 448 kernel/irq/manage.c if (new->flags & IRQF_NOBALANCING) new 466 kernel/irq/manage.c new->irq = irq; new 468 kernel/irq/manage.c new->dir = NULL; new 469 kernel/irq/manage.c register_handler_proc(irq, new); new 475 kernel/irq/manage.c if (!(new->flags & IRQF_PROBE_SHARED)) { new 236 kernel/marker.c struct marker_probe_closure *old, *new; new 268 kernel/marker.c new = kzalloc((nr_probes + 2) * sizeof(struct marker_probe_closure), new 270 kernel/marker.c if (new == NULL) new 273 kernel/marker.c new[0] = entry->single; new 275 kernel/marker.c memcpy(new, old, new 277 kernel/marker.c new[nr_probes].func = probe; new 278 kernel/marker.c new[nr_probes].probe_private = probe_private; new 280 kernel/marker.c entry->multi = new; new 291 kernel/marker.c struct marker_probe_closure *old, *new; new 334 kernel/marker.c new = kzalloc((nr_probes - nr_del + 1) new 336 kernel/marker.c if (new == NULL) new 341 
kernel/marker.c new[j++] = old[i]; new 344 kernel/marker.c entry->multi = new; new 350 kernel/module.c int *new; new 352 kernel/module.c new = krealloc(pcpu_size, sizeof(new[0])*pcpu_num_allocated*2, new 354 kernel/module.c if (!new) new 358 kernel/module.c pcpu_size = new; new 221 kernel/nsproxy.c rcu_assign_pointer(p->nsproxy, new); new 336 kernel/pid.c link->pid = new; new 361 kernel/pid.c new->pids[type].pid = old->pids[type].pid; new 362 kernel/pid.c hlist_replace_rcu(&old->pids[type].node, &new->pids[type].node); new 719 kernel/posix-cpu-timers.c new_expires = timespec_to_sample(timer->it_clock, &new->it_value); new 828 kernel/posix-cpu-timers.c &new->it_interval); new 82 kernel/power/swsusp.c struct rb_node **new = &(swsusp_extents.rb_node); new 87 kernel/power/swsusp.c while (*new) { new 88 kernel/power/swsusp.c ext = container_of(*new, struct swsusp_extent, node); new 89 kernel/power/swsusp.c parent = *new; new 96 kernel/power/swsusp.c new = &((*new)->rb_left); new 103 kernel/power/swsusp.c new = &((*new)->rb_right); new 116 kernel/power/swsusp.c rb_link_node(&ext->node, parent, new); new 726 kernel/relay.c void *old, *new; new 757 kernel/relay.c new = buf->start + new_subbuf * buf->chan->subbuf_size; new 759 kernel/relay.c if (!buf->chan->cb->subbuf_start(buf, new, old, buf->prev_padding)) { new 763 kernel/relay.c buf->data = new; new 145 kernel/resource.c resource_size_t start = new->start; new 146 kernel/resource.c resource_size_t end = new->end; new 159 kernel/resource.c new->sibling = tmp; new 160 kernel/resource.c *p = new; new 161 kernel/resource.c new->parent = root; new 202 kernel/resource.c conflict = __request_resource(root, new); new 302 kernel/resource.c new->start = root->start; new 308 kernel/resource.c new->start = this->end + 1; new 313 kernel/resource.c new->end = this->start - 1; new 315 kernel/resource.c new->end = root->end; new 316 kernel/resource.c if (new->start < min) new 317 kernel/resource.c new->start = min; new 318 kernel/resource.c if (new->end > max) new 319 kernel/resource.c new->end = max; new 320 kernel/resource.c new->start = ALIGN(new->start, align); new 322 kernel/resource.c alignf(alignf_data, new, size, align); new 323 kernel/resource.c if (new->start < new->end && new->end - new->start >= size - 1) { new 324 kernel/resource.c new->end = new->start + size - 1; new 329 kernel/resource.c new->start = this->end + 1; new 356 kernel/resource.c err = find_resource(root, new, size, min, max, align, alignf, alignf_data); new 357 kernel/resource.c if (err >= 0 && __request_resource(root, new)) new 374 kernel/resource.c first = __request_resource(parent, new); new 381 kernel/resource.c if ((first->start > new->start) || (first->end < new->end)) new 383 kernel/resource.c if ((first->start == new->start) && (first->end == new->end)) new 389 kernel/resource.c if (next->start < new->start || next->end > new->end) new 393 kernel/resource.c if (next->sibling->start > new->end) new 397 kernel/resource.c new->parent = parent; new 398 kernel/resource.c new->sibling = next->sibling; new 399 kernel/resource.c new->child = first; new 403 kernel/resource.c next->parent = new; new 406 kernel/resource.c parent->child = new; new 411 kernel/resource.c next->sibling = new; new 434 kernel/resource.c conflict = __insert_resource(parent, new); new 449 kernel/resource.c if (new->parent) new 456 kernel/resource.c conflict = __insert_resource(root, new); new 463 kernel/resource.c if (conflict->start < new->start) new 464 kernel/resource.c new->start = conflict->start; new 465 
kernel/resource.c if (conflict->end > new->end) new 466 kernel/resource.c new->end = conflict->end; new 468 kernel/resource.c printk("Expanded resource %s due to conflict with %s\n", new->name, conflict->name); new 7750 kernel/sched.c if (!new && !cur) new 7755 kernel/sched.c new ? (new + idx_new) : &tmp, new 1800 kernel/sysctl.c struct ctl_table *new, **prevp; new 1821 kernel/sysctl.c new = (struct ctl_table *) (header + 1); new 1827 kernel/sysctl.c new->procname = path->procname; new 1828 kernel/sysctl.c new->ctl_name = path->ctl_name; new 1829 kernel/sysctl.c new->mode = 0555; new 1831 kernel/sysctl.c *prevp = new; new 1832 kernel/sysctl.c prevp = &new->child; new 1834 kernel/sysctl.c new += 2; new 2860 kernel/sysctl.c int new; new 2863 kernel/sysctl.c if (get_user(new, (int __user *)newval)) new 2865 kernel/sysctl.c *(int *)(table->data) = new*HZ; new 2894 kernel/sysctl.c int new; new 2897 kernel/sysctl.c if (get_user(new, (int __user *)newval)) new 2899 kernel/sysctl.c *(int *)(table->data) = msecs_to_jiffies(new); new 217 kernel/time/clockevents.c if (new) { new 218 kernel/time/clockevents.c BUG_ON(new->mode != CLOCK_EVT_MODE_UNUSED); new 219 kernel/time/clockevents.c clockevents_shutdown(new); new 172 kernel/time/timekeeping.c struct clocksource *new; new 176 kernel/time/timekeeping.c new = clocksource_get_next(); new 178 kernel/time/timekeeping.c if (clock == new) new 181 kernel/time/timekeeping.c new->cycle_last = 0; new 182 kernel/time/timekeeping.c now = clocksource_read(new); new 186 kernel/time/timekeeping.c clock = new; new 448 kernel/trace/ftrace.c new = old; new 452 kernel/trace/ftrace.c new = ftrace_call_replace(ip, FTRACE_ADDR); new 466 kernel/trace/ftrace.c new = ftrace_call_replace(ip, FTRACE_ADDR); new 481 kernel/trace/ftrace.c return ftrace_modify_code(ip, old, new); new 487 kernel/trace/ftrace.c unsigned char *new = NULL, *old = NULL; new 494 kernel/trace/ftrace.c new = ftrace_nop_replace(); new 512 kernel/trace/ftrace.c failed = __ftrace_replace_code(rec, old, new, enable); new 390 kernel/user.c struct user_struct *up, *new; new 402 kernel/user.c new = kmem_cache_zalloc(uid_cachep, GFP_KERNEL); new 403 kernel/user.c if (!new) new 406 kernel/user.c new->uid = uid; new 407 kernel/user.c atomic_set(&new->__count, 1); new 409 kernel/user.c if (sched_create_user(new) < 0) new 412 kernel/user.c if (uids_user_create(new)) new 427 kernel/user.c key_put(new->uid_keyring); new 428 kernel/user.c key_put(new->session_keyring); new 429 kernel/user.c kmem_cache_free(uid_cachep, new); new 431 kernel/user.c uid_hash_insert(new, hashent); new 432 kernel/user.c up = new; new 443 kernel/user.c sched_destroy_user(new); new 445 kernel/user.c kmem_cache_free(uid_cachep, new); new 112 kernel/workqueue.c unsigned long new; new 116 kernel/workqueue.c new = (unsigned long) cwq | (1UL << WORK_STRUCT_PENDING); new 117 kernel/workqueue.c new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work); new 118 kernel/workqueue.c atomic_long_set(&work->data, new); new 661 lib/bitmap.c w = bitmap_weight(new, bits); new 669 lib/bitmap.c set_bit(bitmap_ord_to_pos(new, n % w, bits), dst); new 703 lib/bitmap.c int w = bitmap_weight(new, bits); new 708 lib/bitmap.c return bitmap_ord_to_pos(new, n % w, bits); new 70 lib/debugobjects.c struct debug_obj *new; new 81 lib/debugobjects.c new = kmem_cache_zalloc(obj_cache, gfp); new 82 lib/debugobjects.c if (!new) new 86 lib/debugobjects.c hlist_add_head(&new->node, &obj_pool); new 123 lib/idr.c struct idr_layer *new; new 124 lib/idr.c new = 
kmem_cache_alloc(idr_layer_cache, gfp_mask); new 125 lib/idr.c if (new == NULL) new 127 lib/idr.c move_to_free_list(idp, new); new 136 lib/idr.c struct idr_layer *p, *new; new 185 lib/idr.c new = get_from_free_list(idp); new 186 lib/idr.c if (!new) new 188 lib/idr.c rcu_assign_pointer(p->ary[m], new); new 202 lib/idr.c struct idr_layer *p, *new; new 223 lib/idr.c if (!(new = get_from_free_list(idp))) { new 229 lib/idr.c for (new = p; p && p != idp->top; new = p) { new 231 lib/idr.c new->ary[0] = NULL; new 232 lib/idr.c new->bitmap = new->count = 0; new 233 lib/idr.c __move_to_free_list(idp, new); new 238 lib/idr.c new->ary[0] = p; new 239 lib/idr.c new->count = 1; new 241 lib/idr.c __set_bit(0, &new->bitmap); new 242 lib/idr.c p = new; new 31 lib/list_debug.c next->prev = new; new 32 lib/list_debug.c new->next = next; new 33 lib/list_debug.c new->prev = prev; new 34 lib/list_debug.c prev->next = new; new 383 lib/rbtree.c parent->rb_left = new; new 385 lib/rbtree.c parent->rb_right = new; new 387 lib/rbtree.c root->rb_node = new; new 390 lib/rbtree.c rb_set_parent(victim->rb_left, new); new 392 lib/rbtree.c rb_set_parent(victim->rb_right, new); new 395 lib/rbtree.c *new = *victim; new 313 mm/memory.c pgtable_t new = pte_alloc_one(mm, address); new 314 mm/memory.c if (!new) new 335 mm/memory.c pmd_populate(mm, pmd, new); new 336 mm/memory.c new = NULL; new 339 mm/memory.c if (new) new 340 mm/memory.c pte_free(mm, new); new 346 mm/memory.c pte_t *new = pte_alloc_one_kernel(&init_mm, address); new 347 mm/memory.c if (!new) new 354 mm/memory.c pmd_populate_kernel(&init_mm, pmd, new); new 355 mm/memory.c new = NULL; new 358 mm/memory.c if (new) new 359 mm/memory.c pte_free_kernel(&init_mm, new); new 2715 mm/memory.c pud_t *new = pud_alloc_one(mm, address); new 2716 mm/memory.c if (!new) new 2723 mm/memory.c pud_free(mm, new); new 2725 mm/memory.c pgd_populate(mm, pgd, new); new 2738 mm/memory.c pmd_t *new = pmd_alloc_one(mm, address); new 2739 mm/memory.c if (!new) new 2747 mm/memory.c pmd_free(mm, new); new 2749 mm/memory.c pud_populate(mm, pud, new); new 2752 mm/memory.c pmd_free(mm, new); new 2754 mm/memory.c pgd_populate(mm, pud, new); new 329 mm/mempolicy.c mpol_rebind_policy(tsk->mempolicy, new); new 344 mm/mempolicy.c mpol_rebind_policy(vma->vm_policy, new); new 542 mm/mempolicy.c err = vma->vm_ops->set_policy(vma, new); new 544 mm/mempolicy.c mpol_get(new); new 545 mm/mempolicy.c vma->vm_policy = new; new 566 mm/mempolicy.c err = policy_vma(vma, new); new 607 mm/mempolicy.c struct mempolicy *new; new 610 mm/mempolicy.c new = mpol_new(mode, flags, nodes); new 611 mm/mempolicy.c if (IS_ERR(new)) new 612 mm/mempolicy.c return PTR_ERR(new); new 623 mm/mempolicy.c current->mempolicy = new; new 625 mm/mempolicy.c if (new && new->mode == MPOL_INTERLEAVE && new 626 mm/mempolicy.c nodes_weight(new->v.nodes)) new 627 mm/mempolicy.c current->il_next = first_node(new->v.nodes); new 932 mm/mempolicy.c struct mempolicy *new; new 957 mm/mempolicy.c new = mpol_new(mode, mode_flags, nmask); new 958 mm/mempolicy.c if (IS_ERR(new)) new 959 mm/mempolicy.c return PTR_ERR(new); new 965 mm/mempolicy.c if (!new) new 980 mm/mempolicy.c err = mbind_range(vma, start, end, new); new 991 mm/mempolicy.c mpol_put(new); new 1111 mm/mempolicy.c nodemask_t new; new 1119 mm/mempolicy.c err = get_nodes(&new, new_nodes, maxnode); new 1151 mm/mempolicy.c if (!nodes_subset(new, task_nodes) && !capable(CAP_SYS_NICE)) { new 1156 mm/mempolicy.c if (!nodes_subset(new, node_states[N_HIGH_MEMORY])) { new 1165 mm/mempolicy.c err = 
do_migrate_pages(mm, &old, &new, new 1611 mm/mempolicy.c struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL); new 1613 mm/mempolicy.c if (!new) new 1619 mm/mempolicy.c *new = *old; new 1620 mm/mempolicy.c atomic_set(&new->refcnt, 1); new 1621 mm/mempolicy.c return new; new 1731 mm/mempolicy.c if (new->start < nd->start) new 1733 mm/mempolicy.c else if (new->end > nd->end) new 1738 mm/mempolicy.c rb_link_node(&new->nd, parent, p); new 1739 mm/mempolicy.c rb_insert_color(&new->nd, &sp->root); new 1740 mm/mempolicy.c pr_debug("inserting %lx-%lx: %d\n", new->start, new->end, new 1741 mm/mempolicy.c new->policy ? new->policy->mode : 0); new 1824 mm/mempolicy.c if (new) new 1825 mm/mempolicy.c sp_insert(sp, new); new 1850 mm/mempolicy.c struct mempolicy *new; new 1853 mm/mempolicy.c new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask); new 1855 mm/mempolicy.c if (IS_ERR(new)) new 1861 mm/mempolicy.c mpol_set_shared_policy(sp, &pvma, new); /* adds ref */ new 1862 mm/mempolicy.c mpol_put(new); /* drop initial ref */ new 1870 mm/mempolicy.c struct sp_node *new = NULL; new 1880 mm/mempolicy.c new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol); new 1881 mm/mempolicy.c if (!new) new 1884 mm/mempolicy.c err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new); new 1885 mm/mempolicy.c if (err && new) new 1886 mm/mempolicy.c kmem_cache_free(sn_cache, new); new 1993 mm/mempolicy.c struct mempolicy *new = NULL; new 2074 mm/mempolicy.c new = mpol_new(mode, mode_flags, &nodes); new 2075 mm/mempolicy.c if (IS_ERR(new)) new 2078 mm/mempolicy.c new->w.user_nodemask = nodes; /* save for contextualization */ new 2087 mm/mempolicy.c *mpol = new; new 133 mm/migrate.c unsigned long addr = page_address_in_vma(new, vma); new 180 mm/migrate.c mem_cgroup_charge(new, mm, GFP_ATOMIC); new 182 mm/migrate.c get_page(new); new 183 mm/migrate.c pte = pte_mkold(mk_pte(new, vma->vm_page_prot)); new 189 mm/migrate.c if (PageAnon(new)) new 190 mm/migrate.c page_add_anon_rmap(new, vma, addr); new 192 mm/migrate.c page_add_file_rmap(new); new 208 mm/migrate.c struct address_space *mapping = page_mapping(new); new 210 mm/migrate.c pgoff_t pgoff = new->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); new 218 mm/migrate.c remove_migration_pte(vma, old, new); new 233 mm/migrate.c mapping = (unsigned long)new->mapping; new 245 mm/migrate.c remove_migration_pte(vma, old, new); new 256 mm/migrate.c if (PageAnon(new)) new 257 mm/migrate.c remove_anon_migration_ptes(old, new); new 259 mm/migrate.c remove_file_migration_ptes(old, new); new 1818 mm/mmap.c struct vm_area_struct *new; new 1827 mm/mmap.c new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); new 1828 mm/mmap.c if (!new) new 1832 mm/mmap.c *new = *vma; new 1835 mm/mmap.c new->vm_end = addr; new 1837 mm/mmap.c new->vm_start = addr; new 1838 mm/mmap.c new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT); new 1843 mm/mmap.c kmem_cache_free(vm_area_cachep, new); new 1846 mm/mmap.c vma_set_policy(new, pol); new 1848 mm/mmap.c if (new->vm_file) { new 1849 mm/mmap.c get_file(new->vm_file); new 1854 mm/mmap.c if (new->vm_ops && new->vm_ops->open) new 1855 mm/mmap.c new->vm_ops->open(new); new 1859 mm/mmap.c ((addr - new->vm_start) >> PAGE_SHIFT), new); new 1861 mm/mmap.c vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new); new 1454 mm/shmem.c return mpol_set_shared_policy(&SHMEM_I(i)->policy, vma, new); new 3895 mm/slab.c struct array_cache *new[NR_CPUS]; new 3900 mm/slab.c struct ccupdate_struct *new = info; new 3904 mm/slab.c old = 
cpu_cache_get(new->cachep); new 3906 mm/slab.c new->cachep->array[smp_processor_id()] = new->new[smp_processor_id()]; new 3907 mm/slab.c new->new[smp_processor_id()] = old; new 3914 mm/slab.c struct ccupdate_struct *new; new 3917 mm/slab.c new = kzalloc(sizeof(*new), GFP_KERNEL); new 3918 mm/slab.c if (!new) new 3922 mm/slab.c new->new[i] = alloc_arraycache(cpu_to_node(i), limit, new 3924 mm/slab.c if (!new->new[i]) { new 3926 mm/slab.c kfree(new->new[i]); new 3927 mm/slab.c kfree(new); new 3931 mm/slab.c new->cachep = cachep; new 3933 mm/slab.c on_each_cpu(do_ccupdate_local, (void *)new, 1); new 3941 mm/slab.c struct array_cache *ccold = new->new[i]; new 3949 mm/slab.c kfree(new); new 1505 mm/slub.c struct page *new; new 1539 mm/slub.c new = get_partial(s, gfpflags, node); new 1540 mm/slub.c if (new) { new 1541 mm/slub.c c->page = new; new 1549 mm/slub.c new = new_slab(s, gfpflags, node); new 1554 mm/slub.c if (new) { new 1559 mm/slub.c slab_lock(new); new 1560 mm/slub.c __SetPageSlubFrozen(new); new 1561 mm/slub.c c->page = new; new 177 net/802/garp.c d = garp_attr_cmp(attr, new->data, new->dlen, new->type); new 183 net/802/garp.c rb_link_node(&new->node, parent, p); new 184 net/802/garp.c rb_insert_color(&new->node, &app->gid); new 872 net/bluetooth/rfcomm/tty.c struct ktermios *new = tty->termios; new 874 net/bluetooth/rfcomm/tty.c int new_baud_rate = tty_termios_baud_rate(new); new 887 net/bluetooth/rfcomm/tty.c if ((old->c_cflag & CRTSCTS) && !(new->c_cflag & CRTSCTS)) new 891 net/bluetooth/rfcomm/tty.c if (((old->c_cflag & PARENB) != (new->c_cflag & PARENB)) || new 892 net/bluetooth/rfcomm/tty.c ((old->c_cflag & PARODD) != (new->c_cflag & PARODD)) ) { new 898 net/bluetooth/rfcomm/tty.c if (new->c_cflag & PARENB) { new 899 net/bluetooth/rfcomm/tty.c if (new->c_cflag & PARODD) { new 912 net/bluetooth/rfcomm/tty.c if (old->c_cc[VSTOP] != new->c_cc[VSTOP]) { new 914 net/bluetooth/rfcomm/tty.c x_on = new->c_cc[VSTOP]; new 921 net/bluetooth/rfcomm/tty.c if (old->c_cc[VSTART] != new->c_cc[VSTART]) { new 923 net/bluetooth/rfcomm/tty.c x_off = new->c_cc[VSTART]; new 931 net/bluetooth/rfcomm/tty.c if ((old->c_cflag & CSTOPB) != (new->c_cflag & CSTOPB)) new 937 net/bluetooth/rfcomm/tty.c if (new->c_cflag & CSTOPB) { new 944 net/bluetooth/rfcomm/tty.c if ((old->c_cflag & CSIZE) != (new->c_cflag & CSIZE)) new 947 net/bluetooth/rfcomm/tty.c switch (new->c_cflag & CSIZE) { new 1001 net/core/neighbour.c if (!(new & NUD_VALID)) { new 1005 net/core/neighbour.c neigh->nud_state = new; new 1034 net/core/neighbour.c if (new & NUD_CONNECTED) new 1049 net/core/neighbour.c new = NUD_STALE; new 1053 net/core/neighbour.c if (lladdr == neigh->ha && new == NUD_STALE && new 1057 net/core/neighbour.c new = old; new 1061 net/core/neighbour.c if (new != old) { new 1063 net/core/neighbour.c if (new & NUD_IN_TIMER) new 1065 net/core/neighbour.c ((new & NUD_REACHABLE) ? 
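The net/bluetooth/rfcomm/tty.c set_termios hits and the net/core/neighbour.c state-update hits above share another common shape: compare an `old` and a `new` copy of a settings or state word field by field, and only act on the bits that actually changed. A small userspace sketch of that idiom follows; the structure, flag names and helper are made up for illustration and are not the kernel code.

```c
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical flag bits standing in for things like CRTSCTS or PARENB. */
#define F_FLOWCTL	(1u << 0)
#define F_PARITY	(1u << 1)

struct settings {
	unsigned int flags;
	unsigned int speed;
};

/* Apply only the parts of 'next' that differ from 'old'. */
static bool apply_settings(const struct settings *old, const struct settings *next)
{
	bool changed = false;

	if ((old->flags ^ next->flags) & F_FLOWCTL) {
		printf("flow control %s\n", (next->flags & F_FLOWCTL) ? "on" : "off");
		changed = true;
	}
	if ((old->flags ^ next->flags) & F_PARITY) {
		printf("parity %s\n", (next->flags & F_PARITY) ? "on" : "off");
		changed = true;
	}
	if (old->speed != next->speed) {
		printf("speed %u -> %u\n", old->speed, next->speed);
		changed = true;
	}
	return changed;
}

int main(void)
{
	struct settings old  = { .flags = F_FLOWCTL, .speed = 9600 };
	struct settings next = { .flags = F_PARITY,  .speed = 115200 };

	apply_settings(&old, &next);
	return 0;
}
```

XOR-ing the old and new flag words isolates exactly the bits that toggled, which is why the listed set_termios implementations test `(old->c_cflag ^ new->c_cflag) & BIT` style expressions before touching the hardware.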
new 1068 net/core/neighbour.c neigh->nud_state = new; new 1074 net/core/neighbour.c if (!(new & NUD_CONNECTED)) new 1079 net/core/neighbour.c if (new == old) new 1081 net/core/neighbour.c if (new & NUD_CONNECTED) new 70 net/core/net-sysfs.c unsigned long new; new 76 net/core/net-sysfs.c new = simple_strtoul(buf, &endp, 0); new 82 net/core/net-sysfs.c if ((ret = (*set)(net, new)) == 0) new 486 net/core/skbuff.c new->tstamp = old->tstamp; new 487 net/core/skbuff.c new->dev = old->dev; new 488 net/core/skbuff.c new->transport_header = old->transport_header; new 489 net/core/skbuff.c new->network_header = old->network_header; new 490 net/core/skbuff.c new->mac_header = old->mac_header; new 491 net/core/skbuff.c new->dst = dst_clone(old->dst); new 493 net/core/skbuff.c new->sp = secpath_get(old->sp); new 495 net/core/skbuff.c memcpy(new->cb, old->cb, sizeof(old->cb)); new 496 net/core/skbuff.c new->csum_start = old->csum_start; new 497 net/core/skbuff.c new->csum_offset = old->csum_offset; new 498 net/core/skbuff.c new->local_df = old->local_df; new 499 net/core/skbuff.c new->pkt_type = old->pkt_type; new 500 net/core/skbuff.c new->ip_summed = old->ip_summed; new 501 net/core/skbuff.c skb_copy_queue_mapping(new, old); new 502 net/core/skbuff.c new->priority = old->priority; new 504 net/core/skbuff.c new->ipvs_property = old->ipvs_property; new 506 net/core/skbuff.c new->protocol = old->protocol; new 507 net/core/skbuff.c new->mark = old->mark; new 508 net/core/skbuff.c __nf_copy(new, old); new 511 net/core/skbuff.c new->nf_trace = old->nf_trace; new 514 net/core/skbuff.c new->tc_index = old->tc_index; new 516 net/core/skbuff.c new->tc_verd = old->tc_verd; new 519 net/core/skbuff.c new->vlan_tci = old->vlan_tci; new 521 net/core/skbuff.c skb_copy_secmark(new, old); new 614 net/core/skbuff.c unsigned long offset = new->data - old->data; new 617 net/core/skbuff.c __copy_skb_header(new, old); new 621 net/core/skbuff.c new->transport_header += offset; new 622 net/core/skbuff.c new->network_header += offset; new 623 net/core/skbuff.c new->mac_header += offset; new 625 net/core/skbuff.c skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size; new 626 net/core/skbuff.c skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs; new 627 net/core/skbuff.c skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type; new 144 net/dccp/ccids/lib/loss_interval.c struct tfrc_loss_interval *cur = tfrc_lh_peek(lh), *new; new 149 net/dccp/ccids/lib/loss_interval.c new = tfrc_lh_demand_next(lh); new 150 net/dccp/ccids/lib/loss_interval.c if (unlikely(new == NULL)) { new 155 net/dccp/ccids/lib/loss_interval.c new->li_seqno = tfrc_rx_hist_loss_prev(rh)->tfrchrx_seqno; new 156 net/dccp/ccids/lib/loss_interval.c new->li_ccval = tfrc_rx_hist_loss_prev(rh)->tfrchrx_ccval; new 157 net/dccp/ccids/lib/loss_interval.c new->li_is_closed = 0; new 160 net/dccp/ccids/lib/loss_interval.c lh->i_mean = new->li_length = (*calc_first_li)(sk); new 162 net/dccp/ccids/lib/loss_interval.c cur->li_length = dccp_delta_seqno(cur->li_seqno, new->li_seqno); new 163 net/dccp/ccids/lib/loss_interval.c new->li_length = dccp_delta_seqno(new->li_seqno, new 1293 net/ipv4/devinet.c int new; new 1302 net/ipv4/devinet.c if (get_user(new, (int __user *)newval)) new 1305 net/ipv4/devinet.c if (new == *valp) new 1324 net/ipv4/devinet.c *valp = new; new 937 net/ipv4/fib_trie.c hlist_add_head_rcu(&new->hlist, head); new 940 net/ipv4/fib_trie.c if (new->plen > li->plen) new 946 net/ipv4/fib_trie.c hlist_add_after_rcu(&last->hlist, &new->hlist); new 948 
net/ipv4/fib_trie.c hlist_add_before_rcu(&new->hlist, &li->hlist); new 339 net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c st->new, new 306 net/ipv4/netfilter/nf_conntrack_proto_icmp.c .new = icmp_new, new 565 net/ipv4/netfilter/nf_nat_core.c struct nf_conn_nat *new_nat = new; new 383 net/ipv4/netfilter/nf_nat_h323.c nf_nat_follow_master(new, this); new 388 net/ipv4/netfilter/nf_nat_h323.c BUG_ON(new->status & IPS_NAT_DONE_MASK); new 392 net/ipv4/netfilter/nf_nat_h323.c range.min_ip = range.max_ip = new->tuplehash[!this->dir].tuple.src.u3.ip; new 393 net/ipv4/netfilter/nf_nat_h323.c nf_nat_setup_info(new, &range, IP_NAT_MANIP_SRC); new 399 net/ipv4/netfilter/nf_nat_h323.c new->master->tuplehash[!this->dir].tuple.src.u3.ip; new 400 net/ipv4/netfilter/nf_nat_h323.c nf_nat_setup_info(new, &range, IP_NAT_MANIP_DST); new 474 net/ipv4/netfilter/nf_nat_h323.c BUG_ON(new->status & IPS_NAT_DONE_MASK); new 478 net/ipv4/netfilter/nf_nat_h323.c range.min_ip = range.max_ip = new->tuplehash[!this->dir].tuple.src.u3.ip; new 479 net/ipv4/netfilter/nf_nat_h323.c nf_nat_setup_info(new, &range, IP_NAT_MANIP_SRC); new 485 net/ipv4/netfilter/nf_nat_h323.c nf_nat_setup_info(new, &range, IP_NAT_MANIP_DST); new 1302 net/ipv4/route.c netevent.new = &rt->u.dst; new 2588 net/ipv4/route.c struct dst_entry *new = &rt->u.dst; new 2590 net/ipv4/route.c atomic_set(&new->__refcnt, 1); new 2591 net/ipv4/route.c new->__use = 1; new 2592 net/ipv4/route.c new->input = dst_discard; new 2593 net/ipv4/route.c new->output = dst_discard; new 2594 net/ipv4/route.c memcpy(new->metrics, ort->u.dst.metrics, RTAX_MAX*sizeof(u32)); new 2596 net/ipv4/route.c new->dev = ort->u.dst.dev; new 2597 net/ipv4/route.c if (new->dev) new 2598 net/ipv4/route.c dev_hold(new->dev); new 2617 net/ipv4/route.c dst_free(new); new 2932 net/ipv4/route.c int new = ip_rt_secret_interval; new 2933 net/ipv4/route.c int diff = new - old; new 2942 net/ipv4/route.c if (!new) new 2953 net/ipv4/route.c net->ipv4.rt_secret_timer.expires = new; new 3992 net/ipv6/addrconf.c int new; new 3998 net/ipv6/addrconf.c if (get_user(new, (int __user *)newval)) new 4000 net/ipv6/addrconf.c if (new == *valp) new 4016 net/ipv6/addrconf.c *valp = new; new 273 net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c .new = icmpv6_new, new 810 net/ipv6/route.c struct dst_entry *new = NULL; new 813 net/ipv6/route.c new = &rt->u.dst; new 815 net/ipv6/route.c atomic_set(&new->__refcnt, 1); new 816 net/ipv6/route.c new->__use = 1; new 817 net/ipv6/route.c new->input = dst_discard; new 818 net/ipv6/route.c new->output = dst_discard; new 820 net/ipv6/route.c memcpy(new->metrics, ort->u.dst.metrics, RTAX_MAX*sizeof(u32)); new 821 net/ipv6/route.c new->dev = ort->u.dst.dev; new 822 net/ipv6/route.c if (new->dev) new 823 net/ipv6/route.c dev_hold(new->dev); new 838 net/ipv6/route.c dst_free(new); new 842 net/ipv6/route.c *dstp = new; new 843 net/ipv6/route.c return (new ? 
0 : -ENOMEM); new 1511 net/ipv6/route.c netevent.new = &nrt->u.dst; new 833 net/irda/af_irda.c struct irda_sock *new, *self = irda_sk(sk); new 891 net/irda/af_irda.c new = irda_sk(newsk); new 894 net/irda/af_irda.c new->tsap = irttp_dup(self->tsap, new); new 895 net/irda/af_irda.c if (!new->tsap) { new 901 net/irda/af_irda.c new->stsap_sel = new->tsap->stsap_sel; new 902 net/irda/af_irda.c new->dtsap_sel = new->tsap->dtsap_sel; new 903 net/irda/af_irda.c new->saddr = irttp_get_saddr(new->tsap); new 904 net/irda/af_irda.c new->daddr = irttp_get_daddr(new->tsap); new 906 net/irda/af_irda.c new->max_sdu_size_tx = self->max_sdu_size_tx; new 907 net/irda/af_irda.c new->max_sdu_size_rx = self->max_sdu_size_rx; new 908 net/irda/af_irda.c new->max_data_size = self->max_data_size; new 909 net/irda/af_irda.c new->max_header_size = self->max_header_size; new 911 net/irda/af_irda.c memcpy(&new->qos_tx, &self->qos_tx, sizeof(struct qos_info)); new 924 net/irda/af_irda.c irda_connect_response(new); new 66 net/irda/discovery.c new->firststamp = new->timestamp; new 83 net/irda/discovery.c if ((node->data.saddr == new->data.saddr) && new 84 net/irda/discovery.c ((node->data.daddr == new->data.daddr) || new 85 net/irda/discovery.c (strcmp(node->data.info, new->data.info) == 0))) new 92 net/irda/discovery.c if (get_unaligned((__u16 *)node->data.hints) == get_unaligned((__u16 *)new->data.hints)) new 94 net/irda/discovery.c new->firststamp = node->firststamp; new 100 net/irda/discovery.c hashbin_insert(cachelog, (irda_queue_t *) new, new->data.daddr, NULL); new 779 net/irda/iriap.c struct iriap_cb *self, *new; new 790 net/irda/iriap.c new = iriap_open(LSAP_IAS, IAS_SERVER, NULL, NULL); new 791 net/irda/iriap.c if (!new) { new 797 net/irda/iriap.c new->lsap = irlmp_dup(self->lsap, new); new 798 net/irda/iriap.c if (!new->lsap) { new 803 net/irda/iriap.c new->max_data_size = max_seg_size; new 804 net/irda/iriap.c new->max_header_size = max_header_size; new 809 net/irda/iriap.c iriap_do_server_event(new, IAP_LM_CONNECT_INDICATION, skb); new 117 net/irda/irlan/irlan_common.c struct irlan_cb *new; new 147 net/irda/irlan/irlan_common.c new = irlan_open(DEV_ADDR_ANY, DEV_ADDR_ANY); new 148 net/irda/irlan/irlan_common.c if (!new) new 152 net/irda/irlan/irlan_common.c irlan_provider_open_ctrl_tsap(new); new 629 net/irda/irlmp.c struct lsap_cb *new; new 648 net/irda/irlmp.c new = kmemdup(orig, sizeof(*new), GFP_ATOMIC); new 649 net/irda/irlmp.c if (!new) { new 657 net/irda/irlmp.c new->conn_skb = NULL; new 662 net/irda/irlmp.c new->notify.instance = instance; new 664 net/irda/irlmp.c init_timer(&new->watchdog_timer); new 666 net/irda/irlmp.c hashbin_insert(irlmp->unconnected_lsaps, (irda_queue_t *) new, new 667 net/irda/irlmp.c (long) new, NULL); new 671 net/irda/irlmp.c new->lap->cache.valid = FALSE; new 674 net/irda/irlmp.c return new; new 750 net/irda/irnet/irnet_irda.c irnet_socket * new = (irnet_socket *) NULL; new 769 net/irda/irnet/irnet_irda.c new = (irnet_socket *) hashbin_find(irnet_server.list, new 771 net/irda/irnet/irnet_irda.c if(new) new 773 net/irda/irnet/irnet_irda.c new, new->rname); new 780 net/irda/irnet/irnet_irda.c if(new == (irnet_socket *) NULL) new 782 net/irda/irnet/irnet_irda.c new = (irnet_socket *) hashbin_get_first(irnet_server.list); new 783 net/irda/irnet/irnet_irda.c while(new !=(irnet_socket *) NULL) new 786 net/irda/irnet/irnet_irda.c if((new->rdaddr == self->daddr) || (new->daddr == self->daddr)) new 790 net/irda/irnet/irnet_irda.c new, self->daddr); new 793 
net/irda/irnet/irnet_irda.c new = (irnet_socket *) hashbin_get_next(irnet_server.list); new 798 net/irda/irnet/irnet_irda.c if(new == (irnet_socket *) NULL) new 800 net/irda/irnet/irnet_irda.c new = (irnet_socket *) hashbin_get_first(irnet_server.list); new 801 net/irda/irnet/irnet_irda.c while(new !=(irnet_socket *) NULL) new 804 net/irda/irnet/irnet_irda.c if(!(test_bit(0, &new->ttp_open)) && (new->rdaddr == DEV_ADDR_ANY) && new 805 net/irda/irnet/irnet_irda.c (new->rname[0] == '\0') && (new->ppp_open)) new 809 net/irda/irnet/irnet_irda.c new); new 812 net/irda/irnet/irnet_irda.c new = (irnet_socket *) hashbin_get_next(irnet_server.list); new 819 net/irda/irnet/irnet_irda.c DEXIT(IRDA_SERV_TRACE, " - new = 0x%p\n", new); new 820 net/irda/irnet/irnet_irda.c return new; new 838 net/irda/irnet/irnet_irda.c server, new); new 841 net/irda/irnet/irnet_irda.c new->tsap = irttp_dup(server->tsap, new); new 842 net/irda/irnet/irnet_irda.c DABORT(new->tsap == NULL, -1, IRDA_SERV_ERROR, "dup failed!\n"); new 845 net/irda/irnet/irnet_irda.c new->stsap_sel = new->tsap->stsap_sel; new 846 net/irda/irnet/irnet_irda.c new->dtsap_sel = new->tsap->dtsap_sel; new 847 net/irda/irnet/irnet_irda.c new->saddr = irttp_get_saddr(new->tsap); new 848 net/irda/irnet/irnet_irda.c new->daddr = irttp_get_daddr(new->tsap); new 850 net/irda/irnet/irnet_irda.c new->max_header_size = max_header_size; new 851 net/irda/irnet/irnet_irda.c new->max_sdu_size_tx = max_sdu_size; new 852 net/irda/irnet/irnet_irda.c new->max_data_size = max_sdu_size; new 856 net/irda/irnet/irnet_irda.c new->max_data_size = irttp_get_max_seg_size(new->tsap); new 863 net/irda/irnet/irnet_irda.c irttp_connect_response(new->tsap, new->max_sdu_size_rx, NULL); new 866 net/irda/irnet/irnet_irda.c set_bit(0, &new->ttp_open); new 870 net/irda/irnet/irnet_irda.c clear_bit(0, &new->ttp_connect); new 871 net/irda/irnet/irnet_irda.c if(new->iriap) new 873 net/irda/irnet/irnet_irda.c iriap_close(new->iriap); new 874 net/irda/irnet/irnet_irda.c new->iriap = NULL; new 876 net/irda/irnet/irnet_irda.c if(new->discoveries != NULL) new 878 net/irda/irnet/irnet_irda.c kfree(new->discoveries); new 879 net/irda/irnet/irnet_irda.c new->discoveries = NULL; new 887 net/irda/irnet/irnet_irda.c ppp_output_wakeup(&new->chan); new 891 net/irda/irnet/irnet_irda.c irnet_post_event(new, IRNET_CONNECT_FROM, new 892 net/irda/irnet/irnet_irda.c new->saddr, new->daddr, server->rname, 0); new 1342 net/irda/irnet/irnet_irda.c irnet_socket * new = (irnet_socket *) NULL; new 1350 net/irda/irnet/irnet_irda.c new = irnet_find_socket(server); new 1353 net/irda/irnet/irnet_irda.c if(new == (irnet_socket *) NULL) new 1361 net/irda/irnet/irnet_irda.c if(test_bit(0, &new->ttp_open)) new 1406 net/irda/irnet/irnet_irda.c && (test_and_clear_bit(0, &new->ttp_connect))) new 1413 net/irda/irnet/irnet_irda.c if(new->tsap != NULL) new 1418 net/irda/irnet/irnet_irda.c irttp_close_tsap(new->tsap); new 1419 net/irda/irnet/irnet_irda.c new->tsap = NULL; new 1434 net/irda/irnet/irnet_irda.c if((test_bit(0, &new->ttp_connect)) || (new->tsap != NULL)) new 1444 net/irda/irnet/irnet_irda.c irnet_connect_socket(server, new, qos, max_sdu_size, max_header_size); new 1452 net/irda/irnet/irnet_irda.c irnet_data_indication(new, new->tsap, skb); new 1432 net/irda/irttp.c struct tsap_cb *new; new 1448 net/irda/irttp.c new = kmalloc(sizeof(struct tsap_cb), GFP_ATOMIC); new 1449 net/irda/irttp.c if (!new) { new 1455 net/irda/irttp.c memcpy(new, orig, sizeof(struct tsap_cb)); new 1461 net/irda/irttp.c new->lsap = 
irlmp_dup(orig->lsap, new); new 1462 net/irda/irttp.c if (!new->lsap) { new 1464 net/irda/irttp.c kfree(new); new 1469 net/irda/irttp.c new->notify.instance = instance; new 1472 net/irda/irttp.c irttp_init_tsap(new); new 1475 net/irda/irttp.c hashbin_insert(irttp->tsaps, (irda_queue_t *) new, (long) new, NULL); new 1477 net/irda/irttp.c return new; new 282 net/irda/qos.c IRDA_ASSERT(new != NULL, return;); new 285 net/irda/qos.c qos->baud_rate.bits &= new->baud_rate.bits; new 286 net/irda/qos.c qos->window_size.bits &= new->window_size.bits; new 287 net/irda/qos.c qos->min_turn_time.bits &= new->min_turn_time.bits; new 288 net/irda/qos.c qos->max_turn_time.bits &= new->max_turn_time.bits; new 289 net/irda/qos.c qos->data_size.bits &= new->data_size.bits; new 290 net/irda/qos.c qos->link_disc_time.bits &= new->link_disc_time.bits; new 291 net/irda/qos.c qos->additional_bofs.bits &= new->additional_bofs.bits; new 341 net/key/af_key.c *new = *orig; new 387 net/mac80211/cfg.c struct beacon_data *new, *old; new 434 net/mac80211/cfg.c size = sizeof(*new) + new_head_len + new_tail_len; new 436 net/mac80211/cfg.c new = kzalloc(size, GFP_KERNEL); new 437 net/mac80211/cfg.c if (!new) new 444 net/mac80211/cfg.c new->dtim_period = params->dtim_period; new 446 net/mac80211/cfg.c new->dtim_period = old->dtim_period; new 452 net/mac80211/cfg.c new->head = ((u8 *) new) + sizeof(*new); new 453 net/mac80211/cfg.c new->tail = new->head + new_head_len; new 454 net/mac80211/cfg.c new->head_len = new_head_len; new 455 net/mac80211/cfg.c new->tail_len = new_tail_len; new 459 net/mac80211/cfg.c memcpy(new->head, params->head, new_head_len); new 461 net/mac80211/cfg.c memcpy(new->head, old->head, new_head_len); new 465 net/mac80211/cfg.c memcpy(new->tail, params->tail, new_tail_len); new 468 net/mac80211/cfg.c memcpy(new->tail, old->tail, new_tail_len); new 470 net/mac80211/cfg.c rcu_assign_pointer(sdata->u.ap.beacon, new); new 229 net/mac80211/key.c if (new) new 230 net/mac80211/key.c list_add(&new->list, &sdata->key_list); new 233 net/mac80211/key.c rcu_assign_pointer(sta->key, new); new 235 net/mac80211/key.c WARN_ON(new && old && new->conf.keyidx != old->conf.keyidx); new 240 net/mac80211/key.c idx = new->conf.keyidx; new 244 net/mac80211/key.c if (defkey && !new) new 247 net/mac80211/key.c rcu_assign_pointer(sdata->keys[idx], new); new 248 net/mac80211/key.c if (defkey && new) new 249 net/mac80211/key.c __ieee80211_set_default_key(sdata, new->conf.keyidx); new 229 net/netfilter/core.c attach(new, skb); new 553 net/netfilter/nf_conntrack_core.c if (!l4proto->new(ct, skb, dataoff)) { new 592 net/netfilter/nf_conntrack_core.c NF_CT_STAT_INC(net, new); new 226 net/netfilter/nf_conntrack_expect.c struct nf_conntrack_expect *new; new 228 net/netfilter/nf_conntrack_expect.c new = kmem_cache_alloc(nf_ct_expect_cachep, GFP_ATOMIC); new 229 net/netfilter/nf_conntrack_expect.c if (!new) new 232 net/netfilter/nf_conntrack_expect.c new->master = me; new 233 net/netfilter/nf_conntrack_expect.c atomic_set(&new->use, 1); new 234 net/netfilter/nf_conntrack_expect.c INIT_RCU_HEAD(&new->rcu); new 235 net/netfilter/nf_conntrack_expect.c return new; new 341 net/netfilter/nf_conntrack_expect.c if (exp->class == new->class) new 77 net/netfilter/nf_conntrack_extend.c struct nf_ct_ext *new; new 98 net/netfilter/nf_conntrack_extend.c new = __krealloc(ct->ext, newlen, gfp); new 99 net/netfilter/nf_conntrack_extend.c if (!new) new 102 net/netfilter/nf_conntrack_extend.c if (new != ct->ext) { new 110 net/netfilter/nf_conntrack_extend.c 
t->move((void *)new + new->offset[i], new 115 net/netfilter/nf_conntrack_extend.c ct->ext = new; new 118 net/netfilter/nf_conntrack_extend.c new->offset[id] = newoff; new 119 net/netfilter/nf_conntrack_extend.c new->len = newlen; new 120 net/netfilter/nf_conntrack_extend.c memset((void *)new + newoff, 0, newlen - newoff); new 121 net/netfilter/nf_conntrack_extend.c return (void *)new + newoff; new 742 net/netfilter/nf_conntrack_proto_dccp.c .new = dccp_new, new 767 net/netfilter/nf_conntrack_proto_dccp.c .new = dccp_new, new 101 net/netfilter/nf_conntrack_proto_generic.c .new = new, new 291 net/netfilter/nf_conntrack_proto_gre.c .new = gre_new, new 667 net/netfilter/nf_conntrack_proto_sctp.c .new = sctp_new, new 695 net/netfilter/nf_conntrack_proto_sctp.c .new = sctp_new, new 1395 net/netfilter/nf_conntrack_proto_tcp.c .new = tcp_new, new 1425 net/netfilter/nf_conntrack_proto_tcp.c .new = tcp_new, new 191 net/netfilter/nf_conntrack_proto_udp.c .new = udp_new, new 218 net/netfilter/nf_conntrack_proto_udp.c .new = udp_new, new 179 net/netfilter/nf_conntrack_proto_udplite.c .new = udplite_new, new 202 net/netfilter/nf_conntrack_proto_udplite.c .new = udplite_new, new 248 net/netfilter/nf_conntrack_standalone.c st->new, new 1104 net/netlink/af_netlink.c int old, new = !!is_new, subscriptions; new 1107 net/netlink/af_netlink.c subscriptions = nlk->subscriptions - old + new; new 1108 net/netlink/af_netlink.c if (new) new 675 net/rose/rose_route.c if (!new) spin_lock_bh(&rose_node_list_lock); new 679 net/rose/rose_route.c if (new) { new 705 net/rose/rose_route.c if (!new) spin_unlock_bh(&rose_node_list_lock); new 611 net/rxrpc/ar-connection.c const char *new = "old"; new 697 net/rxrpc/ar-connection.c new = "new"; new 700 net/rxrpc/ar-connection.c _net("CONNECTION %s %d {%x}", new, conn->debug_id, conn->real_conn_id); new 123 net/rxrpc/ar-peer.c const char *new = "old"; new 177 net/rxrpc/ar-peer.c new = "new"; new 181 net/rxrpc/ar-peer.c new, new 78 net/rxrpc/ar-transport.c const char *new = "old"; new 118 net/rxrpc/ar-transport.c new = "new"; new 122 net/rxrpc/ar-transport.c new, new 352 net/sched/cls_route.c if (new && handle & 0x8000) new 375 net/sched/cls_route.c if (handle && new) { new 676 net/sched/sch_api.c if (new || old) new 677 net/sched/sch_api.c qdisc_notify(skb, n, clid, old, new); new 705 net/sched/sch_api.c (new && new->flags & TCQ_F_INGRESS)) { new 719 net/sched/sch_api.c old = dev_graft_qdisc(dev_queue, new); new 720 net/sched/sch_api.c if (new && i > 0) new 721 net/sched/sch_api.c atomic_inc(&new->refcnt); new 723 net/sched/sch_api.c notify_and_destroy(skb, n, classid, old, new); new 736 net/sched/sch_api.c err = cops->graft(parent, cl, new, &old); new 741 net/sched/sch_api.c notify_and_destroy(skb, n, classid, old, new); new 1210 net/sched/sch_api.c if (new) { new 1211 net/sched/sch_api.c if (tc_fill_qdisc(skb, new, clid, pid, n->nlmsg_seq, old ? 
NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0) new 100 net/sched/sch_atm.c sch, p, flow, new, old); new 103 net/sched/sch_atm.c if (!new) new 104 net/sched/sch_atm.c new = &noop_qdisc; new 105 net/sched/sch_atm.c *old = xchg(&flow->q, new); new 492 net/sched/sch_atm.c struct sk_buff *new; new 494 net/sched/sch_atm.c new = skb_realloc_headroom(skb, flow->hdr_len); new 496 net/sched/sch_atm.c if (!new) new 498 net/sched/sch_atm.c skb = new; new 195 net/sched/sch_cbq.c struct cbq_class *cl, *new; new 198 net/sched/sch_cbq.c if ((new = cl->defaults[TC_PRIO_BESTEFFORT]) != NULL && new != this) new 199 net/sched/sch_cbq.c return new; new 1658 net/sched/sch_cbq.c if (new == NULL) { new 1659 net/sched/sch_cbq.c new = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue, new 1662 net/sched/sch_cbq.c if (new == NULL) new 1667 net/sched/sch_cbq.c new->reshape_fail = cbq_reshape_fail; new 1671 net/sched/sch_cbq.c *old = xchg(&cl->q, new); new 60 net/sched/sch_dsmark.c sch, p, new, old); new 62 net/sched/sch_dsmark.c if (new == NULL) { new 63 net/sched/sch_dsmark.c new = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue, new 66 net/sched/sch_dsmark.c if (new == NULL) new 67 net/sched/sch_dsmark.c new = &noop_qdisc; new 71 net/sched/sch_dsmark.c *old = xchg(&p->q, new); new 1204 net/sched/sch_hfsc.c if (new == NULL) { new 1205 net/sched/sch_hfsc.c new = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue, new 1208 net/sched/sch_hfsc.c if (new == NULL) new 1209 net/sched/sch_hfsc.c new = &noop_qdisc; new 1214 net/sched/sch_hfsc.c *old = xchg(&cl->qdisc, new); new 1137 net/sched/sch_htb.c if (new == NULL && new 1138 net/sched/sch_htb.c (new = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue, new 1144 net/sched/sch_htb.c if ((*old = xchg(&cl->un.leaf.q, new)) != NULL) { new 308 net/sched/sch_multiq.c if (new == NULL) new 309 net/sched/sch_multiq.c new = &noop_qdisc; new 313 net/sched/sch_multiq.c q->queues[band] = new; new 646 net/sched/sch_netem.c if (new == NULL) new 647 net/sched/sch_netem.c new = &noop_qdisc; new 650 net/sched/sch_netem.c *old = xchg(&q->qdisc, new); new 280 net/sched/sch_prio.c if (new == NULL) new 281 net/sched/sch_prio.c new = &noop_qdisc; new 285 net/sched/sch_prio.c q->queues[band] = new; new 291 net/sched/sch_red.c if (new == NULL) new 292 net/sched/sch_red.c new = &noop_qdisc; new 295 net/sched/sch_red.c *old = xchg(&q->qdisc, new); new 397 net/sched/sch_tbf.c if (new == NULL) new 398 net/sched/sch_tbf.c new = &noop_qdisc; new 401 net/sched/sch_tbf.c *old = xchg(&q->qdisc, new); new 1122 net/sctp/associola.c asoc->c = new->c; new 1123 net/sctp/associola.c asoc->peer.rwnd = new->peer.rwnd; new 1124 net/sctp/associola.c asoc->peer.sack_needed = new->peer.sack_needed; new 1125 net/sctp/associola.c asoc->peer.i = new->peer.i; new 1132 net/sctp/associola.c if (!sctp_assoc_lookup_paddr(new, &trans->ipaddr)) new 1145 net/sctp/associola.c asoc->next_tsn = new->next_tsn; new 1146 net/sctp/associola.c asoc->ctsn_ack_point = new->ctsn_ack_point; new 1147 net/sctp/associola.c asoc->adv_peer_ack_point = new->adv_peer_ack_point; new 1168 net/sctp/associola.c list_for_each_entry(trans, &new->peer.transport_addr_list, new 1179 net/sctp/associola.c asoc->ssnmap = new->ssnmap; new 1180 net/sctp/associola.c new->ssnmap = NULL; new 1195 net/sctp/associola.c asoc->peer.peer_random = new->peer.peer_random; new 1196 net/sctp/associola.c new->peer.peer_random = NULL; new 1199 net/sctp/associola.c asoc->peer.peer_chunks = new->peer.peer_chunks; new 1200 net/sctp/associola.c new->peer.peer_chunks = NULL; new 1203 
net/sctp/associola.c asoc->peer.peer_hmacs = new->peer.peer_hmacs; new 1204 net/sctp/associola.c new->peer.peer_hmacs = NULL; new 102 net/sctp/auth.c struct sctp_shared_key *new; new 105 net/sctp/auth.c new = kzalloc(sizeof(struct sctp_shared_key), gfp); new 106 net/sctp/auth.c if (!new) new 109 net/sctp/auth.c INIT_LIST_HEAD(&new->key_list); new 110 net/sctp/auth.c new->key_id = key_id; new 112 net/sctp/auth.c return new; new 199 net/sctp/auth.c struct sctp_auth_bytes *new; new 207 net/sctp/auth.c new = kmalloc(sizeof(struct sctp_auth_bytes) + len, gfp); new 208 net/sctp/auth.c if (!new) new 211 net/sctp/auth.c new->len = len; new 213 net/sctp/auth.c memcpy(new->data, random, ntohs(random->param_hdr.length)); new 217 net/sctp/auth.c memcpy(new->data + offset, chunks, new 222 net/sctp/auth.c memcpy(new->data + offset, hmacs, ntohs(hmacs->param_hdr.length)); new 224 net/sctp/auth.c return new; new 367 net/sctp/auth.c struct sctp_shared_key *new; new 372 net/sctp/auth.c new = sctp_auth_shkey_create(sh_key->key_id, gfp); new 373 net/sctp/auth.c if (!new) new 376 net/sctp/auth.c new->key = sh_key->key; new 377 net/sctp/auth.c sctp_auth_key_hold(new->key); new 378 net/sctp/auth.c list_add(&new->key_list, &asoc->endpoint_shared_keys); new 177 net/sctp/bind_addr.c memcpy(&addr->a, new, sizeof(*new)); new 369 net/sctp/outqueue.c nchunk = list_entry(new, struct sctp_chunk, transmitted_list); new 376 net/sctp/outqueue.c list_add(new, pos->prev); new 382 net/sctp/outqueue.c list_add_tail(new, head); new 364 net/sctp/tsnmap.c unsigned long *new; new 374 net/sctp/tsnmap.c new = kzalloc(len>>3, GFP_ATOMIC); new 375 net/sctp/tsnmap.c if (!new) new 378 net/sctp/tsnmap.c bitmap_copy(new, map->tsn_map, map->max_tsn_seen - map->base_tsn); new 380 net/sctp/tsnmap.c map->tsn_map = new; new 334 net/sctp/ulpqueue.c struct sk_buff *new = NULL; new 360 net/sctp/ulpqueue.c new = skb_copy(f_frag, GFP_ATOMIC); new 361 net/sctp/ulpqueue.c if (!new) new 364 net/sctp/ulpqueue.c sctp_skb_set_owner_r(new, f_frag->sk); new 366 net/sctp/ulpqueue.c skb_shinfo(new)->frag_list = pos; new 375 net/sctp/ulpqueue.c if (new) { new 377 net/sctp/ulpqueue.c f_frag = new; new 146 net/sunrpc/auth.c struct rpc_cred_cache *new; new 149 net/sunrpc/auth.c new = kmalloc(sizeof(*new), GFP_KERNEL); new 150 net/sunrpc/auth.c if (!new) new 153 net/sunrpc/auth.c INIT_HLIST_HEAD(&new->hashtable[i]); new 154 net/sunrpc/auth.c spin_lock_init(&new->lock); new 155 net/sunrpc/auth.c auth->au_credcache = new; new 291 net/sunrpc/auth.c *entry, *new; new 314 net/sunrpc/auth.c new = auth->au_ops->crcreate(auth, acred, flags); new 315 net/sunrpc/auth.c if (IS_ERR(new)) { new 316 net/sunrpc/auth.c cred = new; new 328 net/sunrpc/auth.c cred = new; new 332 net/sunrpc/auth.c list_add_tail(&new->cr_lru, &free); new 915 net/sunrpc/auth_gss/auth_gss.c struct rpc_cred *new; new 917 net/sunrpc/auth_gss/auth_gss.c new = gss_lookup_cred(auth, &acred, RPCAUTH_LOOKUP_NEW); new 918 net/sunrpc/auth_gss/auth_gss.c if (IS_ERR(new)) new 919 net/sunrpc/auth_gss/auth_gss.c return PTR_ERR(new); new 920 net/sunrpc/auth_gss/auth_gss.c task->tk_msg.rpc_cred = new; new 72 net/sunrpc/auth_gss/gss_mech_switch.c char *new; new 74 net/sunrpc/auth_gss/gss_mech_switch.c new = kmalloc(strlen(name) + strlen(prefix) + 1, GFP_KERNEL); new 75 net/sunrpc/auth_gss/gss_mech_switch.c if (new) { new 76 net/sunrpc/auth_gss/gss_mech_switch.c strcpy(new, prefix); new 77 net/sunrpc/auth_gss/gss_mech_switch.c strcat(new, name); new 79 net/sunrpc/auth_gss/gss_mech_switch.c return new; new 128 
new 131 net/sunrpc/auth_gss/svcauth_gss.c new->out_handle.data = NULL;
new 132 net/sunrpc/auth_gss/svcauth_gss.c new->out_handle.len = 0;
new 133 net/sunrpc/auth_gss/svcauth_gss.c new->out_token.data = NULL;
new 134 net/sunrpc/auth_gss/svcauth_gss.c new->out_token.len = 0;
new 135 net/sunrpc/auth_gss/svcauth_gss.c new->in_handle.len = item->in_handle.len;
new 137 net/sunrpc/auth_gss/svcauth_gss.c new->in_token.len = item->in_token.len;
new 139 net/sunrpc/auth_gss/svcauth_gss.c new->in_handle.data = item->in_handle.data;
new 141 net/sunrpc/auth_gss/svcauth_gss.c new->in_token.data = item->in_token.data;
new 147 net/sunrpc/auth_gss/svcauth_gss.c struct rsi *new = container_of(cnew, struct rsi, h);
new 150 net/sunrpc/auth_gss/svcauth_gss.c BUG_ON(new->out_handle.data || new->out_token.data);
new 151 net/sunrpc/auth_gss/svcauth_gss.c new->out_handle.len = item->out_handle.len;
new 153 net/sunrpc/auth_gss/svcauth_gss.c new->out_token.len = item->out_token.len;
new 155 net/sunrpc/auth_gss/svcauth_gss.c new->out_handle.data = item->out_handle.data;
new 157 net/sunrpc/auth_gss/svcauth_gss.c new->out_token.data = item->out_token.data;
new 160 net/sunrpc/auth_gss/svcauth_gss.c new->major_status = item->major_status;
new 161 net/sunrpc/auth_gss/svcauth_gss.c new->minor_status = item->minor_status;
new 296 net/sunrpc/auth_gss/svcauth_gss.c int hash = rsi_hash(new);
new 298 net/sunrpc/auth_gss/svcauth_gss.c ch = sunrpc_cache_update(&rsi_cache, &new->h,
new 368 net/sunrpc/auth_gss/svcauth_gss.c struct rsc *new = container_of(a, struct rsc, h);
new 371 net/sunrpc/auth_gss/svcauth_gss.c return netobj_equal(&new->handle, &tmp->handle);
new 377 net/sunrpc/auth_gss/svcauth_gss.c struct rsc *new = container_of(cnew, struct rsc, h);
new 380 net/sunrpc/auth_gss/svcauth_gss.c new->handle.len = tmp->handle.len;
new 382 net/sunrpc/auth_gss/svcauth_gss.c new->handle.data = tmp->handle.data;
new 384 net/sunrpc/auth_gss/svcauth_gss.c new->mechctx = NULL;
new 385 net/sunrpc/auth_gss/svcauth_gss.c new->cred.cr_group_info = NULL;
new 391 net/sunrpc/auth_gss/svcauth_gss.c struct rsc *new = container_of(cnew, struct rsc, h);
new 394 net/sunrpc/auth_gss/svcauth_gss.c new->mechctx = tmp->mechctx;
new 396 net/sunrpc/auth_gss/svcauth_gss.c memset(&new->seqdata, 0, sizeof(new->seqdata));
new 397 net/sunrpc/auth_gss/svcauth_gss.c spin_lock_init(&new->seqdata.sd_lock);
new 398 net/sunrpc/auth_gss/svcauth_gss.c new->cred = tmp->cred;
new 531 net/sunrpc/auth_gss/svcauth_gss.c int hash = rsc_hash(new);
new 533 net/sunrpc/auth_gss/svcauth_gss.c ch = sunrpc_cache_update(&rsc_cache, &new->h,
new 754 net/sunrpc/auth_gss/svcauth_gss.c struct gss_domain *new;
new 758 net/sunrpc/auth_gss/svcauth_gss.c new = kmalloc(sizeof(*new), GFP_KERNEL);
new 759 net/sunrpc/auth_gss/svcauth_gss.c if (!new)
new 761 net/sunrpc/auth_gss/svcauth_gss.c kref_init(&new->h.ref);
new 762 net/sunrpc/auth_gss/svcauth_gss.c new->h.name = kstrdup(name, GFP_KERNEL);
new 763 net/sunrpc/auth_gss/svcauth_gss.c if (!new->h.name)
new 765 net/sunrpc/auth_gss/svcauth_gss.c new->h.flavour = &svcauthops_gss;
new 766 net/sunrpc/auth_gss/svcauth_gss.c new->pseudoflavor = pseudoflavor;
new 769 net/sunrpc/auth_gss/svcauth_gss.c test = auth_domain_lookup(name, &new->h);
new 770 net/sunrpc/auth_gss/svcauth_gss.c if (test != &new->h) { /* Duplicate registration */
new 772 net/sunrpc/auth_gss/svcauth_gss.c kfree(new->h.name);
new 778 net/sunrpc/auth_gss/svcauth_gss.c kfree(new);
new 54 net/sunrpc/cache.c struct cache_head *new = NULL;
new 71 net/sunrpc/cache.c new = detail->alloc();
new 72 net/sunrpc/cache.c if (!new)
new 78 net/sunrpc/cache.c cache_init(new);
new 79 net/sunrpc/cache.c detail->init(new, key);
new 89 net/sunrpc/cache.c cache_put(new, detail);
new 93 net/sunrpc/cache.c new->next = *head;
new 94 net/sunrpc/cache.c *head = new;
new 96 net/sunrpc/cache.c cache_get(new);
new 99 net/sunrpc/cache.c return new;
new 116 net/sunrpc/cache.c if (new)
new 138 net/sunrpc/cache.c if (test_bit(CACHE_NEGATIVE, &new->flags))
new 141 net/sunrpc/cache.c detail->update(old, new);
new 142 net/sunrpc/cache.c is_new = cache_fresh_locked(old, new->expiry_time);
new 160 net/sunrpc/cache.c if (test_bit(CACHE_NEGATIVE, &new->flags))
new 163 net/sunrpc/cache.c detail->update(tmp, new);
new 168 net/sunrpc/cache.c is_new = cache_fresh_locked(tmp, new->expiry_time);
new 342 net/sunrpc/clnt.c struct rpc_clnt *new;
new 345 net/sunrpc/clnt.c new = kmemdup(clnt, sizeof(*new), GFP_KERNEL);
new 346 net/sunrpc/clnt.c if (!new)
new 348 net/sunrpc/clnt.c new->cl_parent = clnt;
new 350 net/sunrpc/clnt.c new->cl_autobind = 0;
new 351 net/sunrpc/clnt.c INIT_LIST_HEAD(&new->cl_tasks);
new 352 net/sunrpc/clnt.c spin_lock_init(&new->cl_lock);
new 353 net/sunrpc/clnt.c rpc_init_rtt(&new->cl_rtt_default, clnt->cl_timeout->to_initval);
new 354 net/sunrpc/clnt.c new->cl_metrics = rpc_alloc_iostats(clnt);
new 355 net/sunrpc/clnt.c if (new->cl_metrics == NULL)
new 357 net/sunrpc/clnt.c kref_init(&new->cl_kref);
new 358 net/sunrpc/clnt.c err = rpc_setup_pipedir(new, clnt->cl_program->pipe_dir_name);
new 361 net/sunrpc/clnt.c if (new->cl_auth)
new 362 net/sunrpc/clnt.c atomic_inc(&new->cl_auth->au_count);
new 365 net/sunrpc/clnt.c rpc_register_client(new);
new 367 net/sunrpc/clnt.c return new;
new 369 net/sunrpc/clnt.c rpc_free_iostats(new->cl_metrics);
new 371 net/sunrpc/clnt.c kfree(new);
new 118 net/sunrpc/stats.c struct rpc_iostats *new;
new 119 net/sunrpc/stats.c new = kcalloc(clnt->cl_maxproc, sizeof(struct rpc_iostats), GFP_KERNEL);
new 120 net/sunrpc/stats.c return new;
new 155 net/sunrpc/svcauth.c if (new)
new 156 net/sunrpc/svcauth.c hlist_add_head(&new->hash, head);
new 158 net/sunrpc/svcauth.c return new;
new 38 net/sunrpc/svcauth_unix.c struct unix_domain *new = NULL;
new 43 net/sunrpc/svcauth_unix.c if (new && rv != &new->h)
new 44 net/sunrpc/svcauth_unix.c auth_domain_put(&new->h);
new 53 net/sunrpc/svcauth_unix.c new = kmalloc(sizeof(*new), GFP_KERNEL);
new 54 net/sunrpc/svcauth_unix.c if (new == NULL)
new 56 net/sunrpc/svcauth_unix.c kref_init(&new->h.ref);
new 57 net/sunrpc/svcauth_unix.c new->h.name = kstrdup(name, GFP_KERNEL);
new 58 net/sunrpc/svcauth_unix.c if (new->h.name == NULL) {
new 59 net/sunrpc/svcauth_unix.c kfree(new);
new 62 net/sunrpc/svcauth_unix.c new->h.flavour = &svcauth_unix;
new 63 net/sunrpc/svcauth_unix.c new->addr_changes = 0;
new 64 net/sunrpc/svcauth_unix.c rv = auth_domain_lookup(name, &new->h);
new 127 net/sunrpc/svcauth_unix.c struct ip_map *new = container_of(cnew, struct ip_map, h);
new 128 net/sunrpc/svcauth_unix.c return strcmp(orig->m_class, new->m_class) == 0
new 129 net/sunrpc/svcauth_unix.c && ipv6_addr_equal(&orig->m_addr, &new->m_addr);
new 133 net/sunrpc/svcauth_unix.c struct ip_map *new = container_of(cnew, struct ip_map, h);
new 136 net/sunrpc/svcauth_unix.c strcpy(new->m_class, item->m_class);
new 137 net/sunrpc/svcauth_unix.c ipv6_addr_copy(&new->m_addr, &item->m_addr);
new 141 net/sunrpc/svcauth_unix.c struct ip_map *new = container_of(cnew, struct ip_map, h);
new 145 net/sunrpc/svcauth_unix.c new->m_client = item->m_client;
new 146 net/sunrpc/svcauth_unix.c new->m_add_change = item->m_add_change;
new 498 net/sunrpc/svcauth_unix.c struct unix_gid *new = container_of(cnew, struct unix_gid, h);
new 499 net/sunrpc/svcauth_unix.c return orig->uid == new->uid;
new 503 net/sunrpc/svcauth_unix.c struct unix_gid *new = container_of(cnew, struct unix_gid, h);
new 505 net/sunrpc/svcauth_unix.c new->uid = item->uid;
new 509 net/sunrpc/svcauth_unix.c struct unix_gid *new = container_of(cnew, struct unix_gid, h);
new 513 net/sunrpc/svcauth_unix.c new->gi = item->gi;
new 1876 net/sunrpc/xprtsock.c struct sock_xprt *new;
new 1883 net/sunrpc/xprtsock.c new = kzalloc(sizeof(*new), GFP_KERNEL);
new 1884 net/sunrpc/xprtsock.c if (new == NULL) {
new 1889 net/sunrpc/xprtsock.c xprt = &new->xprt;
new 1903 net/sunrpc/xprtsock.c memcpy(&new->addr, args->srcaddr, args->addrlen);
new 247 net/x25/x25_facilities.c memcpy(new, ours, sizeof(*new));
new 259 net/x25/x25_facilities.c new->reverse = theirs.reverse;
new 264 net/x25/x25_facilities.c new->throughput = theirs.throughput;
new 271 net/x25/x25_facilities.c new->pacsize_in = theirs.pacsize_in;
new 275 net/x25/x25_facilities.c new->pacsize_out = theirs.pacsize_out;
new 282 net/x25/x25_facilities.c new->winsize_in = theirs.winsize_in;
new 286 net/x25/x25_facilities.c new->winsize_out = theirs.winsize_out;
new 192 scripts/mod/modpost.c struct symbol *new;
new 195 scripts/mod/modpost.c new = symbolhash[hash] = alloc_symbol(name, 0, symbolhash[hash]);
new 196 scripts/mod/modpost.c new->module = module;
new 197 scripts/mod/modpost.c new->export = export;
new 198 scripts/mod/modpost.c return new;
new 84 security/device_cgroup.c struct dev_whitelist_item *wh, *tmp, *new;
new 87 security/device_cgroup.c new = kmalloc(sizeof(*wh), GFP_KERNEL);
new 88 security/device_cgroup.c if (!new)
new 90 security/device_cgroup.c new->major = wh->major;
new 91 security/device_cgroup.c new->minor = wh->minor;
new 92 security/device_cgroup.c new->type = wh->type;
new 93 security/device_cgroup.c new->access = wh->access;
new 94 security/device_cgroup.c list_add_tail(&new->list, dest);
new 298 security/selinux/avc.c list_replace_rcu(&old->list, &new->list);
new 152 security/selinux/netif.c struct sel_netif *new = NULL;
new 173 security/selinux/netif.c new = kzalloc(sizeof(*new), GFP_ATOMIC);
new 174 security/selinux/netif.c if (new == NULL) {
new 178 security/selinux/netif.c ret = security_netif_sid(dev->name, &new->nsec.sid);
new 181 security/selinux/netif.c new->nsec.ifindex = ifindex;
new 182 security/selinux/netif.c ret = sel_netif_insert(new);
new 185 security/selinux/netif.c *sid = new->nsec.sid;
new 195 security/selinux/netif.c kfree(new);
new 218 security/selinux/netnode.c struct sel_netnode *new = NULL;
new 227 security/selinux/netnode.c new = kzalloc(sizeof(*new), GFP_ATOMIC);
new 228 security/selinux/netnode.c if (new == NULL)
new 234 security/selinux/netnode.c new->nsec.addr.ipv4 = *(__be32 *)addr;
new 239 security/selinux/netnode.c ipv6_addr_copy(&new->nsec.addr.ipv6, addr);
new 247 security/selinux/netnode.c new->nsec.family = family;
new 248 security/selinux/netnode.c new->nsec.sid = *sid;
new 249 security/selinux/netnode.c sel_netnode_insert(new);
new 257 security/selinux/netnode.c kfree(new);
new 165 security/selinux/netport.c struct sel_netport *new = NULL;
new 174 security/selinux/netport.c new = kzalloc(sizeof(*new), GFP_ATOMIC);
new 175 security/selinux/netport.c if (new == NULL)
new 181 security/selinux/netport.c new->psec.port = pnum;
new 182 security/selinux/netport.c new->psec.protocol = protocol;
new 183 security/selinux/netport.c new->psec.sid = *sid;
new 184 security/selinux/netport.c sel_netport_insert(new);
new 192 security/selinux/netport.c kfree(new);
new 49 security/selinux/ss/ebitmap.c struct ebitmap_node *n, *new, *prev;
new 55 security/selinux/ss/ebitmap.c new = kzalloc(sizeof(*new), GFP_ATOMIC);
new 56 security/selinux/ss/ebitmap.c if (!new) {
new 60 security/selinux/ss/ebitmap.c new->startbit = n->startbit;
new 61 security/selinux/ss/ebitmap.c memcpy(new->maps, n->maps, EBITMAP_SIZE / 8);
new 62 security/selinux/ss/ebitmap.c new->next = NULL;
new 64 security/selinux/ss/ebitmap.c prev->next = new;
new 66 security/selinux/ss/ebitmap.c dst->node = new;
new 67 security/selinux/ss/ebitmap.c prev = new;
new 263 security/selinux/ss/ebitmap.c struct ebitmap_node *n, *prev, *new;
new 307 security/selinux/ss/ebitmap.c new = kzalloc(sizeof(*new), GFP_ATOMIC);
new 308 security/selinux/ss/ebitmap.c if (!new)
new 311 security/selinux/ss/ebitmap.c new->startbit = bit - (bit % EBITMAP_SIZE);
new 312 security/selinux/ss/ebitmap.c ebitmap_node_set_bit(new, bit);
new 316 security/selinux/ss/ebitmap.c e->highbit = new->startbit + EBITMAP_SIZE;
new 319 security/selinux/ss/ebitmap.c new->next = prev->next;
new 320 security/selinux/ss/ebitmap.c prev->next = new;
new 322 security/selinux/ss/ebitmap.c new->next = e->node;
new 323 security/selinux/ss/ebitmap.c e->node = new;
new 876 sound/core/pcm_lib.c struct snd_pcm_hw_rule *new;
new 878 sound/core/pcm_lib.c new = kcalloc(new_rules, sizeof(*c), GFP_KERNEL);
new 879 sound/core/pcm_lib.c if (!new)
new 882 sound/core/pcm_lib.c memcpy(new, constrs->rules,
new 886 sound/core/pcm_lib.c constrs->rules = new;
new 216 sound/i2c/l3/uda1341.c unsigned short old, new;
new 228 sound/i2c/l3/uda1341.c new = (old & ~(mask << shift)) | (value << shift);
new 229 sound/i2c/l3/uda1341.c change = old != new;
new 231 sound/i2c/l3/uda1341.c if (flush) uda->write(clnt, reg, new);
new 232 sound/i2c/l3/uda1341.c uda->regs[reg] = new;
new 259 sound/isa/es18xx.c unsigned char old, new, oval;
new 278 sound/isa/es18xx.c new = (old & ~mask) | (val & mask);
new 279 sound/isa/es18xx.c ret = snd_es18xx_dsp_command(chip, new);
new 284 sound/isa/es18xx.c reg, old, new, ret);
new 324 sound/isa/es18xx.c unsigned char old, new, oval;
new 331 sound/isa/es18xx.c new = (old & ~mask) | (val & mask);
new 332 sound/isa/es18xx.c outb(new, chip->port + 0x05);
new 335 sound/isa/es18xx.c reg, old, new);
new 345 sound/isa/es18xx.c int old, expected, new;
new 352 sound/isa/es18xx.c new = inb(chip->port + 0x05);
new 356 sound/isa/es18xx.c reg, old, expected, new);
new 358 sound/isa/es18xx.c return expected == new;
new 231 sound/mips/hal2.c u32 old, new;
new 240 sound/mips/hal2.c new = old & ~(H2I_C2_L_ATT_M | H2I_C2_R_ATT_M | H2I_C2_MUTE);
new 244 sound/mips/hal2.c new |= (l << H2I_C2_L_ATT_SHIFT);
new 245 sound/mips/hal2.c new |= (r << H2I_C2_R_ATT_SHIFT);
new 247 sound/mips/hal2.c new |= H2I_C2_L_ATT_M | H2I_C2_R_ATT_M | H2I_C2_MUTE;
new 248 sound/mips/hal2.c hal2_i_write32(hal2, H2I_DAC_C2, new);
new 252 sound/mips/hal2.c new = old & ~(H2I_C2_L_GAIN_M | H2I_C2_R_GAIN_M);
new 253 sound/mips/hal2.c new |= (l << H2I_C2_L_GAIN_SHIFT);
new 254 sound/mips/hal2.c new |= (r << H2I_C2_R_GAIN_SHIFT);
new 255 sound/mips/hal2.c hal2_i_write32(hal2, H2I_ADC_C2, new);
new 258 sound/mips/hal2.c return old != new;
new 391 sound/pci/ac97/ac97_codec.c unsigned short old, new;
new 394 sound/pci/ac97/ac97_codec.c new = (old & ~mask) | (value & mask);
new 395 sound/pci/ac97/ac97_codec.c change = old != new;
new 397 sound/pci/ac97/ac97_codec.c ac97->regs[reg] = new;
new 398 sound/pci/ac97/ac97_codec.c ac97->bus->ops->write(ac97, reg, new);
new 407 sound/pci/ac97/ac97_codec.c unsigned short old, new, cfg;
new 411 sound/pci/ac97/ac97_codec.c new = (old & ~mask) | (value & mask);
new 412 sound/pci/ac97/ac97_codec.c change = old != new;
new 416 sound/pci/ac97/ac97_codec.c ac97->spec.ad18xx.pcmreg[codec] = new;
new 422 sound/pci/ac97/ac97_codec.c ac97->bus->ops->write(ac97, AC97_PCM, new);
new 732 sound/pci/ac97/ac97_codec.c unsigned int new = 0;
new 736 sound/pci/ac97/ac97_codec.c new = val = ucontrol->value.iec958.status[0] & (IEC958_AES0_PROFESSIONAL|IEC958_AES0_NONAUDIO);
new 738 sound/pci/ac97/ac97_codec.c new |= ucontrol->value.iec958.status[0] & (IEC958_AES0_PRO_FS|IEC958_AES0_PRO_EMPHASIS_5015);
new 739 sound/pci/ac97/ac97_codec.c switch (new & IEC958_AES0_PRO_FS) {
new 745 sound/pci/ac97/ac97_codec.c if ((new & IEC958_AES0_PRO_EMPHASIS) == IEC958_AES0_PRO_EMPHASIS_5015)
new 748 sound/pci/ac97/ac97_codec.c new |= ucontrol->value.iec958.status[0] & (IEC958_AES0_CON_EMPHASIS_5015|IEC958_AES0_CON_NOT_COPYRIGHT);
new 749 sound/pci/ac97/ac97_codec.c new |= ((ucontrol->value.iec958.status[1] & (IEC958_AES1_CON_CATEGORY|IEC958_AES1_CON_ORIGINAL)) << 8);
new 750 sound/pci/ac97/ac97_codec.c new |= ((ucontrol->value.iec958.status[3] & IEC958_AES3_CON_FS) << 24);
new 751 sound/pci/ac97/ac97_codec.c if ((new & IEC958_AES0_CON_EMPHASIS) == IEC958_AES0_CON_EMPHASIS_5015)
new 753 sound/pci/ac97/ac97_codec.c if (!(new & IEC958_AES0_CON_NOT_COPYRIGHT))
new 755 sound/pci/ac97/ac97_codec.c val |= ((new >> 8) & 0xff) << 4; // category + original
new 756 sound/pci/ac97/ac97_codec.c switch ((new >> 24) & 0xff) {
new 765 sound/pci/ac97/ac97_codec.c change = ac97->spdif_status != new;
new 766 sound/pci/ac97/ac97_codec.c ac97->spdif_status = new;
new 778 sound/pci/ac97/ac97_codec.c v = new & (IEC958_AES0_CON_EMPHASIS_5015|IEC958_AES0_CON_NOT_COPYRIGHT) ? 0 : AC97_CXR_COPYRGT;
new 779 sound/pci/ac97/ac97_codec.c v |= new & IEC958_AES0_NONAUDIO ? AC97_CXR_SPDIF_AC3 : AC97_CXR_SPDIF_PCM;
new 810 sound/pci/ac97/ac97_codec.c unsigned short value, old, new;
new 819 sound/pci/ac97/ac97_codec.c new = (old & ~mask) | value;
new 820 sound/pci/ac97/ac97_codec.c change = old != new;
new 321 sound/pci/azt3328.c u8 prev = inb(reg), new;
new 323 sound/pci/azt3328.c new = (do_set) ? (prev|mask) : (prev & ~mask);
new 326 sound/pci/azt3328.c outb(new, reg);
new 327 sound/pci/azt3328.c if (new != prev)
new 297 sound/pci/es1938.c unsigned char old, new, oval;
new 303 sound/pci/es1938.c new = (old & ~mask) | (val & mask);
new 304 sound/pci/es1938.c outb(new, SLSB_REG(chip, MIXERDATA));
new 307 sound/pci/es1938.c reg, old, new);
new 384 sound/pci/es1938.c unsigned char old, new, oval;
new 392 sound/pci/es1938.c new = (old & ~mask) | (val & mask);
new 393 sound/pci/es1938.c snd_es1938_write_cmd(chip, new);
new 396 sound/pci/es1938.c reg, old, new);
new 787 sound/pci/es1938.c size_t old, new;
new 790 sound/pci/es1938.c while ((new = inw(SLDM_REG(chip, DMACOUNT))) != old)
new 791 sound/pci/es1938.c old = new;
new 792 sound/pci/es1938.c ptr = chip->dma1_size - 1 - new;
new 828 sound/pci/es1938.c size_t old, new;
new 832 sound/pci/es1938.c while ((new = inw(SLDM_REG(chip, DMACOUNT))) != old)
new 833 sound/pci/es1938.c old = new;
new 834 sound/pci/es1938.c ptr = chip->dma1_size - 1 - new;
new 222 sound/pci/fm801.c unsigned short old, new;
new 226 sound/pci/fm801.c new = (old & ~mask) | value;
new 227 sound/pci/fm801.c change = old != new;
new 229 sound/pci/fm801.c outw(new, chip->port + reg);
new 1034 sound/pci/ice1712/aureon.c unsigned short new, old;
new 1040 sound/pci/ice1712/aureon.c new = (~ucontrol->value.integer.value[i]<<5&0x20) | (old&~0x20);
new 1041 sound/pci/ice1712/aureon.c if (new != old) {
new 1042 sound/pci/ice1712/aureon.c wm_put(ice, WM_ADC_GAIN + i, new);
new 529 sound/pci/ice1712/juli.c unsigned int old, new;
new 533 sound/pci/ice1712/juli.c new = (old & ~GPIO_RATE_MASK) | get_gpio_val(rate);
new 538 sound/pci/ice1712/juli.c ice->gpio.set_data(ice, new);
new 96 sound/pci/ice1712/prodigy192.c unsigned char new, old;
new 99 sound/pci/ice1712/prodigy192.c new = (~mute << 7 & 0x80) | (old & ~0x80);
new 100 sound/pci/ice1712/prodigy192.c change = (new != old);
new 103 sound/pci/ice1712/prodigy192.c stac9460_put(ice, idx, new);
new 216 sound/pci/ice1712/prodigy192.c unsigned char new, old;
new 223 sound/pci/ice1712/prodigy192.c new = (~ucontrol->value.integer.value[i]<<7&0x80) | (old&~0x80);
new 224 sound/pci/ice1712/prodigy192.c change = (new != old);
new 226 sound/pci/ice1712/prodigy192.c stac9460_put(ice, reg, new);
new 310 sound/pci/ice1712/prodigy192.c unsigned char new, old;
new 313 sound/pci/ice1712/prodigy192.c new = (ucontrol->value.enumerated.item[0] << 7 & 0x80) | (old & ~0x80);
new 314 sound/pci/ice1712/prodigy192.c change = (new != old);
new 316 sound/pci/ice1712/prodigy192.c stac9460_put(ice, STAC946X_GENERAL_PURPOSE, new);
new 324 sound/pci/ice1712/prodigy192.c unsigned char old, new;
new 332 sound/pci/ice1712/prodigy192.c new = 0x08; /* 256x, base rate mode */
new 334 sound/pci/ice1712/prodigy192.c new = 0x11; /* 256x, mid rate mode */
new 336 sound/pci/ice1712/prodigy192.c new = 0x12; /* 128x, high rate mode */
new 338 sound/pci/ice1712/prodigy192.c if (old == new)
new 348 sound/pci/ice1712/prodigy192.c stac9460_put(ice, STAC946X_MASTER_CLOCKING, new);
new 590 sound/pci/ice1712/prodigy192.c unsigned char new, old, itemvalue;
new 597 sound/pci/ice1712/prodigy192.c new = (itemvalue & AK4114_IPS0) | (old & ~AK4114_IPS0);
new 598 sound/pci/ice1712/prodigy192.c change = (new != old);
new 600 sound/pci/ice1712/prodigy192.c prodigy192_ak4114_write(ice, AK4114_REG_IO1, new);
new 101 sound/pci/ice1712/wtm.c unsigned char new, old;
new 108 sound/pci/ice1712/wtm.c new = (~ucontrol->value.integer.value[0] << 7 & 0x80) |
new 110 sound/pci/ice1712/wtm.c change = (new != old);
new 112 sound/pci/ice1712/wtm.c stac9460_put(ice, idx, new);
new 113 sound/pci/ice1712/wtm.c stac9460_2_put(ice, idx, new);
new 122 sound/pci/ice1712/wtm.c new = (~ucontrol->value.integer.value[0] << 7 & 0x80) |
new 124 sound/pci/ice1712/wtm.c change = (new != old);
new 127 sound/pci/ice1712/wtm.c stac9460_put(ice, idx, new);
new 129 sound/pci/ice1712/wtm.c stac9460_2_put(ice, idx - 6, new);
new 241 sound/pci/ice1712/wtm.c unsigned char new, old;
new 250 sound/pci/ice1712/wtm.c new = (~ucontrol->value.integer.value[i]<<7&0x80) |
new 252 sound/pci/ice1712/wtm.c change = (new != old);
new 254 sound/pci/ice1712/wtm.c stac9460_put(ice, reg, new);
new 260 sound/pci/ice1712/wtm.c new = (~ucontrol->value.integer.value[i]<<7&0x80) |
new 262 sound/pci/ice1712/wtm.c change = (new != old);
new 264 sound/pci/ice1712/wtm.c stac9460_2_put(ice, reg, new);
new 366 sound/pci/ice1712/wtm.c unsigned char new, old;
new 374 sound/pci/ice1712/wtm.c new = (~ucontrol->value.integer.value[0] << 7 & 0x80) | (old & ~0x80);
new 375 sound/pci/ice1712/wtm.c change = (new != old);
new 378 sound/pci/ice1712/wtm.c stac9460_put(ice, STAC946X_GENERAL_PURPOSE, new);
new 380 sound/pci/ice1712/wtm.c stac9460_2_put(ice, STAC946X_GENERAL_PURPOSE, new);
new 1071 sound/soc/soc-core.c unsigned short old, new;
new 1075 sound/soc/soc-core.c new = (old & ~mask) | value;
new 1076 sound/soc/soc-core.c change = old != new;
new 1078 sound/soc/soc-core.c snd_soc_write(codec, reg, new);
new 1101 sound/soc/soc-core.c unsigned short old, new;
new 1105 sound/soc/soc-core.c new = (old & ~mask) | value;
new 1106 sound/soc/soc-core.c change = old != new;
new 212 sound/soc/soc-dapm.c unsigned short old, new;
new 229 sound/soc/soc-dapm.c new = (old & ~(0x1 << widget->shift)) | (power << widget->shift);
new 231 sound/soc/soc-dapm.c change = old != new;
new 235 sound/soc/soc-dapm.c snd_soc_write(codec, widget->reg, new);
new 239 sound/soc/soc-dapm.c old, new, change);
new 1079 sound/soc/soc-dapm.c if (w->new)
new 1107 sound/soc/soc-dapm.c w->new = 1;
new 453 virt/kvm/kvm_main.c struct kvm_memory_slot old, new;
new 473 virt/kvm/kvm_main.c new = old = *memslot;
new 475 virt/kvm/kvm_main.c new.base_gfn = base_gfn;
new 476 virt/kvm/kvm_main.c new.npages = npages;
new 477 virt/kvm/kvm_main.c new.flags = mem->flags;
new 497 virt/kvm/kvm_main.c if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
new 498 virt/kvm/kvm_main.c new.dirty_bitmap = NULL;
new 504 virt/kvm/kvm_main.c if (npages && !new.rmap) {
new 505 virt/kvm/kvm_main.c new.rmap = vmalloc(npages * sizeof(struct page *));
new 507 virt/kvm/kvm_main.c if (!new.rmap)
new 510 virt/kvm/kvm_main.c memset(new.rmap, 0, npages * sizeof(*new.rmap));
new 512 virt/kvm/kvm_main.c new.user_alloc = user_alloc;
new 519 virt/kvm/kvm_main.c new.userspace_addr = mem->userspace_addr;
new 521 virt/kvm/kvm_main.c new.userspace_addr = 0;
new 523 virt/kvm/kvm_main.c if (npages && !new.lpage_info) {
new 530 virt/kvm/kvm_main.c new.lpage_info = vmalloc(largepages * sizeof(*new.lpage_info));
new 532 virt/kvm/kvm_main.c if (!new.lpage_info)
new 535 virt/kvm/kvm_main.c memset(new.lpage_info, 0, largepages * sizeof(*new.lpage_info));
new 538 virt/kvm/kvm_main.c new.lpage_info[0].write_count = 1;
new 540 virt/kvm/kvm_main.c new.lpage_info[largepages-1].write_count = 1;
new 544 virt/kvm/kvm_main.c if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
new 547 virt/kvm/kvm_main.c new.dirty_bitmap = vmalloc(dirty_bytes);
new 548 virt/kvm/kvm_main.c if (!new.dirty_bitmap)
new 550 virt/kvm/kvm_main.c memset(new.dirty_bitmap, 0, dirty_bytes);
new 561 virt/kvm/kvm_main.c *memslot = new;
new 572 virt/kvm/kvm_main.c kvm_free_physmem_slot(&old, &new);
new 576 virt/kvm/kvm_main.c kvm_free_physmem_slot(&new, &old);
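
The net/sched/sch_*.c lines near the top of this section all come from the per-qdisc graft operations, which share one idiom: a NULL replacement child is first substituted with &noop_qdisc, then the child pointer is swapped with xchg() and the previous child is handed back through *old. A minimal sketch of that idiom follows; my_sched_data and my_graft are hypothetical names, not taken from any file in the listing.

	/* Illustrative sketch of the graft idiom only, not code from this listing. */
	struct my_sched_data {
		struct Qdisc	*qdisc;			/* currently attached child qdisc */
	};

	static int my_graft(struct Qdisc *sch, struct Qdisc *new, struct Qdisc **old)
	{
		struct my_sched_data *q = qdisc_priv(sch);

		if (new == NULL)
			new = &noop_qdisc;		/* never leave the child pointer NULL */

		sch_tree_lock(sch);
		*old = xchg(&q->qdisc, new);		/* atomically swap in the replacement */
		qdisc_reset(*old);			/* flush packets still queued on the old child */
		sch_tree_unlock(sch);

		return 0;
	}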
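
Most of the sound/* and sound/soc/* lines above belong to update-bits helpers built on the same read-modify-write pattern: read the current (usually cached) register value, replace only the masked bits, skip the write when nothing changed, and report whether a change occurred. A minimal sketch of that pattern; snd_my_update_bits and struct my_chip are hypothetical, not taken from the files listed.

	/* Illustrative sketch of the masked read-modify-write idiom. */
	struct my_chip {
		unsigned short	regs[0x80];		/* cached register values */
	};

	static int snd_my_update_bits(struct my_chip *chip, unsigned short reg,
				      unsigned short mask, unsigned short value)
	{
		unsigned short old, new;

		old = chip->regs[reg];
		new = (old & ~mask) | (value & mask);	/* change only the masked bits */
		if (old == new)
			return 0;			/* no change, skip the write */

		chip->regs[reg] = new;
		/* a real driver would also write "new" out to the hardware here */
		return 1;				/* report that the value changed */
	}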