l 235 arch/x86/boot/boot.h #define memcpy(d,s,l) __builtin_memcpy(d,s,l) l 236 arch/x86/boot/boot.h #define memset(d,c,l) __builtin_memset(d,c,l) l 827 arch/x86/kernel/apic_32.c unsigned int l, h; l 829 arch/x86/kernel/apic_32.c rdmsr(MSR_IA32_APICBASE, l, h); l 830 arch/x86/kernel/apic_32.c l &= ~MSR_IA32_APICBASE_ENABLE; l 831 arch/x86/kernel/apic_32.c wrmsr(MSR_IA32_APICBASE, l, h); l 1185 arch/x86/kernel/apic_32.c u32 h, l, features; l 1221 arch/x86/kernel/apic_32.c rdmsr(MSR_IA32_APICBASE, l, h); l 1222 arch/x86/kernel/apic_32.c if (!(l & MSR_IA32_APICBASE_ENABLE)) { l 1225 arch/x86/kernel/apic_32.c l &= ~MSR_IA32_APICBASE_BASE; l 1226 arch/x86/kernel/apic_32.c l |= MSR_IA32_APICBASE_ENABLE | APIC_DEFAULT_PHYS_BASE; l 1227 arch/x86/kernel/apic_32.c wrmsr(MSR_IA32_APICBASE, l, h); l 1244 arch/x86/kernel/apic_32.c rdmsr(MSR_IA32_APICBASE, l, h); l 1245 arch/x86/kernel/apic_32.c if (l & MSR_IA32_APICBASE_ENABLE) l 1246 arch/x86/kernel/apic_32.c mp_lapic_addr = l & MSR_IA32_APICBASE_BASE; l 1632 arch/x86/kernel/apic_32.c unsigned int l, h; l 1655 arch/x86/kernel/apic_32.c rdmsr(MSR_IA32_APICBASE, l, h); l 1656 arch/x86/kernel/apic_32.c l &= ~MSR_IA32_APICBASE_BASE; l 1657 arch/x86/kernel/apic_32.c l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr; l 1658 arch/x86/kernel/apic_32.c wrmsr(MSR_IA32_APICBASE, l, h); l 712 arch/x86/kernel/apic_64.c unsigned int l, h; l 714 arch/x86/kernel/apic_64.c rdmsr(MSR_IA32_APICBASE, l, h); l 715 arch/x86/kernel/apic_64.c l &= ~MSR_IA32_APICBASE_ENABLE; l 716 arch/x86/kernel/apic_64.c wrmsr(MSR_IA32_APICBASE, l, h); l 1575 arch/x86/kernel/apic_64.c unsigned int l, h; l 1598 arch/x86/kernel/apic_64.c rdmsr(MSR_IA32_APICBASE, l, h); l 1599 arch/x86/kernel/apic_64.c l &= ~MSR_IA32_APICBASE_BASE; l 1600 arch/x86/kernel/apic_64.c l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr; l 1601 arch/x86/kernel/apic_64.c wrmsr(MSR_IA32_APICBASE, l, h); l 56 arch/x86/kernel/cpu/amd.c u32 l, h; l 103 arch/x86/kernel/cpu/amd.c rdmsr(MSR_K6_WHCR, l, h); l 104 arch/x86/kernel/cpu/amd.c if ((l&0x0000FFFF) == 0) { l 106 arch/x86/kernel/cpu/amd.c l = (1<<0)|((mbytes/4)<<1); l 109 arch/x86/kernel/cpu/amd.c wrmsr(MSR_K6_WHCR, l, h); l 124 arch/x86/kernel/cpu/amd.c rdmsr(MSR_K6_WHCR, l, h); l 125 arch/x86/kernel/cpu/amd.c if ((l&0xFFFF0000) == 0) { l 127 arch/x86/kernel/cpu/amd.c l = ((mbytes>>2)<<22)|(1<<16); l 130 arch/x86/kernel/cpu/amd.c wrmsr(MSR_K6_WHCR, l, h); l 148 arch/x86/kernel/cpu/amd.c u32 l, h; l 158 arch/x86/kernel/cpu/amd.c rdmsr(MSR_K7_HWCR, l, h); l 159 arch/x86/kernel/cpu/amd.c l &= ~0x00008000; l 160 arch/x86/kernel/cpu/amd.c wrmsr(MSR_K7_HWCR, l, h); l 171 arch/x86/kernel/cpu/amd.c rdmsr(MSR_K7_CLK_CTL, l, h); l 172 arch/x86/kernel/cpu/amd.c if ((l & 0xfff00000) != 0x20000000) { l 173 arch/x86/kernel/cpu/amd.c printk ("CPU: CLK_CTL MSR was %x. 
Reprogramming to %x\n", l, l 174 arch/x86/kernel/cpu/amd.c ((l & 0x000fffff)|0x20000000)); l 175 arch/x86/kernel/cpu/amd.c wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h); l 59 arch/x86/kernel/cpu/cpufreq/p4-clockmod.c u32 l, h; l 64 arch/x86/kernel/cpu/cpufreq/p4-clockmod.c rdmsr_on_cpu(cpu, MSR_IA32_THERM_STATUS, &l, &h); l 66 arch/x86/kernel/cpu/cpufreq/p4-clockmod.c if (l & 0x01) l 72 arch/x86/kernel/cpu/cpufreq/p4-clockmod.c rdmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, &l, &h); l 75 arch/x86/kernel/cpu/cpufreq/p4-clockmod.c wrmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, l & ~(1<<4), h); l 84 arch/x86/kernel/cpu/cpufreq/p4-clockmod.c l = (l & ~14); l 85 arch/x86/kernel/cpu/cpufreq/p4-clockmod.c l = l | (1<<4) | ((newstate & 0x7)<<1); l 86 arch/x86/kernel/cpu/cpufreq/p4-clockmod.c wrmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, l, h); l 247 arch/x86/kernel/cpu/cpufreq/p4-clockmod.c u32 l, h; l 249 arch/x86/kernel/cpu/cpufreq/p4-clockmod.c rdmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, &l, &h); l 251 arch/x86/kernel/cpu/cpufreq/p4-clockmod.c if (l & 0x10) { l 252 arch/x86/kernel/cpu/cpufreq/p4-clockmod.c l = l >> 1; l 253 arch/x86/kernel/cpu/cpufreq/p4-clockmod.c l &= 0x7; l 255 arch/x86/kernel/cpu/cpufreq/p4-clockmod.c l = DC_DISABLE; l 257 arch/x86/kernel/cpu/cpufreq/p4-clockmod.c if (l != DC_DISABLE) l 258 arch/x86/kernel/cpu/cpufreq/p4-clockmod.c return (stock_freq * l / 8); l 324 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c unsigned l, h; l 333 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c rdmsr(MSR_IA32_PERF_STATUS, l, h); l 334 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c clock_freq = extract_clock(l, cpu, 0); l 343 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c rdmsr(MSR_IA32_PERF_CTL, l, h); l 344 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c clock_freq = extract_clock(l, cpu, 1); l 356 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c unsigned l, h; l 391 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c rdmsr(MSR_IA32_MISC_ENABLE, l, h); l 393 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c if (!(l & (1<<16))) { l 394 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c l |= (1<<16); l 395 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c dprintk("trying to enable Enhanced SpeedStep (%x)\n", l); l 396 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c wrmsr(MSR_IA32_MISC_ENABLE, l, h); l 399 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c rdmsr(MSR_IA32_MISC_ENABLE, l, h); l 400 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c if (!(l & (1<<16))) { l 75 arch/x86/kernel/cpu/mcheck/k7.c u32 l, h; l 85 arch/x86/kernel/cpu/mcheck/k7.c rdmsr(MSR_IA32_MCG_CAP, l, h); l 86 arch/x86/kernel/cpu/mcheck/k7.c if (l & (1<<8)) /* Control register present ? 
*/ l 88 arch/x86/kernel/cpu/mcheck/k7.c nr_mce_banks = l & 0xff; l 35 arch/x86/kernel/cpu/mcheck/mce_intel_64.c u32 l, h; l 49 arch/x86/kernel/cpu/mcheck/mce_intel_64.c rdmsr(MSR_IA32_MISC_ENABLE, l, h); l 51 arch/x86/kernel/cpu/mcheck/mce_intel_64.c if ((l & (1 << 3)) && (h & APIC_DM_SMI)) { l 57 arch/x86/kernel/cpu/mcheck/mce_intel_64.c if (cpu_has(c, X86_FEATURE_TM2) && (l & (1 << 13))) l 71 arch/x86/kernel/cpu/mcheck/mce_intel_64.c rdmsr(MSR_IA32_THERM_INTERRUPT, l, h); l 72 arch/x86/kernel/cpu/mcheck/mce_intel_64.c wrmsr(MSR_IA32_THERM_INTERRUPT, l | 0x03, h); l 74 arch/x86/kernel/cpu/mcheck/mce_intel_64.c rdmsr(MSR_IA32_MISC_ENABLE, l, h); l 75 arch/x86/kernel/cpu/mcheck/mce_intel_64.c wrmsr(MSR_IA32_MISC_ENABLE, l | (1 << 3), h); l 77 arch/x86/kernel/cpu/mcheck/mce_intel_64.c l = apic_read(APIC_LVTTHMR); l 78 arch/x86/kernel/cpu/mcheck/mce_intel_64.c apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED); l 71 arch/x86/kernel/cpu/mcheck/p4.c u32 l, h; l 86 arch/x86/kernel/cpu/mcheck/p4.c rdmsr(MSR_IA32_MISC_ENABLE, l, h); l 88 arch/x86/kernel/cpu/mcheck/p4.c if ((l & (1<<3)) && (h & APIC_DM_SMI)) { l 107 arch/x86/kernel/cpu/mcheck/p4.c rdmsr(MSR_IA32_THERM_INTERRUPT, l, h); l 108 arch/x86/kernel/cpu/mcheck/p4.c wrmsr(MSR_IA32_THERM_INTERRUPT, l | 0x03 , h); l 113 arch/x86/kernel/cpu/mcheck/p4.c rdmsr(MSR_IA32_MISC_ENABLE, l, h); l 114 arch/x86/kernel/cpu/mcheck/p4.c wrmsr(MSR_IA32_MISC_ENABLE, l | (1<<3), h); l 116 arch/x86/kernel/cpu/mcheck/p4.c l = apic_read(APIC_LVTTHMR); l 117 arch/x86/kernel/cpu/mcheck/p4.c apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED); l 223 arch/x86/kernel/cpu/mcheck/p4.c u32 l, h; l 230 arch/x86/kernel/cpu/mcheck/p4.c rdmsr(MSR_IA32_MCG_CAP, l, h); l 231 arch/x86/kernel/cpu/mcheck/p4.c if (l & (1<<8)) /* Control register present ? */ l 233 arch/x86/kernel/cpu/mcheck/p4.c nr_mce_banks = l & 0xff; l 245 arch/x86/kernel/cpu/mcheck/p4.c rdmsr(MSR_IA32_MCG_CAP, l, h); l 246 arch/x86/kernel/cpu/mcheck/p4.c if (l & (1<<9)) {/* MCG_EXT_P */ l 247 arch/x86/kernel/cpu/mcheck/p4.c mce_num_extended_msrs = (l >> 16) & 0xff; l 33 arch/x86/kernel/cpu/mcheck/p5.c u32 l, h; l 46 arch/x86/kernel/cpu/mcheck/p5.c rdmsr(MSR_IA32_P5_MC_ADDR, l, h); l 47 arch/x86/kernel/cpu/mcheck/p5.c rdmsr(MSR_IA32_P5_MC_TYPE, l, h); l 87 arch/x86/kernel/cpu/mcheck/p6.c u32 l, h; l 103 arch/x86/kernel/cpu/mcheck/p6.c rdmsr(MSR_IA32_MCG_CAP, l, h); l 104 arch/x86/kernel/cpu/mcheck/p6.c if (l & (1<<8)) /* Control register present ? 
*/ l 106 arch/x86/kernel/cpu/mcheck/p6.c nr_mce_banks = l & 0xff; l 1518 arch/x86/kernel/cpu/mtrr/main.c u32 l, h; l 1525 arch/x86/kernel/cpu/mtrr/main.c if (rdmsr_safe(MSR_K8_SYSCFG, &l, &h) < 0) l 1531 arch/x86/kernel/cpu/mtrr/main.c if ((l & (Tom2Enabled | Tom2ForceMemTypeWB)) == l 1149 arch/x86/kernel/ptrace.c case offsetof(struct user32, regs.l): \ l 1215 arch/x86/kernel/ptrace.c case offsetof(struct user32, regs.l): \ l 120 arch/x86/kernel/tls.c info->lm = desc->l; l 565 arch/x86/kernel/vmi_32.c unsigned l, h; l 566 arch/x86/kernel/vmi_32.c rdmsr(MSR_EFER, l, h); l 567 arch/x86/kernel/vmi_32.c ap.efer = (unsigned long long) h << 32 | l; l 55 arch/x86/kvm/i8254.c } l; l 60 arch/x86/kvm/i8254.c rl = (u64)u.l.low * (u64)b; l 61 arch/x86/kvm/i8254.c rh = (u64)u.l.high * (u64)b; l 63 arch/x86/kvm/i8254.c res.l.high = div64_u64(rh, c); l 64 arch/x86/kvm/i8254.c res.l.low = div64_u64(((mod_64(rh, c) << 32) + (rl & 0xffffffff)), c); l 789 arch/x86/kvm/svm.c var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1; l 911 arch/x86/kvm/svm.c s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT; l 1616 arch/x86/kvm/vmx.c var->l = (ar >> 13) & 1; l 1648 arch/x86/kvm/vmx.c ar |= (var->l & 1) << 13; l 1691 arch/x86/kvm/vmx.c *l = (ar >> 13) & 1; l 3104 arch/x86/kvm/x86.c *l = cs.l; l 3200 arch/x86/kvm/x86.c kvm_desct->l = seg_desc->l; l 8 arch/x86/lib/msr-on-cpu.c u32 l, h; l 16 arch/x86/lib/msr-on-cpu.c rdmsr(rv->msr_no, rv->l, rv->h); l 23 arch/x86/lib/msr-on-cpu.c wrmsr(rv->msr_no, rv->l, rv->h); l 33 arch/x86/lib/msr-on-cpu.c *l = rv.l; l 45 arch/x86/lib/msr-on-cpu.c rv.l = l; l 58 arch/x86/lib/msr-on-cpu.c rv->err = rdmsr_safe(rv->msr_no, &rv->l, &rv->h); l 65 arch/x86/lib/msr-on-cpu.c rv->err = wrmsr_safe(rv->msr_no, rv->l, rv->h); l 75 arch/x86/lib/msr-on-cpu.c *l = rv.l; l 87 arch/x86/lib/msr-on-cpu.c rv.l = l; l 19 arch/x86/math-emu/reg_constant.c #define MAKE_REG(s, e, l, h) { l, h, \ l 302 arch/x86/math-emu/reg_ld_str.c long long l = 0; l 309 arch/x86/math-emu/reg_ld_str.c l *= 10; l 313 arch/x86/math-emu/reg_ld_str.c l += bcd >> 4; l 314 arch/x86/math-emu/reg_ld_str.c l *= 10; l 315 arch/x86/math-emu/reg_ld_str.c l += bcd & 0x0f; l 323 arch/x86/math-emu/reg_ld_str.c if (l == 0) { l 328 arch/x86/math-emu/reg_ld_str.c significand(st0_ptr) = l; l 380 arch/x86/math-emu/reg_ld_str.c unsigned long l[2]; l 386 arch/x86/math-emu/reg_ld_str.c l[0] = 0; l 387 arch/x86/math-emu/reg_ld_str.c l[1] = 0; l 416 arch/x86/math-emu/reg_ld_str.c l[0] = tmp.sigl; l 417 arch/x86/math-emu/reg_ld_str.c l[1] = tmp.sigh; l 466 arch/x86/math-emu/reg_ld_str.c l[0] = (tmp.sigl >> 11) | (tmp.sigh << 21); l 467 arch/x86/math-emu/reg_ld_str.c l[1] = ((tmp.sigh >> 11) & 0xfffff); l 480 arch/x86/math-emu/reg_ld_str.c l[1] = 0x7ff00000; /* Set to + INF */ l 489 arch/x86/math-emu/reg_ld_str.c l[1] |= (((exp + DOUBLE_Ebias) & 0x7ff) << 20); l 508 arch/x86/math-emu/reg_ld_str.c l[1] = 0x7ff00000; l 514 arch/x86/math-emu/reg_ld_str.c l[0] = l 517 arch/x86/math-emu/reg_ld_str.c l[1] = ((st0_ptr->sigh >> 11) & 0xfffff); l 523 arch/x86/math-emu/reg_ld_str.c l[1] |= (0x40000000 >> 11); l 525 arch/x86/math-emu/reg_ld_str.c l[1] |= 0x7ff00000; l 531 arch/x86/math-emu/reg_ld_str.c l[1] = 0xfff80000; l 551 arch/x86/math-emu/reg_ld_str.c l[1] |= 0x80000000; l 555 arch/x86/math-emu/reg_ld_str.c FPU_put_user(l[0], (unsigned long __user *)dfloat); l 556 arch/x86/math-emu/reg_ld_str.c FPU_put_user(l[1], 1 + (unsigned long __user *)dfloat); l 598 arch/x86/mm/init_32.c unsigned int v[4], l, h; l 604 arch/x86/mm/init_32.c rdmsr(MSR_EFER, l, h); l 605 
arch/x86/mm/init_32.c l |= EFER_NX; l 606 arch/x86/mm/init_32.c wrmsr(MSR_EFER, l, h); l 30 arch/x86/oprofile/op_model_amd.c #define CTR_READ(l, h, msrs, c) do {rdmsr(msrs->counters[(c)].addr, (l), (h)); } while (0) l 31 arch/x86/oprofile/op_model_amd.c #define CTR_WRITE(l, msrs, c) do {wrmsr(msrs->counters[(c)].addr, -(unsigned int)(l), -1); } while (0) l 35 arch/x86/oprofile/op_model_amd.c #define CTRL_READ(l, h, msrs, c) do {rdmsr(msrs->controls[(c)].addr, (l), (h)); } while (0) l 36 arch/x86/oprofile/op_model_amd.c #define CTRL_WRITE(l, h, msrs, c) do {wrmsr(msrs->controls[(c)].addr, (l), (h)); } while (0) l 371 arch/x86/oprofile/op_model_p4.c #define CTR_READ(l, h, i) do {rdmsr(p4_counters[(i)].counter_address, (l), (h)); } while (0) l 372 arch/x86/oprofile/op_model_p4.c #define CTR_WRITE(l, i) do {wrmsr(p4_counters[(i)].counter_address, -(u32)(l), -1); } while (0) l 26 arch/x86/oprofile/op_model_ppro.c #define CTR_READ(l, h, msrs, c) do {rdmsr(msrs->counters[(c)].addr, (l), (h)); } while (0) l 28 arch/x86/oprofile/op_model_ppro.c do {wrmsr(msrs->counters[(c)].addr, -(u32)(l), 0); } while (0) l 32 arch/x86/oprofile/op_model_ppro.c #define CTRL_READ(l, h, msrs, c) do {rdmsr((msrs->controls[(c)].addr), (l), (h)); } while (0) l 33 arch/x86/oprofile/op_model_ppro.c #define CTRL_WRITE(l, h, msrs, c) do {wrmsr((msrs->controls[(c)].addr), (l), (h)); } while (0) l 22 arch/x86/pci/legacy.c u32 l; l 27 arch/x86/pci/legacy.c if (!raw_pci_read(0, n, devfn, PCI_VENDOR_ID, 2, &l) && l 28 arch/x86/pci/legacy.c l != 0x0000 && l != 0xffff) { l 29 arch/x86/pci/legacy.c DBG("Found device at %02x:%02x [%04x]\n", n, devfn, l); l 179 arch/x86/pci/mmconfig-shared.c u32 l; l 195 arch/x86/pci/mmconfig-shared.c raw_pci_ops->read(0, bus, devfn, 0, 4, &l); l 196 arch/x86/pci/mmconfig-shared.c vendor = l & 0xffff; l 197 arch/x86/pci/mmconfig-shared.c device = (l >> 16) & 0xffff; l 50 arch/x86/xen/time.c u32 h, l; l 61 arch/x86/xen/time.c l = p32[0]; l 65 arch/x86/xen/time.c ret = (((u64)h) << 32) | l; l 307 block/blk-settings.c #define min_not_zero(l, r) (l == 0) ? r : ((r == 0) ? 
l : min(l, r)) l 585 crypto/cast5.c u32 l, r, t; l 596 crypto/cast5.c l = be32_to_cpu(src[0]); l 608 crypto/cast5.c t = l; l = r; r = t ^ F1(r, Km[0], Kr[0]); l 609 crypto/cast5.c t = l; l = r; r = t ^ F2(r, Km[1], Kr[1]); l 610 crypto/cast5.c t = l; l = r; r = t ^ F3(r, Km[2], Kr[2]); l 611 crypto/cast5.c t = l; l = r; r = t ^ F1(r, Km[3], Kr[3]); l 612 crypto/cast5.c t = l; l = r; r = t ^ F2(r, Km[4], Kr[4]); l 613 crypto/cast5.c t = l; l = r; r = t ^ F3(r, Km[5], Kr[5]); l 614 crypto/cast5.c t = l; l = r; r = t ^ F1(r, Km[6], Kr[6]); l 615 crypto/cast5.c t = l; l = r; r = t ^ F2(r, Km[7], Kr[7]); l 616 crypto/cast5.c t = l; l = r; r = t ^ F3(r, Km[8], Kr[8]); l 617 crypto/cast5.c t = l; l = r; r = t ^ F1(r, Km[9], Kr[9]); l 618 crypto/cast5.c t = l; l = r; r = t ^ F2(r, Km[10], Kr[10]); l 619 crypto/cast5.c t = l; l = r; r = t ^ F3(r, Km[11], Kr[11]); l 620 crypto/cast5.c t = l; l = r; r = t ^ F1(r, Km[12], Kr[12]); l 621 crypto/cast5.c t = l; l = r; r = t ^ F2(r, Km[13], Kr[13]); l 622 crypto/cast5.c t = l; l = r; r = t ^ F3(r, Km[14], Kr[14]); l 623 crypto/cast5.c t = l; l = r; r = t ^ F1(r, Km[15], Kr[15]); l 625 crypto/cast5.c t = l; l = r; r = t ^ F1(r, Km[0], Kr[0]); l 626 crypto/cast5.c t = l; l = r; r = t ^ F2(r, Km[1], Kr[1]); l 627 crypto/cast5.c t = l; l = r; r = t ^ F3(r, Km[2], Kr[2]); l 628 crypto/cast5.c t = l; l = r; r = t ^ F1(r, Km[3], Kr[3]); l 629 crypto/cast5.c t = l; l = r; r = t ^ F2(r, Km[4], Kr[4]); l 630 crypto/cast5.c t = l; l = r; r = t ^ F3(r, Km[5], Kr[5]); l 631 crypto/cast5.c t = l; l = r; r = t ^ F1(r, Km[6], Kr[6]); l 632 crypto/cast5.c t = l; l = r; r = t ^ F2(r, Km[7], Kr[7]); l 633 crypto/cast5.c t = l; l = r; r = t ^ F3(r, Km[8], Kr[8]); l 634 crypto/cast5.c t = l; l = r; r = t ^ F1(r, Km[9], Kr[9]); l 635 crypto/cast5.c t = l; l = r; r = t ^ F2(r, Km[10], Kr[10]); l 636 crypto/cast5.c t = l; l = r; r = t ^ F3(r, Km[11], Kr[11]); l 642 crypto/cast5.c dst[1] = cpu_to_be32(l); l 650 crypto/cast5.c u32 l, r, t; l 658 crypto/cast5.c l = be32_to_cpu(src[0]); l 662 crypto/cast5.c t = l; l = r; r = t ^ F1(r, Km[15], Kr[15]); l 663 crypto/cast5.c t = l; l = r; r = t ^ F3(r, Km[14], Kr[14]); l 664 crypto/cast5.c t = l; l = r; r = t ^ F2(r, Km[13], Kr[13]); l 665 crypto/cast5.c t = l; l = r; r = t ^ F1(r, Km[12], Kr[12]); l 666 crypto/cast5.c t = l; l = r; r = t ^ F3(r, Km[11], Kr[11]); l 667 crypto/cast5.c t = l; l = r; r = t ^ F2(r, Km[10], Kr[10]); l 668 crypto/cast5.c t = l; l = r; r = t ^ F1(r, Km[9], Kr[9]); l 669 crypto/cast5.c t = l; l = r; r = t ^ F3(r, Km[8], Kr[8]); l 670 crypto/cast5.c t = l; l = r; r = t ^ F2(r, Km[7], Kr[7]); l 671 crypto/cast5.c t = l; l = r; r = t ^ F1(r, Km[6], Kr[6]); l 672 crypto/cast5.c t = l; l = r; r = t ^ F3(r, Km[5], Kr[5]); l 673 crypto/cast5.c t = l; l = r; r = t ^ F2(r, Km[4], Kr[4]); l 674 crypto/cast5.c t = l; l = r; r = t ^ F1(r, Km[3], Kr[3]); l 675 crypto/cast5.c t = l; l = r; r = t ^ F3(r, Km[2], Kr[2]); l 676 crypto/cast5.c t = l; l = r; r = t ^ F2(r, Km[1], Kr[1]); l 677 crypto/cast5.c t = l; l = r; r = t ^ F1(r, Km[0], Kr[0]); l 679 crypto/cast5.c t = l; l = r; r = t ^ F3(r, Km[11], Kr[11]); l 680 crypto/cast5.c t = l; l = r; r = t ^ F2(r, Km[10], Kr[10]); l 681 crypto/cast5.c t = l; l = r; r = t ^ F1(r, Km[9], Kr[9]); l 682 crypto/cast5.c t = l; l = r; r = t ^ F3(r, Km[8], Kr[8]); l 683 crypto/cast5.c t = l; l = r; r = t ^ F2(r, Km[7], Kr[7]); l 684 crypto/cast5.c t = l; l = r; r = t ^ F1(r, Km[6], Kr[6]); l 685 crypto/cast5.c t = l; l = r; r = t ^ F3(r, Km[5], Kr[5]); l 686 crypto/cast5.c t = l; l = r; r 
= t ^ F2(r, Km[4], Kr[4]); l 687 crypto/cast5.c t = l; l = r; r = t ^ F1(r, Km[3], Kr[3]); l 688 crypto/cast5.c t = l; l = r; r = t ^ F3(r, Km[2], Kr[2]); l 689 crypto/cast5.c t = l; l = r; r = t ^ F2(r, Km[1], Kr[1]); l 690 crypto/cast5.c t = l; l = r; r = t ^ F1(r, Km[0], Kr[0]); l 694 crypto/cast5.c dst[1] = cpu_to_be32(l); l 128 crypto/ccm.c unsigned int l = lp + 1; l 142 crypto/ccm.c return set_msg_len(info + 16 - l, cryptlen, l); l 47 crypto/digest.c unsigned int l = sg->length; l 49 crypto/digest.c if (unlikely(l > nbytes)) l 50 crypto/digest.c l = nbytes; l 51 crypto/digest.c nbytes -= l; l 54 crypto/digest.c unsigned int bytes_from_page = min(l, ((unsigned int) l 68 crypto/digest.c l -= bytes; l 76 crypto/digest.c l -= bytes_from_page; l 77 crypto/digest.c } while (l > 0); l 228 crypto/fcrypt.c union lc4 { __be32 l; u8 c[4]; } u; \ l 229 crypto/fcrypt.c u.l = sched ^ R; \ l 240 crypto/fcrypt.c __be32 l, r; l 245 crypto/fcrypt.c F_ENCRYPT(X.r, X.l, ctx->sched[0x0]); l 246 crypto/fcrypt.c F_ENCRYPT(X.l, X.r, ctx->sched[0x1]); l 247 crypto/fcrypt.c F_ENCRYPT(X.r, X.l, ctx->sched[0x2]); l 248 crypto/fcrypt.c F_ENCRYPT(X.l, X.r, ctx->sched[0x3]); l 249 crypto/fcrypt.c F_ENCRYPT(X.r, X.l, ctx->sched[0x4]); l 250 crypto/fcrypt.c F_ENCRYPT(X.l, X.r, ctx->sched[0x5]); l 251 crypto/fcrypt.c F_ENCRYPT(X.r, X.l, ctx->sched[0x6]); l 252 crypto/fcrypt.c F_ENCRYPT(X.l, X.r, ctx->sched[0x7]); l 253 crypto/fcrypt.c F_ENCRYPT(X.r, X.l, ctx->sched[0x8]); l 254 crypto/fcrypt.c F_ENCRYPT(X.l, X.r, ctx->sched[0x9]); l 255 crypto/fcrypt.c F_ENCRYPT(X.r, X.l, ctx->sched[0xa]); l 256 crypto/fcrypt.c F_ENCRYPT(X.l, X.r, ctx->sched[0xb]); l 257 crypto/fcrypt.c F_ENCRYPT(X.r, X.l, ctx->sched[0xc]); l 258 crypto/fcrypt.c F_ENCRYPT(X.l, X.r, ctx->sched[0xd]); l 259 crypto/fcrypt.c F_ENCRYPT(X.r, X.l, ctx->sched[0xe]); l 260 crypto/fcrypt.c F_ENCRYPT(X.l, X.r, ctx->sched[0xf]); l 272 crypto/fcrypt.c __be32 l, r; l 277 crypto/fcrypt.c F_ENCRYPT(X.l, X.r, ctx->sched[0xf]); l 278 crypto/fcrypt.c F_ENCRYPT(X.r, X.l, ctx->sched[0xe]); l 279 crypto/fcrypt.c F_ENCRYPT(X.l, X.r, ctx->sched[0xd]); l 280 crypto/fcrypt.c F_ENCRYPT(X.r, X.l, ctx->sched[0xc]); l 281 crypto/fcrypt.c F_ENCRYPT(X.l, X.r, ctx->sched[0xb]); l 282 crypto/fcrypt.c F_ENCRYPT(X.r, X.l, ctx->sched[0xa]); l 283 crypto/fcrypt.c F_ENCRYPT(X.l, X.r, ctx->sched[0x9]); l 284 crypto/fcrypt.c F_ENCRYPT(X.r, X.l, ctx->sched[0x8]); l 285 crypto/fcrypt.c F_ENCRYPT(X.l, X.r, ctx->sched[0x7]); l 286 crypto/fcrypt.c F_ENCRYPT(X.r, X.l, ctx->sched[0x6]); l 287 crypto/fcrypt.c F_ENCRYPT(X.l, X.r, ctx->sched[0x5]); l 288 crypto/fcrypt.c F_ENCRYPT(X.r, X.l, ctx->sched[0x4]); l 289 crypto/fcrypt.c F_ENCRYPT(X.l, X.r, ctx->sched[0x3]); l 290 crypto/fcrypt.c F_ENCRYPT(X.r, X.l, ctx->sched[0x2]); l 291 crypto/fcrypt.c F_ENCRYPT(X.l, X.r, ctx->sched[0x1]); l 292 crypto/fcrypt.c F_ENCRYPT(X.r, X.l, ctx->sched[0x0]); l 25 crypto/michael_mic.c u32 l, r; l 37 crypto/michael_mic.c r ^= rol32(l, 17); \ l 38 crypto/michael_mic.c l += r; \ l 39 crypto/michael_mic.c r ^= xswap(l); \ l 40 crypto/michael_mic.c l += r; \ l 41 crypto/michael_mic.c r ^= rol32(l, 3); \ l 42 crypto/michael_mic.c l += r; \ l 43 crypto/michael_mic.c r ^= ror32(l, 2); \ l 44 crypto/michael_mic.c l += r; \ l 74 crypto/michael_mic.c mctx->l ^= le32_to_cpup(src); l 75 crypto/michael_mic.c michael_block(mctx->l, mctx->r); l 82 crypto/michael_mic.c mctx->l ^= le32_to_cpup(src++); l 83 crypto/michael_mic.c michael_block(mctx->l, mctx->r); l 103 crypto/michael_mic.c mctx->l ^= 0x5a; l 106 crypto/michael_mic.c 
mctx->l ^= data[0] | 0x5a00; l 109 crypto/michael_mic.c mctx->l ^= data[0] | (data[1] << 8) | 0x5a0000; l 112 crypto/michael_mic.c mctx->l ^= data[0] | (data[1] << 8) | (data[2] << 16) | l 116 crypto/michael_mic.c michael_block(mctx->l, mctx->r); l 118 crypto/michael_mic.c michael_block(mctx->l, mctx->r); l 120 crypto/michael_mic.c dst[0] = cpu_to_le32(mctx->l); l 136 crypto/michael_mic.c mctx->l = le32_to_cpu(data[0]); l 550 crypto/twofish_common.c x = CALC_K_2 (k, l, k, l, 0); \ l 563 crypto/twofish_common.c x = CALC_K192_2 (l, l, k, k, 0); \ l 576 crypto/twofish_common.c x = CALC_K256_2 (k, l, 0); \ l 111 fs/9p/fid.c int i, n, l, clone, any, access; l 183 fs/9p/fid.c l = min(n - i, P9_MAXWELEM); l 184 fs/9p/fid.c fid = p9_client_walk(fid, l, &wnames[i], clone); l 190 fs/9p/fid.c i += l; l 71 fs/binfmt_misc.c struct list_head *l; l 73 fs/binfmt_misc.c list_for_each(l, &entries) { l 74 fs/binfmt_misc.c Node *e = list_entry(l, Node, list); l 201 fs/cifs/smbdes.c char l[32], r[32]; l 237 fs/cifs/smbdes.c l[j] = pd1[j]; l 285 fs/cifs/smbdes.c xor(r2, l, pcb, 32); l 288 fs/cifs/smbdes.c l[j] = r[j]; l 296 fs/cifs/smbdes.c concat(rl, r, l, 32, 32); l 1457 fs/compat.c unsigned long h, l; l 1458 fs/compat.c if (__get_user(l, ufdset) || __get_user(h, ufdset+1)) l 1461 fs/compat.c *fdset++ = h << 32 | l; l 1489 fs/compat.c unsigned long h, l; l 1490 fs/compat.c l = *fdset++; l 1491 fs/compat.c h = l >> 32; l 1492 fs/compat.c if (__put_user(l, ufdset) || __put_user(h, ufdset+1)) l 154 fs/ext2/namei.c unsigned l = strlen(symname)+1; l 157 fs/ext2/namei.c if (l > sb->s_blocksize) l 165 fs/ext2/namei.c if (l > sizeof (EXT2_I(inode)->i_data)) { l 172 fs/ext2/namei.c err = page_symlink(inode, symname, l); l 178 fs/ext2/namei.c memcpy((char*)(EXT2_I(inode)->i_data),symname,l); l 179 fs/ext2/namei.c inode->i_size = l-1; l 2161 fs/ext3/namei.c int l, err, retries = 0; l 2163 fs/ext3/namei.c l = strlen(symname)+1; l 2164 fs/ext3/namei.c if (l > dir->i_sb->s_blocksize) l 2182 fs/ext3/namei.c if (l > sizeof (EXT3_I(inode)->i_data)) { l 2190 fs/ext3/namei.c err = __page_symlink(inode, symname, l, l 2200 fs/ext3/namei.c memcpy((char*)&EXT3_I(inode)->i_data,symname,l); l 2201 fs/ext3/namei.c inode->i_size = l-1; l 368 fs/ext3/super.c return &list_entry(l, struct ext3_inode_info, i_orphan)->vfs_inode; l 373 fs/ext3/super.c struct list_head *l; l 379 fs/ext3/super.c list_for_each(l, &sbi->s_orphan) { l 380 fs/ext3/super.c struct inode *inode = orphan_list_entry(l); l 349 fs/ext4/extents.c int k, l = path->p_depth; l 352 fs/ext4/extents.c for (k = 0; k <= l; k++, path++) { l 413 fs/ext4/extents.c struct ext4_extent_idx *r, *l, *m; l 418 fs/ext4/extents.c l = EXT_FIRST_INDEX(eh) + 1; l 420 fs/ext4/extents.c while (l <= r) { l 421 fs/ext4/extents.c m = l + (r - l) / 2; l 425 fs/ext4/extents.c l = m + 1; l 426 fs/ext4/extents.c ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ei_block), l 431 fs/ext4/extents.c path->p_idx = l - 1; l 473 fs/ext4/extents.c struct ext4_extent *r, *l, *m; l 485 fs/ext4/extents.c l = EXT_FIRST_EXTENT(eh) + 1; l 488 fs/ext4/extents.c while (l <= r) { l 489 fs/ext4/extents.c m = l + (r - l) / 2; l 493 fs/ext4/extents.c l = m + 1; l 494 fs/ext4/extents.c ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ee_block), l 499 fs/ext4/extents.c path->p_ext = l - 1; l 1895 fs/ext4/mballoc.c int l = *pos; l 1897 fs/ext4/mballoc.c if (l == 0) l 1902 fs/ext4/mballoc.c while (--l && (hs = ext4_mb_history_skip_empty(s, ++hs, 0)) != NULL); l 2192 fs/ext4/namei.c int l, err, retries = 0; l 2194 
fs/ext4/namei.c l = strlen(symname)+1; l 2195 fs/ext4/namei.c if (l > dir->i_sb->s_blocksize) l 2213 fs/ext4/namei.c if (l > sizeof(EXT4_I(inode)->i_data)) { l 2221 fs/ext4/namei.c err = __page_symlink(inode, symname, l, l 2233 fs/ext4/namei.c memcpy((char *)&EXT4_I(inode)->i_data, symname, l); l 2234 fs/ext4/namei.c inode->i_size = l-1; l 480 fs/ext4/super.c return &list_entry(l, struct ext4_inode_info, i_orphan)->vfs_inode; l 485 fs/ext4/super.c struct list_head *l; l 491 fs/ext4/super.c list_for_each(l, &sbi->s_orphan) { l 492 fs/ext4/super.c struct inode *inode = orphan_list_entry(l); l 361 fs/gfs2/eattr.c unsigned int l = 0; l 370 fs/gfs2/eattr.c l = 5; l 374 fs/gfs2/eattr.c l = 7; l 378 fs/gfs2/eattr.c l = 9; l 382 fs/gfs2/eattr.c BUG_ON(l == 0); l 384 fs/gfs2/eattr.c memcpy(er->er_data + ei->ei_size, prefix, l); l 385 fs/gfs2/eattr.c memcpy(er->er_data + ei->ei_size + l, GFS2_EA2NAME(ea), l 24 fs/hfsplus/bnode.c int l; l 30 fs/hfsplus/bnode.c l = min(len, (int)PAGE_CACHE_SIZE - off); l 31 fs/hfsplus/bnode.c memcpy(buf, kmap(*pagep) + off, l); l 34 fs/hfsplus/bnode.c while ((len -= l) != 0) { l 35 fs/hfsplus/bnode.c buf += l; l 36 fs/hfsplus/bnode.c l = min(len, (int)PAGE_CACHE_SIZE); l 37 fs/hfsplus/bnode.c memcpy(buf, kmap(*++pagep), l); l 76 fs/hfsplus/bnode.c int l; l 82 fs/hfsplus/bnode.c l = min(len, (int)PAGE_CACHE_SIZE - off); l 83 fs/hfsplus/bnode.c memcpy(kmap(*pagep) + off, buf, l); l 87 fs/hfsplus/bnode.c while ((len -= l) != 0) { l 88 fs/hfsplus/bnode.c buf += l; l 89 fs/hfsplus/bnode.c l = min(len, (int)PAGE_CACHE_SIZE); l 90 fs/hfsplus/bnode.c memcpy(kmap(*++pagep), buf, l); l 106 fs/hfsplus/bnode.c int l; l 112 fs/hfsplus/bnode.c l = min(len, (int)PAGE_CACHE_SIZE - off); l 113 fs/hfsplus/bnode.c memset(kmap(*pagep) + off, 0, l); l 117 fs/hfsplus/bnode.c while ((len -= l) != 0) { l 118 fs/hfsplus/bnode.c l = min(len, (int)PAGE_CACHE_SIZE); l 119 fs/hfsplus/bnode.c memset(kmap(*++pagep), 0, l); l 130 fs/hfsplus/bnode.c int l; l 144 fs/hfsplus/bnode.c l = min(len, (int)PAGE_CACHE_SIZE - src); l 145 fs/hfsplus/bnode.c memcpy(kmap(*dst_page) + src, kmap(*src_page) + src, l); l 150 fs/hfsplus/bnode.c while ((len -= l) != 0) { l 151 fs/hfsplus/bnode.c l = min(len, (int)PAGE_CACHE_SIZE); l 152 fs/hfsplus/bnode.c memcpy(kmap(*++dst_page), kmap(*++src_page), l); l 164 fs/hfsplus/bnode.c l = PAGE_CACHE_SIZE - src; l 166 fs/hfsplus/bnode.c dst += l; l 168 fs/hfsplus/bnode.c l = PAGE_CACHE_SIZE - dst; l 169 fs/hfsplus/bnode.c src += l; l 172 fs/hfsplus/bnode.c l = min(len, l); l 173 fs/hfsplus/bnode.c memcpy(dst_ptr, src_ptr, l); l 181 fs/hfsplus/bnode.c } while ((len -= l)); l 188 fs/hfsplus/bnode.c int l; l 226 fs/hfsplus/bnode.c l = src; l 228 fs/hfsplus/bnode.c dst -= l; l 230 fs/hfsplus/bnode.c l = dst; l 231 fs/hfsplus/bnode.c src -= l; l 234 fs/hfsplus/bnode.c l = min(len, l); l 235 fs/hfsplus/bnode.c memmove(dst_ptr - l, src_ptr - l, l); l 243 fs/hfsplus/bnode.c } while ((len -= l)); l 252 fs/hfsplus/bnode.c l = min(len, (int)PAGE_CACHE_SIZE - src); l 253 fs/hfsplus/bnode.c memmove(kmap(*dst_page) + src, kmap(*src_page) + src, l); l 258 fs/hfsplus/bnode.c while ((len -= l) != 0) { l 259 fs/hfsplus/bnode.c l = min(len, (int)PAGE_CACHE_SIZE); l 260 fs/hfsplus/bnode.c memmove(kmap(*++dst_page), kmap(*++src_page), l); l 272 fs/hfsplus/bnode.c l = PAGE_CACHE_SIZE - src; l 274 fs/hfsplus/bnode.c dst += l; l 276 fs/hfsplus/bnode.c l = PAGE_CACHE_SIZE - dst; l 277 fs/hfsplus/bnode.c src += l; l 280 fs/hfsplus/bnode.c l = min(len, l); l 281 fs/hfsplus/bnode.c memmove(dst_ptr, 
src_ptr, l); l 289 fs/hfsplus/bnode.c } while ((len -= l)); l 338 fs/hpfs/anode.c unsigned l; l 347 fs/hpfs/anode.c l = 0x200 - (pos & 0x1ff); if (l > len) l = len; l 348 fs/hpfs/anode.c memcpy(buf, data + (pos & 0x1ff), l); l 350 fs/hpfs/anode.c buf += l; pos += l; len -= l; l 361 fs/hpfs/anode.c unsigned l; l 370 fs/hpfs/anode.c l = 0x200 - (pos & 0x1ff); if (l > len) l = len; l 371 fs/hpfs/anode.c memcpy(data + (pos & 0x1ff), buf, l); l 374 fs/hpfs/anode.c buf += l; pos += l; len -= l; l 19 fs/hpfs/dentry.c unsigned l = qstr->len; l 21 fs/hpfs/dentry.c if (l == 1) if (qstr->name[0]=='.') goto x; l 22 fs/hpfs/dentry.c if (l == 2) if (qstr->name[0]=='.' || qstr->name[1]=='.') goto x; l 23 fs/hpfs/dentry.c hpfs_adjust_length((char *)qstr->name, &l); l 30 fs/hpfs/dentry.c for (i = 0; i < l; i++) l 26 fs/hpfs/name.c int l = strlen(text_postfix[i]); l 27 fs/hpfs/name.c if (l <= len) l 28 fs/hpfs/name.c if (!hpfs_compare_names(inode->i_sb, text_postfix[i], l, name + len - l, l, 0)) l 32 fs/hpfs/name.c int l = strlen(text_prefix[i]); l 33 fs/hpfs/name.c if (l <= len) l 34 fs/hpfs/name.c if (!hpfs_compare_names(inode->i_sb, text_prefix[i], l, name, l, 0)) l 109 fs/hpfs/name.c unsigned l = l1 < l2 ? l1 : l2; l 112 fs/hpfs/name.c for (i = 0; i < l; i++) { l 594 fs/hugetlbfs/inode.c int l = strlen(symname)+1; l 595 fs/hugetlbfs/inode.c error = page_symlink(inode, symname, l); l 672 fs/jbd2/journal.c int l = *pos; l 674 fs/jbd2/journal.c if (l == 0) l 679 fs/jbd2/journal.c l--; l 680 fs/jbd2/journal.c while (l) { l 684 fs/jbd2/journal.c l--; l 1082 fs/jffs2/scan.c list_for_each(tmp, l) { l 123 fs/jfs/jfs_dmap.h (((l) == 2) ? 1 : ((l) == 1) ? BLKTOL1((b),(s)) : BLKTOL0((b),(s))) l 315 fs/locks.c switch (l->l_whence) { l 331 fs/locks.c start += l->l_start; l 335 fs/locks.c if (l->l_len > 0) { l 336 fs/locks.c end = start + l->l_len - 1; l 338 fs/locks.c } else if (l->l_len < 0) { l 341 fs/locks.c start += l->l_len; l 356 fs/locks.c return assign_type(fl, l->l_type); l 365 fs/locks.c switch (l->l_whence) { l 379 fs/locks.c start += l->l_start; l 383 fs/locks.c if (l->l_len > 0) { l 384 fs/locks.c fl->fl_end = start + l->l_len - 1; l 385 fs/locks.c } else if (l->l_len < 0) { l 387 fs/locks.c start += l->l_len; l 402 fs/locks.c switch (l->l_type) { l 406 fs/locks.c fl->fl_type = l->l_type; l 1672 fs/locks.c if (copy_from_user(&flock, l, sizeof(flock))) l 1693 fs/locks.c if (!copy_to_user(l, &flock, sizeof(flock))) l 1784 fs/locks.c if (copy_from_user(&flock, l, sizeof(flock))) l 1857 fs/locks.c if (copy_from_user(&flock, l, sizeof(flock))) l 1876 fs/locks.c if (!copy_to_user(l, &flock, sizeof(flock))) l 1902 fs/locks.c if (copy_from_user(&flock, l, sizeof(flock))) l 203 fs/mbcache.c struct list_head *l, *ltmp; l 207 fs/mbcache.c list_for_each(l, &mb_cache_list) { l 209 fs/mbcache.c list_entry(l, struct mb_cache, c_cache_list); l 227 fs/mbcache.c list_for_each_safe(l, ltmp, &free_list) { l 228 fs/mbcache.c __mb_cache_entry_forget(list_entry(l, struct mb_cache_entry, l 328 fs/mbcache.c struct list_head *l, *ltmp; l 331 fs/mbcache.c list_for_each_safe(l, ltmp, &mb_cache_lru_list) { l 333 fs/mbcache.c list_entry(l, struct mb_cache_entry, e_lru_list); l 340 fs/mbcache.c list_for_each_safe(l, ltmp, &free_list) { l 341 fs/mbcache.c __mb_cache_entry_forget(list_entry(l, struct mb_cache_entry, l 358 fs/mbcache.c struct list_head *l, *ltmp; l 362 fs/mbcache.c list_for_each_safe(l, ltmp, &mb_cache_lru_list) { l 364 fs/mbcache.c list_entry(l, struct mb_cache_entry, e_lru_list); l 373 fs/mbcache.c 
list_for_each_safe(l, ltmp, &free_list) { l 374 fs/mbcache.c __mb_cache_entry_forget(list_entry(l, struct mb_cache_entry, l 440 fs/mbcache.c struct list_head *l; l 446 fs/mbcache.c list_for_each_prev(l, &cache->c_block_hash[bucket]) { l 448 fs/mbcache.c list_entry(l, struct mb_cache_entry, e_block_list); l 513 fs/mbcache.c struct list_head *l; l 519 fs/mbcache.c list_for_each(l, &cache->c_block_hash[bucket]) { l 520 fs/mbcache.c ce = list_entry(l, struct mb_cache_entry, e_block_list); l 559 fs/mbcache.c while (l != head) { l 561 fs/mbcache.c list_entry(l, struct mb_cache_entry, l 590 fs/mbcache.c l = l->next; l 614 fs/mbcache.c struct list_head *l; l 619 fs/mbcache.c l = cache->c_indexes_hash[index][bucket].next; l 620 fs/mbcache.c ce = __mb_cache_entry_find(l, &cache->c_indexes_hash[index][bucket], l 652 fs/mbcache.c struct list_head *l; l 657 fs/mbcache.c l = prev->e_indexes[index].o_list.next; l 658 fs/mbcache.c ce = __mb_cache_entry_find(l, &cache->c_indexes_hash[index][bucket], l 133 fs/minix/dir.c unsigned l = strnlen(name, sbi->s_namelen); l 135 fs/minix/dir.c over = filldir(dirent, name, l, l 51 fs/nls/nls_base.c long l; l 57 fs/nls/nls_base.c l = c0; l 61 fs/nls/nls_base.c l &= t->lmask; l 62 fs/nls/nls_base.c if (l < t->lval) l 64 fs/nls/nls_base.c *p = l; l 73 fs/nls/nls_base.c l = (l << 6) | c; l 110 fs/nls/nls_base.c long l; l 117 fs/nls/nls_base.c l = wc; l 121 fs/nls/nls_base.c if (l <= t->lmask) { l 123 fs/nls/nls_base.c *s = t->cval | (l >> c); l 127 fs/nls/nls_base.c *s = 0x80 | ((l >> c) & 0x3F); l 17 fs/nls/nls_euc-jp.c #define IS_SJIS_LOW_BYTE(l) ((0x40 <= (l)) && ((l) <= 0xFC) && ((l) != 0x7F)) l 21 fs/nls/nls_euc-jp.c && IS_SJIS_LOW_BYTE(l)) l 24 fs/nls/nls_euc-jp.c && IS_SJIS_LOW_BYTE(l)) l 26 fs/nls/nls_euc-jp.c && IS_SJIS_LOW_BYTE(l)) l 28 fs/nls/nls_euc-jp.c && IS_SJIS_LOW_BYTE(l)) l 30 fs/nls/nls_euc-jp.c && IS_SJIS_LOW_BYTE(l)) l 44 fs/nls/nls_euc-jp.c #define IS_EUC_JISX0208(h, l) (IS_EUC_BYTE(h) && IS_EUC_BYTE(l)) l 45 fs/nls/nls_euc-jp.c #define IS_EUC_JISX0201KANA(h, l) (((h) == SS2) && (0xA1 <= (l) && (l) <= 0xDF)) l 47 fs/nls/nls_euc-jp.c && IS_EUC_BYTE(l)) l 48 fs/nls/nls_euc-jp.c #define IS_EUC_UDC_HI(h, l) IS_EUC_UDC_LOW(h, l) /* G3 block */ l 142 fs/nls/nls_euc-jp.c (((h) == 0xA2 && (l) == 0xCC) || ((h) == 0xA2 && (l) == 0xE8)) l 1086 fs/ntfs/runlist.c s64 l = n; l 1092 fs/ntfs/runlist.c l >>= 8; l 1094 fs/ntfs/runlist.c } while (l != 0 && l != -1); l 1255 fs/ntfs/runlist.c s64 l = n; l 1263 fs/ntfs/runlist.c *dst++ = l & 0xffll; l 1264 fs/ntfs/runlist.c l >>= 8; l 1266 fs/ntfs/runlist.c } while (l != 0 && l != -1); l 1484 fs/ocfs2/namei.c int status, l, credits; l 1504 fs/ocfs2/namei.c l = strlen(symname) + 1; l 1544 fs/ocfs2/namei.c if (l > ocfs2_fast_symlink_chars(sb)) { l 1572 fs/ocfs2/namei.c newsize = l - 1; l 1573 fs/ocfs2/namei.c if (l > ocfs2_fast_symlink_chars(sb)) { l 1595 fs/ocfs2/namei.c memcpy((char *) fe->id2.i_symlink, symname, l); l 2575 fs/ocfs2/xattr.c const struct ocfs2_xattr_entry *l = a, *r = b; l 2576 fs/ocfs2/xattr.c u32 l_hash = le32_to_cpu(l->xe_name_hash); l 2588 fs/ocfs2/xattr.c struct ocfs2_xattr_entry *l = a, *r = b, tmp; l 2590 fs/ocfs2/xattr.c tmp = *l; l 2591 fs/ocfs2/xattr.c memcpy(l, r, sizeof(struct ocfs2_xattr_entry)); l 2864 fs/ocfs2/xattr.c const struct ocfs2_xattr_entry *l = a, *r = b; l 2865 fs/ocfs2/xattr.c u32 l_name_offset = le16_to_cpu(l->xe_name_offset); l 98 fs/partitions/mac.c int i, l; l 101 fs/partitions/mac.c l = strlen(part->name); l 104 fs/partitions/mac.c for (i = 0; i <= l - 4; ++i) { l 251 
fs/partitions/msdos.c struct bsd_disklabel *l; l 254 fs/partitions/msdos.c l = (struct bsd_disklabel *)read_dev_sector(bdev, offset+1, §); l 255 fs/partitions/msdos.c if (!l) l 257 fs/partitions/msdos.c if (le32_to_cpu(l->d_magic) != BSD_DISKMAGIC) { l 263 fs/partitions/msdos.c if (le16_to_cpu(l->d_npartitions) < max_partitions) l 264 fs/partitions/msdos.c max_partitions = le16_to_cpu(l->d_npartitions); l 265 fs/partitions/msdos.c for (p = l->d_partitions; p - l->d_partitions < max_partitions; p++) { l 284 fs/partitions/msdos.c if (le16_to_cpu(l->d_npartitions) > max_partitions) l 286 fs/partitions/msdos.c le16_to_cpu(l->d_npartitions) - max_partitions); l 331 fs/partitions/msdos.c struct unixware_disklabel *l; l 334 fs/partitions/msdos.c l = (struct unixware_disklabel *)read_dev_sector(bdev, offset+29, §); l 335 fs/partitions/msdos.c if (!l) l 337 fs/partitions/msdos.c if (le32_to_cpu(l->d_magic) != UNIXWARE_DISKMAGIC || l 338 fs/partitions/msdos.c le32_to_cpu(l->vtoc.v_magic) != UNIXWARE_DISKMAGIC2) { l 343 fs/partitions/msdos.c p = &l->vtoc.v_slice[1]; l 345 fs/partitions/msdos.c while (p - &l->vtoc.v_slice[0] < UNIXWARE_NUMSLICE) { l 99 fs/proc/task_mmu.c loff_t l = *pos; l 138 fs/proc/task_mmu.c if ((unsigned long)l < mm->map_count) { l 140 fs/proc/task_mmu.c while (l-- && vma) l 145 fs/proc/task_mmu.c if (l != mm->map_count) l 132 fs/ramfs/inode.c int l = strlen(symname)+1; l 133 fs/ramfs/inode.c error = page_symlink(inode, symname, l); l 857 fs/reiserfs/fix_node.c struct buffer_head *l, *f; l 861 fs/reiserfs/fix_node.c (l = tb->FL[h]) == NULL) l 864 fs/reiserfs/fix_node.c if (f == l) l 867 fs/reiserfs/fix_node.c order = B_NR_ITEMS(l); l 868 fs/reiserfs/fix_node.c f = l; l 507 fs/reiserfs/item_ops.c int k, l; l 509 fs/reiserfs/item_ops.c l = 0; l 511 fs/reiserfs/item_ops.c l += dir_u->entry_sizes[k]; l 513 fs/reiserfs/item_ops.c if (l + IH_SIZE != vi->vi_item_len + l 839 fs/reiserfs/journal.c #define JH_ENTRY(l) list_entry((l), struct reiserfs_jh, list) l 414 fs/reiserfs/procfs.c loff_t l = *pos; l 416 fs/reiserfs/procfs.c if (l) l 92 fs/sysv/namei.c int l = strlen(symname)+1; l 95 fs/sysv/namei.c if (l > dir->i_sb->s_blocksize) l 104 fs/sysv/namei.c err = page_symlink(inode, symname, l); l 762 fs/ubifs/io.c int err, l; l 790 fs/ubifs/io.c l = le32_to_cpu(ch->len); l 791 fs/ubifs/io.c if (l != len) { l 792 fs/ubifs/io.c ubifs_err("bad node length %d, expected %d", l, len); l 159 fs/udf/partition.c int i, j, k, l; l 223 fs/udf/partition.c for (l = k; l < reallocationTableLen; l++) { l 224 fs/udf/partition.c struct sparingEntry *entry = &st->mapEntry[l]; l 236 fs/udf/partition.c mapEntry = st->mapEntry[l]; l 241 fs/udf/partition.c (l - k) * l 60 fs/udf/udftime.c #define SPY(y, l, s) (SPD * (365 * y + l) + s) l 128 fs/ufs/namei.c unsigned l = strlen(symname)+1; l 131 fs/ufs/namei.c if (l > sb->s_blocksize) l 140 fs/ufs/namei.c if (l > UFS_SB(sb)->s_uspi->s_maxsymlinklen) { l 144 fs/ufs/namei.c err = page_symlink(inode, symname, l); l 150 fs/ufs/namei.c memcpy((char*)&UFS_I(inode)->i_u1.i_data,symname,l); l 151 fs/ufs/namei.c inode->i_size = l-1; l 75 fs/xfs/linux-2.6/xfs_vfs.h #define xfs_wait_for_freeze(mp,l) vfs_check_frozen((mp)->m_super, (l)) l 82 fs/xfs/linux-2.6/xfs_vnode.c xfs_do_force_shutdown(ip->i_mount, SHUTDOWN_DEVICE_REQ, f, l); l 93 fs/xfs/quota/xfs_qm.c cmn_err(CE_DEBUG, "%s (#%d)", title, (int) (l)->qh_nelems); \ l 94 fs/xfs/quota/xfs_qm.c for (dqp = (l)->qh_next; dqp != NULL; dqp = dqp->NXT) { \ l 1149 fs/xfs/quota/xfs_qm_syscalls.c cmn_err(CE_DEBUG, "%s (#%d)", title, 
(int) (l)->qh_nelems); \ l 1150 fs/xfs/quota/xfs_qm_syscalls.c for (dqp = (xfs_dqtest_t *)(l)->qh_next; dqp != NULL; \ l 64 fs/xfs/xfs_alloc.c xfs_alloc_trace_busy(__func__, s, mp, ag, agb, l, sl, tp, XFS_ALLOC_KTRACE_BUSY, __LINE__) l 68 fs/xfs/xfs_alloc.c xfs_alloc_trace_busy(__func__, s, mp, ag, agb, l, 0, tp, XFS_ALLOC_KTRACE_BUSYSEARCH, __LINE__) l 612 fs/xfs/xfs_btree.c return be64_to_cpu(block->bb_u.l.bb_rightsib) == NULLDFSBNO; l 904 fs/xfs/xfs_btree.c if (be64_to_cpu(b->bb_u.l.bb_leftsib) == NULLDFSBNO) l 906 fs/xfs/xfs_btree.c if (be64_to_cpu(b->bb_u.l.bb_rightsib) == NULLDFSBNO) l 83 fs/xfs/xfs_btree.h } l; /* long form pointers */ l 2369 fs/xfs/xfs_da_btree.c uint l; l 2383 fs/xfs/xfs_da_btree.c l = f + XFS_BUF_COUNT(bp) - 1; l 2386 fs/xfs/xfs_da_btree.c if (l > last) l 2387 fs/xfs/xfs_da_btree.c l = last; l 2388 fs/xfs/xfs_da_btree.c if (f <= l) l 2389 fs/xfs/xfs_da_btree.c xfs_trans_log_buf(tp, bp, f - off, l - off); l 58 fs/xfs/xfs_error.h goto l; \ l 1020 fs/xfs/xfs_log.c xlog_t *l; l 1033 fs/xfs/xfs_log.c l = iclog->ic_log; l 1042 fs/xfs/xfs_log.c l->l_mp->m_flags &= ~XFS_MOUNT_BARRIER; l 1043 fs/xfs/xfs_log.c xfs_fs_cmn_err(CE_WARN, l->l_mp, l 1052 fs/xfs/xfs_log.c if (XFS_TEST_ERROR((XFS_BUF_GETERROR(bp)), l->l_mp, l 1054 fs/xfs/xfs_log.c xfs_ioerror_alert("xlog_iodone", l->l_mp, bp, XFS_BUF_ADDR(bp)); l 1056 fs/xfs/xfs_log.c xfs_force_shutdown(l->l_mp, SHUTDOWN_LOG_IO_ERROR); l 604 fs/xfs/xfs_rtalloc.c int l; /* level number (loop control) */ l 619 fs/xfs/xfs_rtalloc.c for (l = xfs_highbit32(maxlen); l < mp->m_rsumlevels; l++) { l 627 fs/xfs/xfs_rtalloc.c error = xfs_rtget_summary(mp, tp, l, i, rbpp, rsb, l 677 fs/xfs/xfs_rtalloc.c for (l = xfs_highbit32(maxlen); l >= xfs_highbit32(minlen); l--) { l 686 fs/xfs/xfs_rtalloc.c error = xfs_rtget_summary(mp, tp, l, i, rbpp, rsb, l 702 fs/xfs/xfs_rtalloc.c XFS_RTMAX(minlen, 1 << l), l 703 fs/xfs/xfs_rtalloc.c XFS_RTMIN(maxlen, (1 << (l + 1)) - 1), l 50 include/acpi/acmacros.h #define ACPI_LOWORD(l) ((u16)(u32)(l)) l 51 include/acpi/acmacros.h #define ACPI_HIWORD(l) ((u16)((((u32)(l)) >> 16) & 0xFFFF)) l 52 include/acpi/acmacros.h #define ACPI_LOBYTE(l) ((u8)(u16)(l)) l 53 include/acpi/acmacros.h #define ACPI_HIBYTE(l) ((u8)((((u16)(l)) >> 8) & 0xFF)) l 283 include/asm-frv/bitops.h struct { u32 h, l; }; l 304 include/asm-frv/bitops.h : "0r"(_.h), "r"(_.l) l 365 include/asm-frv/bitops.h struct { u32 h, l; }; l 380 include/asm-frv/bitops.h : "0r"(_.h), "r"(_.l) l 34 include/asm-frv/dm9000.h #define insl(a,b,l) __insl(a,b,l,0) /* don't byte-swap */ l 180 include/asm-frv/io.h #define outsb(a,b,l) io_outsb(a,b,l) l 181 include/asm-frv/io.h #define outsw(a,b,l) io_outsw(a,b,l) l 182 include/asm-frv/io.h #define outsl(a,b,l) __outsl(a,b,l,0) l 184 include/asm-frv/io.h #define insb(a,b,l) io_insb(a,b,l) l 185 include/asm-frv/io.h #define insw(a,b,l) io_insw(a,b,l) l 186 include/asm-frv/io.h #define insl(a,b,l) __insl(a,b,l,0) l 170 include/asm-frv/math-emu.h move.l (FPS_PC+4,%sp),\dest l 174 include/asm-frv/math-emu.h move.l \src,(FPS_PC+4,%sp) l 179 include/asm-frv/math-emu.h addq.l #\s,%sp@(FPS_PC+4) l 187 include/asm-frv/math-emu.h fp_get_instr_data l,4,\dest,\label,\addr l 201 include/asm-frv/math-emu.h .Lu2\@: move.l \addr,%a0 l 218 include/asm-frv/math-emu.h .Lu3\@: move.l \addr,%a0 l 233 include/asm-frv/math-emu.h move.l \arg1,-(%sp) l 244 include/asm-frv/math-emu.h movem.l %d0/%d1/%a0/%a1,-(%sp) l 260 include/asm-frv/math-emu.h movem.l (%sp)+,%d0/%d1/%a0/%a1 l 266 include/asm-frv/math-emu.h movem.l %d0/%a0,-(%sp) l 29 
include/asm-generic/atomic.h atomic64_t *v = (atomic64_t *)l; l 36 include/asm-generic/atomic.h atomic64_t *v = (atomic64_t *)l; l 43 include/asm-generic/atomic.h atomic64_t *v = (atomic64_t *)l; l 50 include/asm-generic/atomic.h atomic64_t *v = (atomic64_t *)l; l 57 include/asm-generic/atomic.h atomic64_t *v = (atomic64_t *)l; l 64 include/asm-generic/atomic.h atomic64_t *v = (atomic64_t *)l; l 71 include/asm-generic/atomic.h atomic64_t *v = (atomic64_t *)l; l 78 include/asm-generic/atomic.h atomic64_t *v = (atomic64_t *)l; l 85 include/asm-generic/atomic.h atomic64_t *v = (atomic64_t *)l; l 92 include/asm-generic/atomic.h atomic64_t *v = (atomic64_t *)l; l 99 include/asm-generic/atomic.h atomic64_t *v = (atomic64_t *)l; l 106 include/asm-generic/atomic.h atomic64_t *v = (atomic64_t *)l; l 113 include/asm-generic/atomic.h atomic64_t *v = (atomic64_t *)l; l 120 include/asm-generic/atomic.h atomic64_t *v = (atomic64_t *)l; l 127 include/asm-generic/atomic.h atomic64_t *v = (atomic64_t *)l; l 132 include/asm-generic/atomic.h #define atomic_long_inc_not_zero(l) atomic64_inc_not_zero((atomic64_t *)(l)) l 135 include/asm-generic/atomic.h (atomic_cmpxchg((atomic64_t *)(l), (old), (new))) l 137 include/asm-generic/atomic.h (atomic_xchg((atomic64_t *)(l), (new))) l 146 include/asm-generic/atomic.h atomic_t *v = (atomic_t *)l; l 153 include/asm-generic/atomic.h atomic_t *v = (atomic_t *)l; l 160 include/asm-generic/atomic.h atomic_t *v = (atomic_t *)l; l 167 include/asm-generic/atomic.h atomic_t *v = (atomic_t *)l; l 174 include/asm-generic/atomic.h atomic_t *v = (atomic_t *)l; l 181 include/asm-generic/atomic.h atomic_t *v = (atomic_t *)l; l 188 include/asm-generic/atomic.h atomic_t *v = (atomic_t *)l; l 195 include/asm-generic/atomic.h atomic_t *v = (atomic_t *)l; l 202 include/asm-generic/atomic.h atomic_t *v = (atomic_t *)l; l 209 include/asm-generic/atomic.h atomic_t *v = (atomic_t *)l; l 216 include/asm-generic/atomic.h atomic_t *v = (atomic_t *)l; l 223 include/asm-generic/atomic.h atomic_t *v = (atomic_t *)l; l 230 include/asm-generic/atomic.h atomic_t *v = (atomic_t *)l; l 237 include/asm-generic/atomic.h atomic_t *v = (atomic_t *)l; l 244 include/asm-generic/atomic.h atomic_t *v = (atomic_t *)l; l 249 include/asm-generic/atomic.h #define atomic_long_inc_not_zero(l) atomic_inc_not_zero((atomic_t *)(l)) l 252 include/asm-generic/atomic.h (atomic_cmpxchg((atomic_t *)(l), (old), (new))) l 254 include/asm-generic/atomic.h (atomic_xchg((atomic_t *)(l), (new))) l 22 include/asm-generic/bitops/atomic.h raw_spinlock_t *s = ATOMIC_HASH(l); \ l 28 include/asm-generic/bitops/atomic.h raw_spinlock_t *s = ATOMIC_HASH(l); \ l 29 include/asm-generic/local.h #define local_read(l) atomic_long_read(&(l)->a) l 30 include/asm-generic/local.h #define local_set(l,i) atomic_long_set((&(l)->a),(i)) l 31 include/asm-generic/local.h #define local_inc(l) atomic_long_inc(&(l)->a) l 32 include/asm-generic/local.h #define local_dec(l) atomic_long_dec(&(l)->a) l 33 include/asm-generic/local.h #define local_add(i,l) atomic_long_add((i),(&(l)->a)) l 34 include/asm-generic/local.h #define local_sub(i,l) atomic_long_sub((i),(&(l)->a)) l 36 include/asm-generic/local.h #define local_sub_and_test(i, l) atomic_long_sub_and_test((i), (&(l)->a)) l 37 include/asm-generic/local.h #define local_dec_and_test(l) atomic_long_dec_and_test(&(l)->a) l 38 include/asm-generic/local.h #define local_inc_and_test(l) atomic_long_inc_and_test(&(l)->a) l 39 include/asm-generic/local.h #define local_add_negative(i, l) atomic_long_add_negative((i), 
(&(l)->a)) l 40 include/asm-generic/local.h #define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a)) l 41 include/asm-generic/local.h #define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a)) l 42 include/asm-generic/local.h #define local_inc_return(l) atomic_long_inc_return(&(l)->a) l 44 include/asm-generic/local.h #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n)) l 45 include/asm-generic/local.h #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n)) l 46 include/asm-generic/local.h #define local_add_unless(l, a, u) atomic_long_add_unless((&(l)->a), (a), (u)) l 47 include/asm-generic/local.h #define local_inc_not_zero(l) atomic_long_inc_not_zero(&(l)->a) l 51 include/asm-generic/local.h #define __local_inc(l) local_set((l), local_read(l) + 1) l 52 include/asm-generic/local.h #define __local_dec(l) local_set((l), local_read(l) - 1) l 53 include/asm-generic/local.h #define __local_add(i,l) local_set((l), local_read(l) + (i)) l 54 include/asm-generic/local.h #define __local_sub(i,l) local_set((l), local_read(l) - (i)) l 60 include/asm-generic/local.h #define cpu_local_read(l) local_read(&__get_cpu_var(l)) l 61 include/asm-generic/local.h #define cpu_local_set(l, i) local_set(&__get_cpu_var(l), (i)) l 62 include/asm-generic/local.h #define cpu_local_inc(l) local_inc(&__get_cpu_var(l)) l 63 include/asm-generic/local.h #define cpu_local_dec(l) local_dec(&__get_cpu_var(l)) l 64 include/asm-generic/local.h #define cpu_local_add(i, l) local_add((i), &__get_cpu_var(l)) l 65 include/asm-generic/local.h #define cpu_local_sub(i, l) local_sub((i), &__get_cpu_var(l)) l 70 include/asm-generic/local.h #define __cpu_local_inc(l) __local_inc(&__get_cpu_var(l)) l 71 include/asm-generic/local.h #define __cpu_local_dec(l) __local_dec(&__get_cpu_var(l)) l 72 include/asm-generic/local.h #define __cpu_local_add(i, l) __local_add((i), &__get_cpu_var(l)) l 73 include/asm-generic/local.h #define __cpu_local_sub(i, l) __local_sub((i), &__get_cpu_var(l)) l 124 include/asm-m32r/io.h *(volatile unsigned long __force *)addr = l; l 38 include/asm-m32r/local.h #define local_read(l) ((l)->counter) l 47 include/asm-m32r/local.h #define local_set(l, i) (((l)->counter) = (i)) l 69 include/asm-m32r/local.h : "r" (&l->counter), "r" (i) l 100 include/asm-m32r/local.h : "r" (&l->counter), "r" (i) l 118 include/asm-m32r/local.h #define local_add(i, l) ((void) local_add_return((i), (l))) l 127 include/asm-m32r/local.h #define local_sub(i, l) ((void) local_sub_return((i), (l))) l 138 include/asm-m32r/local.h #define local_sub_and_test(i, l) (local_sub_return((i), (l)) == 0) l 159 include/asm-m32r/local.h : "r" (&l->counter) l 189 include/asm-m32r/local.h : "r" (&l->counter) l 206 include/asm-m32r/local.h #define local_inc(l) ((void)local_inc_return(l)) l 214 include/asm-m32r/local.h #define local_dec(l) ((void)local_dec_return(l)) l 224 include/asm-m32r/local.h #define local_inc_and_test(l) (local_inc_return(l) == 0) l 234 include/asm-m32r/local.h #define local_dec_and_test(l) (local_dec_return(l) == 0) l 245 include/asm-m32r/local.h #define local_add_negative(i, l) (local_add_return((i), (l)) < 0) l 247 include/asm-m32r/local.h #define local_cmpxchg(l, o, n) (cmpxchg_local(&((l)->counter), (o), (n))) l 248 include/asm-m32r/local.h #define local_xchg(v, new) (xchg_local(&((l)->counter), new)) l 262 include/asm-m32r/local.h c = local_read(l); l 266 include/asm-m32r/local.h old = local_cmpxchg((l), c, c + (a)); l 274 include/asm-m32r/local.h #define local_inc_not_zero(l) 
local_add_unless((l), 1, 0) l 331 include/asm-m32r/local.h #define __local_inc(l) ((l)->a.counter++) l 332 include/asm-m32r/local.h #define __local_dec(l) ((l)->a.counter++) l 333 include/asm-m32r/local.h #define __local_add(i, l) ((l)->a.counter += (i)) l 334 include/asm-m32r/local.h #define __local_sub(i, l) ((l)->a.counter -= (i)) l 346 include/asm-m32r/local.h res__ = (l); \ l 351 include/asm-m32r/local.h l; \ l 354 include/asm-m32r/local.h #define cpu_local_read(l) cpu_local_wrap_v(local_read(&__get_cpu_var(l))) l 355 include/asm-m32r/local.h #define cpu_local_set(l, i) cpu_local_wrap(local_set(&__get_cpu_var(l), (i))) l 356 include/asm-m32r/local.h #define cpu_local_inc(l) cpu_local_wrap(local_inc(&__get_cpu_var(l))) l 357 include/asm-m32r/local.h #define cpu_local_dec(l) cpu_local_wrap(local_dec(&__get_cpu_var(l))) l 358 include/asm-m32r/local.h #define cpu_local_add(i, l) cpu_local_wrap(local_add((i), &__get_cpu_var(l))) l 359 include/asm-m32r/local.h #define cpu_local_sub(i, l) cpu_local_wrap(local_sub((i), &__get_cpu_var(l))) l 361 include/asm-m32r/local.h #define __cpu_local_inc(l) cpu_local_inc(l) l 362 include/asm-m32r/local.h #define __cpu_local_dec(l) cpu_local_dec(l) l 363 include/asm-m32r/local.h #define __cpu_local_add(i, l) cpu_local_add((i), (l)) l 364 include/asm-m32r/local.h #define __cpu_local_sub(i, l) cpu_local_sub((i), (l)) l 508 include/asm-m68k/atarihw.h u_long l; l 170 include/asm-m68k/math-emu.h move.l (FPS_PC+4,%sp),\dest l 174 include/asm-m68k/math-emu.h move.l \src,(FPS_PC+4,%sp) l 179 include/asm-m68k/math-emu.h addq.l #\s,%sp@(FPS_PC+4) l 187 include/asm-m68k/math-emu.h fp_get_instr_data l,4,\dest,\label,\addr l 201 include/asm-m68k/math-emu.h .Lu2\@: move.l \addr,%a0 l 218 include/asm-m68k/math-emu.h .Lu3\@: move.l \addr,%a0 l 235 include/asm-m68k/math-emu.h .irp m b,w,l l 248 include/asm-m68k/math-emu.h move.l \arg1,-(%sp) l 259 include/asm-m68k/math-emu.h movem.l %d0/%d1/%a0/%a1,-(%sp) l 275 include/asm-m68k/math-emu.h movem.l (%sp)+,%d0/%d1/%a0/%a1 l 281 include/asm-m68k/math-emu.h movem.l %d0/%a0,-(%sp) l 45 include/asm-m68k/raw_io.h #define out_be32(addr,l) (void)((*(__force volatile u32 *) (addr)) = (l)) l 47 include/asm-m68k/raw_io.h #define out_le32(addr,l) (void)((*(__force volatile __le32 *) (addr)) = cpu_to_le32(l)) l 80 include/asm-m68k/uaccess.h __put_user_asm(__pu_err, __pu_val, ptr, l, r, -EFAULT); \ l 148 include/asm-m68k/uaccess.h __get_user_asm(__gu_err, x, ptr, u32, l, r, -EFAULT); \ l 238 include/asm-m68k/uaccess.h __get_user_asm(res, *(u32 *)to, (u32 __user *)from, u32, l, r, 4); l 241 include/asm-m68k/uaccess.h __constant_copy_from_user_asm(res, to, from, tmp, 5, l, b,); l 244 include/asm-m68k/uaccess.h __constant_copy_from_user_asm(res, to, from, tmp, 6, l, w,); l 247 include/asm-m68k/uaccess.h __constant_copy_from_user_asm(res, to, from, tmp, 7, l, w, b); l 250 include/asm-m68k/uaccess.h __constant_copy_from_user_asm(res, to, from, tmp, 8, l, l,); l 253 include/asm-m68k/uaccess.h __constant_copy_from_user_asm(res, to, from, tmp, 9, l, l, b); l 256 include/asm-m68k/uaccess.h __constant_copy_from_user_asm(res, to, from, tmp, 10, l, l, w); l 259 include/asm-m68k/uaccess.h __constant_copy_from_user_asm(res, to, from, tmp, 12, l, l, l); l 319 include/asm-m68k/uaccess.h __put_user_asm(res, *(u32 *)from, (u32 __user *)to, l, r, 4); l 322 include/asm-m68k/uaccess.h __constant_copy_to_user_asm(res, to, from, tmp, 5, l, b,); l 325 include/asm-m68k/uaccess.h __constant_copy_to_user_asm(res, to, from, tmp, 6, l, w,); l 328 
include/asm-m68k/uaccess.h __constant_copy_to_user_asm(res, to, from, tmp, 7, l, w, b); l 331 include/asm-m68k/uaccess.h __constant_copy_to_user_asm(res, to, from, tmp, 8, l, l,); l 334 include/asm-m68k/uaccess.h __constant_copy_to_user_asm(res, to, from, tmp, 9, l, l, b); l 337 include/asm-m68k/uaccess.h __constant_copy_to_user_asm(res, to, from, tmp, 10, l, l, w); l 340 include/asm-m68k/uaccess.h __constant_copy_to_user_asm(res, to, from, tmp, 12, l, l, l); l 36 include/asm-mn10300/div64.h unsigned long long l; \ l 39 include/asm-mn10300/div64.h __quot.l = n; \ l 53 include/asm-mn10300/div64.h n = __quot.l; \ l 34 include/asm-mn10300/unit-asb2303/smc91111.h #define SMC_insw(a, r, p, l) insw((unsigned long) ((a) + (r)), (p), (l)) l 35 include/asm-mn10300/unit-asb2303/smc91111.h #define SMC_outsw(a, r, p, l) outsw((unsigned long) ((a) + (r)), (p), (l)) l 41 include/asm-mn10300/unit-asb2303/smc91111.h #define SMC_insl(a, r, p, l) insl((unsigned long) ((a) + (r)), (p), (l)) l 42 include/asm-mn10300/unit-asb2303/smc91111.h #define SMC_outsl(a, r, p, l) outsl((unsigned long) ((a) + (r)), (p), (l)) l 60 include/asm-parisc/assembly.h #define BL b,l l 35 include/asm-parisc/atomic.h raw_spinlock_t *s = ATOMIC_HASH(l); \ l 41 include/asm-parisc/atomic.h raw_spinlock_t *s = ATOMIC_HASH(l); \ l 17 include/asm-parisc/system.h unsigned int l:1; l 18 include/asm-x86/asm.h #define __ASM_SIZE(inst) __ASM_SEL(inst##l, inst##q) l 31 include/asm-x86/desc.h desc->l = 0; l 32 include/asm-x86/desc_defs.h unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8; l 93 include/asm-x86/kvm.h __u8 present, dpl, db, s, l, g, avl; l 404 include/asm-x86/kvm_host.h void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l); l 16 include/asm-x86/local.h #define local_read(l) atomic_long_read(&(l)->a) l 17 include/asm-x86/local.h #define local_set(l, i) atomic_long_set(&(l)->a, (i)) l 22 include/asm-x86/local.h : "+m" (l->a.counter)); l 28 include/asm-x86/local.h : "+m" (l->a.counter)); l 34 include/asm-x86/local.h : "+m" (l->a.counter) l 41 include/asm-x86/local.h : "+m" (l->a.counter) l 59 include/asm-x86/local.h : "+m" (l->a.counter), "=qm" (c) l 77 include/asm-x86/local.h : "+m" (l->a.counter), "=qm" (c) l 95 include/asm-x86/local.h : "+m" (l->a.counter), "=qm" (c) l 114 include/asm-x86/local.h : "+m" (l->a.counter), "=qm" (c) l 137 include/asm-x86/local.h : "+r" (i), "+m" (l->a.counter) l 144 include/asm-x86/local.h __i = local_read(l); l 145 include/asm-x86/local.h local_set(l, i + __i); l 153 include/asm-x86/local.h return local_add_return(-i, l); l 156 include/asm-x86/local.h #define local_inc_return(l) (local_add_return(1, l)) l 157 include/asm-x86/local.h #define local_dec_return(l) (local_sub_return(1, l)) l 160 include/asm-x86/local.h (cmpxchg_local(&((l)->a.counter), (o), (n))) l 162 include/asm-x86/local.h #define local_xchg(l, n) (xchg(&((l)->a.counter), (n))) l 176 include/asm-x86/local.h c = local_read((l)); \ l 180 include/asm-x86/local.h old = local_cmpxchg((l), c, c + (a)); \ l 187 include/asm-x86/local.h #define local_inc_not_zero(l) local_add_unless((l), 1, 0) l 193 include/asm-x86/local.h #define __local_inc(l) local_inc(l) l 194 include/asm-x86/local.h #define __local_dec(l) local_dec(l) l 195 include/asm-x86/local.h #define __local_add(i, l) local_add((i), (l)) l 196 include/asm-x86/local.h #define __local_sub(i, l) local_sub((i), (l)) l 212 include/asm-x86/local.h res__ = (l); \ l 219 include/asm-x86/local.h (l); \ l 223 include/asm-x86/local.h #define cpu_local_read(l) 
cpu_local_wrap_v(local_read(&__get_cpu_var((l)))) l 224 include/asm-x86/local.h #define cpu_local_set(l, i) cpu_local_wrap(local_set(&__get_cpu_var((l)), (i))) l 225 include/asm-x86/local.h #define cpu_local_inc(l) cpu_local_wrap(local_inc(&__get_cpu_var((l)))) l 226 include/asm-x86/local.h #define cpu_local_dec(l) cpu_local_wrap(local_dec(&__get_cpu_var((l)))) l 227 include/asm-x86/local.h #define cpu_local_add(i, l) cpu_local_wrap(local_add((i), &__get_cpu_var((l)))) l 228 include/asm-x86/local.h #define cpu_local_sub(i, l) cpu_local_wrap(local_sub((i), &__get_cpu_var((l)))) l 230 include/asm-x86/local.h #define __cpu_local_inc(l) cpu_local_inc((l)) l 231 include/asm-x86/local.h #define __cpu_local_dec(l) cpu_local_dec((l)) l 232 include/asm-x86/local.h #define __cpu_local_add(i, l) cpu_local_add((i), (l)) l 233 include/asm-x86/local.h #define __cpu_local_sub(i, l) cpu_local_sub((i), (l)) l 225 include/asm-x86/msr.h rdmsr(msr_no, *l, *h); l 230 include/asm-x86/msr.h wrmsr(msr_no, l, h); l 236 include/asm-x86/msr.h return rdmsr_safe(msr_no, l, h); l 240 include/asm-x86/msr.h return wrmsr_safe(msr_no, l, h); l 237 include/linux/amba/clcd.h #define CHECK(e,l,h) (var->e < l || var->e > h) l 57 include/linux/atm.h #define __SO_ENCODE(l,n,t) ((((l) & 0x1FF) << 22) | ((n) << 16) | \ l 110 include/linux/bitops.h if (sizeof(l) == 4) l 111 include/linux/bitops.h return fls(l); l 112 include/linux/bitops.h return fls64(l); l 194 include/linux/byteorder/swab.h __u32 l = x & ((1ULL<<32)-1); l 195 include/linux/byteorder/swab.h return (((__u64)__swab32(l)) << 32) | ((__u64)(__swab32(h))); l 137 include/linux/isdn/capiutil.h unsigned l, p; l 417 include/linux/lockdep.h # define spin_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i) l 418 include/linux/lockdep.h # define spin_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 2, n, i) l 420 include/linux/lockdep.h # define spin_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i) l 421 include/linux/lockdep.h # define spin_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 1, NULL, i) l 423 include/linux/lockdep.h # define spin_release(l, n, i) lock_release(l, n, i) l 431 include/linux/lockdep.h # define rwlock_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i) l 432 include/linux/lockdep.h # define rwlock_acquire_read(l, s, t, i) lock_acquire(l, s, t, 2, 2, NULL, i) l 434 include/linux/lockdep.h # define rwlock_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i) l 435 include/linux/lockdep.h # define rwlock_acquire_read(l, s, t, i) lock_acquire(l, s, t, 2, 1, NULL, i) l 437 include/linux/lockdep.h # define rwlock_release(l, n, i) lock_release(l, n, i) l 446 include/linux/lockdep.h # define mutex_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i) l 448 include/linux/lockdep.h # define mutex_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i) l 450 include/linux/lockdep.h # define mutex_release(l, n, i) lock_release(l, n, i) l 458 include/linux/lockdep.h # define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i) l 459 include/linux/lockdep.h # define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 2, NULL, i) l 461 include/linux/lockdep.h # define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i) l 462 include/linux/lockdep.h # define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 1, NULL, i) l 464 include/linux/lockdep.h # define rwsem_release(l, n, i) lock_release(l, n, i) l 473 include/linux/lockdep.h # define lock_map_acquire(l) lock_acquire(l, 0, 0, 0, 2, NULL, 
_THIS_IP_) l 475 include/linux/lockdep.h # define lock_map_acquire(l) lock_acquire(l, 0, 0, 0, 1, NULL, _THIS_IP_) l 477 include/linux/lockdep.h # define lock_map_release(l) lock_release(l, 1, _THIS_IP_) l 6 include/linux/mfd/tmio.h #define tmio_ioread16_rep(r, b, l) readsw(r, b, l) l 12 include/linux/mfd/tmio.h #define tmio_iowrite16_rep(r, b, l) writesw(r, b, l) l 135 include/linux/mmc/card.h #define mmc_list_to_card(l) container_of(l, struct mmc_card, node) l 264 include/linux/nfsd/nfsd.h #define isdotent(n, l) (l < 3 && n[0] == '.' && (l == 1 || n[1] == '.')) l 13 include/linux/romfs_fs.h #define __mkw(h,l) (((h)&0x00ff)<< 8|((l)&0x00ff)) l 14 include/linux/romfs_fs.h #define __mkl(h,l) (((h)&0xffff)<<16|((l)&0xffff)) l 136 include/linux/sunrpc/svcauth.h unsigned long l = 0; l 143 include/linux/sunrpc/svcauth.h l = (l << 8) | c; l 146 include/linux/sunrpc/svcauth.h hash = hash_long(hash^l, BITS_PER_LONG); l 154 include/linux/sunrpc/svcauth.h unsigned long l = 0; l 162 include/linux/sunrpc/svcauth.h l = (l << 8) | c; l 165 include/linux/sunrpc/svcauth.h hash = hash_long(hash^l, BITS_PER_LONG); l 20 include/linux/sunrpc/xdr.h #define XDR_QUADLEN(l) (((l) + 3) >> 2) l 76 include/linux/swab.h __u32 l = val & ((1ULL << 32) - 1); l 77 include/linux/swab.h return (((__u64)___swab32(l)) << 32) | ((__u64)(___swab32(h))); l 47 include/pcmcia/mem_op.h int l; l 52 include/pcmcia/mem_op.h get_user(l, (int *)from); l 53 include/pcmcia/mem_op.h __raw_writel(l, to); l 591 include/video/pm3fb.h #define PM3FBDestReadMode_Layout0(l) (((l) & 0x3) << 12) l 592 include/video/pm3fb.h #define PM3FBDestReadMode_Layout1(l) (((l) & 0x3) << 14) l 593 include/video/pm3fb.h #define PM3FBDestReadMode_Layout2(l) (((l) & 0x3) << 16) l 594 include/video/pm3fb.h #define PM3FBDestReadMode_Layout3(l) (((l) & 0x3) << 18) l 620 include/video/pm3fb.h #define PM3FBSourceReadMode_Layout(l) (((l) & 0x3) << 8) l 660 include/video/pm3fb.h #define PM3FBWriteMode_Layout0(l) (((l) & 0x3) << 16) l 661 include/video/pm3fb.h #define PM3FBWriteMode_Layout1(l) (((l) & 0x3) << 18) l 662 include/video/pm3fb.h #define PM3FBWriteMode_Layout2(l) (((l) & 0x3) << 20) l 663 include/video/pm3fb.h #define PM3FBWriteMode_Layout3(l) (((l) & 0x3) << 22) l 1744 kernel/cgroup.c struct list_head *l = it->cg_link; l 1750 kernel/cgroup.c l = l->next; l 1751 kernel/cgroup.c if (l == &cgrp->css_sets) { l 1755 kernel/cgroup.c link = list_entry(l, struct cg_cgroup_link, cgrp_link_list); l 1758 kernel/cgroup.c it->cg_link = l; l 1809 kernel/cgroup.c struct list_head *l = it->task; l 1814 kernel/cgroup.c res = list_entry(l, struct task_struct, cg_list); l 1816 kernel/cgroup.c l = l->next; l 1817 kernel/cgroup.c if (l == &res->cgroups->tasks) { l 1822 kernel/cgroup.c it->task = l; l 122 kernel/kfifo.c unsigned int l; l 134 kernel/kfifo.c l = min(len, fifo->size - (fifo->in & (fifo->size - 1))); l 135 kernel/kfifo.c memcpy(fifo->buffer + (fifo->in & (fifo->size - 1)), buffer, l); l 138 kernel/kfifo.c memcpy(fifo->buffer, buffer + l, len - l); l 168 kernel/kfifo.c unsigned int l; l 180 kernel/kfifo.c l = min(len, fifo->size - (fifo->out & (fifo->size - 1))); l 181 kernel/kfifo.c memcpy(buffer, fifo->buffer + (fifo->out & (fifo->size - 1)), l); l 184 kernel/kfifo.c memcpy(buffer + l, fifo->buffer, len - l); l 451 kernel/lockdep_proc.c const struct lock_stat_data *dl = l, *dr = r; l 40 kernel/mutex-debug.h struct mutex *l = container_of(lock, struct mutex, wait_lock); \ l 45 kernel/mutex-debug.h DEBUG_LOCKS_WARN_ON(l->magic != l); \ l 183 kernel/params.c tmptype l; 
\ l 187 kernel/params.c ret = strtolfn(val, 0, &l); \ l 188 kernel/params.c if (ret == -EINVAL || ((type)l != l)) \ l 190 kernel/params.c *((type *)kp->arg) = l; \ l 60 kernel/resource.c loff_t l = 0; l 62 kernel/resource.c for (p = p->child; p && l < *pos; p = r_next(m, p, &l)) l 82 kernel/rtmutex.c # define rt_mutex_cmpxchg(l,c,n) (cmpxchg(&l->owner, c, n) == c) l 939 kernel/trace/ftrace.c loff_t l = -1; l 942 kernel/trace/ftrace.c for (p = t_next(m, p, &l); p && l < *pos; p = t_next(m, p, &l)) l 945 kernel/trace/ftrace.c l = *pos; l 946 kernel/trace/ftrace.c p = t_next(m, p, &l); l 1212 kernel/trace/trace.c loff_t l = 0; l 1240 kernel/trace/trace.c for (p = iter; p && l < *pos; p = s_next(m, p, &l)) l 1244 kernel/trace/trace.c l = *pos - 1; l 1245 kernel/trace/trace.c p = s_next(m, p, &l); l 1991 kernel/trace/trace.c loff_t l = 0; l 1994 kernel/trace/trace.c for (; t && l < *pos; t = t_next(m, t, &l)) l 91 lib/idr.c int l = 0; l 101 lib/idr.c if (!(p = pa[++l])) l 137 lib/idr.c int l, id, oid; l 143 lib/idr.c l = idp->layers; l 144 lib/idr.c pa[l--] = NULL; l 149 lib/idr.c n = (id >> (IDR_BITS*l)) & IDR_MASK; l 154 lib/idr.c l++; l 156 lib/idr.c id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1; l 159 lib/idr.c if (!(p = pa[l])) { l 167 lib/idr.c sh = IDR_BITS * (l + 1); l 174 lib/idr.c sh = IDR_BITS*l; l 179 lib/idr.c if (l == 0) l 191 lib/idr.c pa[l--] = p; l 195 lib/idr.c pa[l] = p; l 340 lib/inflate.c int l; /* bits per table (returned in m) */ l 389 lib/inflate.c l = *m; l 394 lib/inflate.c if ((unsigned)l < j) l 395 lib/inflate.c l = j; l 400 lib/inflate.c if ((unsigned)l > i) l 401 lib/inflate.c l = i; l 402 lib/inflate.c *m = l; l 443 lib/inflate.c w = -l; /* bits decoded == (l * h) */ l 459 lib/inflate.c while (k > w + l) l 463 lib/inflate.c w += l; /* previous table always l bits */ l 466 lib/inflate.c z = (z = g - w) > (unsigned)l ? 
l : z; /* upper limit on table size */ l 503 lib/inflate.c r.b = (uch)l; /* bits to dump before this table */ l 506 lib/inflate.c j = i >> (w - l); /* (get around Turbo C bug) */ l 544 lib/inflate.c w -= l; l 771 lib/inflate.c unsigned *l; /* length list for huft_build */ l 775 lib/inflate.c l = malloc(sizeof(*l) * 288); l 776 lib/inflate.c if (l == NULL) l 781 lib/inflate.c l[i] = 8; l 783 lib/inflate.c l[i] = 9; l 785 lib/inflate.c l[i] = 7; l 787 lib/inflate.c l[i] = 8; l 789 lib/inflate.c if ((i = huft_build(l, 288, 257, cplens, cplext, &tl, &bl)) != 0) { l 790 lib/inflate.c free(l); l 796 lib/inflate.c l[i] = 5; l 798 lib/inflate.c if ((i = huft_build(l, 30, 0, cpdist, cpdext, &td, &bd)) > 1) l 801 lib/inflate.c free(l); l 810 lib/inflate.c free(l); l 815 lib/inflate.c free(l); l 830 lib/inflate.c unsigned l; /* last length */ l 910 lib/inflate.c i = l = 0; l 918 lib/inflate.c ll[i++] = l = j; /* save last length in l */ l 929 lib/inflate.c ll[i++] = l; l 942 lib/inflate.c l = 0; l 955 lib/inflate.c l = 0; l 1085 lib/vsprintf.c long *l = (long *) va_arg(args,long *); l 1086 lib/vsprintf.c *l = simple_strtol(str,&next,base); l 1088 lib/vsprintf.c unsigned long *l = (unsigned long*) va_arg(args,unsigned long*); l 1089 lib/vsprintf.c *l = simple_strtoul(str,&next,base); l 1094 lib/vsprintf.c long long *l = (long long*) va_arg(args,long long *); l 1095 lib/vsprintf.c *l = simple_strtoll(str,&next,base); l 1097 lib/vsprintf.c unsigned long long *l = (unsigned long long*) va_arg(args,unsigned long long*); l 1098 lib/vsprintf.c *l = simple_strtoull(str,&next,base); l 2106 mm/mempolicy.c int l; l 2147 mm/mempolicy.c l = strlen(policy_types[mode]); l 2148 mm/mempolicy.c if (buffer + maxlen < p + l + 1) l 2152 mm/mempolicy.c p += l; l 112 mm/migrate.c list_for_each_entry_safe(page, page2, l, lru) { l 4329 mm/slab.c int l; l 4332 mm/slab.c l = n[1]; l 4334 mm/slab.c while (l) { l 4335 mm/slab.c int i = l/2; l 4342 mm/slab.c l = i; l 4345 mm/slab.c l -= i + 1; l 3454 mm/slub.c struct location *l; l 3459 mm/slub.c l = (void *)__get_free_pages(flags, order); l 3460 mm/slub.c if (!l) l 3464 mm/slub.c memcpy(l, t->loc, sizeof(struct location) * t->count); l 3468 mm/slub.c t->loc = l; l 3476 mm/slub.c struct location *l; l 3496 mm/slub.c l = &t->loc[pos]; l 3497 mm/slub.c l->count++; l 3499 mm/slub.c l->sum_time += age; l 3500 mm/slub.c if (age < l->min_time) l 3501 mm/slub.c l->min_time = age; l 3502 mm/slub.c if (age > l->max_time) l 3503 mm/slub.c l->max_time = age; l 3505 mm/slub.c if (track->pid < l->min_pid) l 3506 mm/slub.c l->min_pid = track->pid; l 3507 mm/slub.c if (track->pid > l->max_pid) l 3508 mm/slub.c l->max_pid = track->pid; l 3510 mm/slub.c cpu_set(track->cpu, l->cpus); l 3512 mm/slub.c node_set(page_to_nid(virt_to_page(track)), l->nodes); l 3528 mm/slub.c l = t->loc + pos; l 3530 mm/slub.c memmove(l + 1, l, l 3533 mm/slub.c l->count = 1; l 3534 mm/slub.c l->addr = track->addr; l 3535 mm/slub.c l->sum_time = age; l 3536 mm/slub.c l->min_time = age; l 3537 mm/slub.c l->max_time = age; l 3538 mm/slub.c l->min_pid = track->pid; l 3539 mm/slub.c l->max_pid = track->pid; l 3540 mm/slub.c cpus_clear(l->cpus); l 3541 mm/slub.c cpu_set(track->cpu, l->cpus); l 3542 mm/slub.c nodes_clear(l->nodes); l 3543 mm/slub.c node_set(page_to_nid(virt_to_page(track)), l->nodes); l 3595 mm/slub.c struct location *l = &t.loc[i]; l 3599 mm/slub.c len += sprintf(buf + len, "%7ld ", l->count); l 3601 mm/slub.c if (l->addr) l 3602 mm/slub.c len += sprint_symbol(buf + len, (unsigned long)l->addr); l 3606 
mm/slub.c if (l->sum_time != l->min_time) { l 3608 mm/slub.c l->min_time, l 3609 mm/slub.c (long)div_u64(l->sum_time, l->count), l 3610 mm/slub.c l->max_time); l 3613 mm/slub.c l->min_time); l 3615 mm/slub.c if (l->min_pid != l->max_pid) l 3617 mm/slub.c l->min_pid, l->max_pid); l 3620 mm/slub.c l->min_pid); l 3622 mm/slub.c if (num_online_cpus() > 1 && !cpus_empty(l->cpus) && l 3626 mm/slub.c l->cpus); l 3629 mm/slub.c if (num_online_nodes() > 1 && !nodes_empty(l->nodes) && l 3633 mm/slub.c l->nodes); l 1352 mm/swapfile.c loff_t l = *pos; l 1356 mm/swapfile.c if (!l) l 1362 mm/swapfile.c if (!--l) l 803 mm/vmstat.c unsigned long *l = arg; l 804 mm/vmstat.c unsigned long off = l - (unsigned long *)m->private; l 806 mm/vmstat.c seq_printf(m, "%s %lu\n", vmstat_text[off], *l); l 32 net/appletalk/atalk_proc.c loff_t l = *pos; l 35 net/appletalk/atalk_proc.c return l ? atalk_get_interface_idx(--l) : SEQ_START_TOKEN; l 93 net/appletalk/atalk_proc.c loff_t l = *pos; l 96 net/appletalk/atalk_proc.c return l ? atalk_get_route_idx(--l) : SEQ_START_TOKEN; l 163 net/appletalk/atalk_proc.c loff_t l = *pos; l 166 net/appletalk/atalk_proc.c return l ? atalk_get_socket_idx(--l) : SEQ_START_TOKEN; l 1027 net/atm/lec.c --*l; l 1031 net/atm/lec.c if (--*l < 0) l 1036 net/atm/lec.c return (*l < 0) ? state : NULL; l 1046 net/atm/lec.c v = lec_tbl_walk(state, &priv->lec_arp_tables[p], l); l 1066 net/atm/lec.c v = lec_tbl_walk(state, lec_misc_tables[q], l); l 1081 net/atm/lec.c if (!lec_arp_walk(state, l, priv) && !lec_misc_walk(state, l, priv)) { l 1096 net/atm/lec.c v = (dev && dev->priv) ? lec_priv_walk(state, l, dev->priv) : NULL; l 1111 net/atm/lec.c v = lec_itf_walk(state, &l); l 98 net/atm/mpoa_proc.c loff_t l = *pos; l 101 net/atm/mpoa_proc.c if (!l--) l 104 net/atm/mpoa_proc.c if (!l--) l 89 net/atm/proc.c l--; l 93 net/atm/proc.c l -= compare_family(sk, family); l 94 net/atm/proc.c if (l < 0) l 104 net/atm/proc.c return (l < 0); l 109 net/atm/proc.c return __vcc_walk(&state->sk, state->family, &state->bucket, l) ? 
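The repeated "loff_t l = *pos; ... return l ? ..._get_idx(--l) : SEQ_START_TOKEN;" hits above (net/appletalk/atalk_proc.c) and further down (net/ipx/ipx_proc.c, net/llc/llc_proc.c, net/x25/x25_proc.c) are all the same seq_file .start idiom: l is a copy of the requested position, position 0 maps to the header token, and any other position walks forward to the (l-1)-th item. A minimal sketch of that idiom follows; my_items, my_get_idx and my_seq_start are illustrative names, not taken from any of the files listed here.

#include <linux/kernel.h>
#include <linux/seq_file.h>

/* Hypothetical table the ->show callback would print; stands in for the
 * per-protocol interface/route/socket lists in the *_proc.c hits above. */
static const char *my_items[] = { "item0", "item1", "item2" };

static void *my_get_idx(loff_t pos)
{
	return pos < ARRAY_SIZE(my_items) ? (void *)my_items[pos] : NULL;
}

/* seq_file .start callback in the style of atalk_proc.c / ipx_proc.c /
 * x25_proc.c: *pos == 0 returns SEQ_START_TOKEN so ->show can emit a
 * header line, *pos == n returns the (n-1)-th real item. */
static void *my_seq_start(struct seq_file *seq, loff_t *pos)
{
	loff_t l = *pos;

	return l ? my_get_idx(--l) : SEQ_START_TOKEN;
}

The decrement is the point of the idiom: because position 0 is reserved for the header token, the n-th item lives at seq position n + 1.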
l 171 net/bluetooth/af_bluetooth.c write_lock_bh(&l->lock); l 172 net/bluetooth/af_bluetooth.c sk_add_node(sk, &l->head); l 173 net/bluetooth/af_bluetooth.c write_unlock_bh(&l->lock); l 179 net/bluetooth/af_bluetooth.c write_lock_bh(&l->lock); l 181 net/bluetooth/af_bluetooth.c write_unlock_bh(&l->lock); l 116 net/bluetooth/l2cap.c for (s = l->head; s; s = l2cap_pi(s)->next_c) { l 126 net/bluetooth/l2cap.c for (s = l->head; s; s = l2cap_pi(s)->next_c) { l 138 net/bluetooth/l2cap.c read_lock(&l->lock); l 139 net/bluetooth/l2cap.c s = __l2cap_get_chan_by_scid(l, cid); l 141 net/bluetooth/l2cap.c read_unlock(&l->lock); l 148 net/bluetooth/l2cap.c for (s = l->head; s; s = l2cap_pi(s)->next_c) { l 158 net/bluetooth/l2cap.c read_lock(&l->lock); l 159 net/bluetooth/l2cap.c s = __l2cap_get_chan_by_ident(l, ident); l 161 net/bluetooth/l2cap.c read_unlock(&l->lock); l 170 net/bluetooth/l2cap.c if(!__l2cap_get_chan_by_scid(l, cid)) l 181 net/bluetooth/l2cap.c if (l->head) l 182 net/bluetooth/l2cap.c l2cap_pi(l->head)->prev_c = sk; l 184 net/bluetooth/l2cap.c l2cap_pi(sk)->next_c = l->head; l 186 net/bluetooth/l2cap.c l->head = sk; l 193 net/bluetooth/l2cap.c write_lock_bh(&l->lock); l 194 net/bluetooth/l2cap.c if (sk == l->head) l 195 net/bluetooth/l2cap.c l->head = next; l 201 net/bluetooth/l2cap.c write_unlock_bh(&l->lock); l 208 net/bluetooth/l2cap.c struct l2cap_chan_list *l = &conn->chan_list; l 216 net/bluetooth/l2cap.c l2cap_pi(sk)->scid = l2cap_alloc_cid(l); l 229 net/bluetooth/l2cap.c __l2cap_chan_link(l, sk); l 348 net/bluetooth/l2cap.c struct l2cap_chan_list *l = &conn->chan_list; l 353 net/bluetooth/l2cap.c read_lock(&l->lock); l 355 net/bluetooth/l2cap.c for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) { l 395 net/bluetooth/l2cap.c read_unlock(&l->lock); l 400 net/bluetooth/l2cap.c struct l2cap_chan_list *l = &conn->chan_list; l 405 net/bluetooth/l2cap.c read_lock(&l->lock); l 407 net/bluetooth/l2cap.c for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) { l 420 net/bluetooth/l2cap.c read_unlock(&l->lock); l 426 net/bluetooth/l2cap.c struct l2cap_chan_list *l = &conn->chan_list; l 431 net/bluetooth/l2cap.c read_lock(&l->lock); l 433 net/bluetooth/l2cap.c for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) { l 438 net/bluetooth/l2cap.c read_unlock(&l->lock); l 511 net/bluetooth/l2cap.c struct l2cap_chan_list *l = &conn->chan_list; l 512 net/bluetooth/l2cap.c write_lock_bh(&l->lock); l 514 net/bluetooth/l2cap.c write_unlock_bh(&l->lock); l 1288 net/bluetooth/l2cap.c struct l2cap_chan_list *l = &conn->chan_list; l 1294 net/bluetooth/l2cap.c read_lock(&l->lock); l 1295 net/bluetooth/l2cap.c for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) { l 1309 net/bluetooth/l2cap.c read_unlock(&l->lock); l 2194 net/bluetooth/l2cap.c struct l2cap_chan_list *l; l 2201 net/bluetooth/l2cap.c l = &conn->chan_list; l 2205 net/bluetooth/l2cap.c read_lock(&l->lock); l 2207 net/bluetooth/l2cap.c for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) { l 2257 net/bluetooth/l2cap.c read_unlock(&l->lock); l 2264 net/bluetooth/l2cap.c struct l2cap_chan_list *l; l 2271 net/bluetooth/l2cap.c l = &conn->chan_list; l 2275 net/bluetooth/l2cap.c read_lock(&l->lock); l 2277 net/bluetooth/l2cap.c for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) { l 2329 net/bluetooth/l2cap.c read_unlock(&l->lock); l 55 net/core/utils.c unsigned long l; l 59 net/core/utils.c l = 0; l 62 net/core/utils.c l <<= 8; l 72 net/core/utils.c l |= val; l 77 net/core/utils.c return(htonl(l)); l 391 net/ethernet/eth.c size_t l; l 393 net/ethernet/eth.c l = 
_format_mac_addr(buf, PAGE_SIZE, addr, len); l 394 net/ethernet/eth.c l += strlcpy(buf + l, "\n", PAGE_SIZE - l); l 395 net/ethernet/eth.c return ((ssize_t) l); l 20 net/ipv4/ah4.c int l = iph->ihl*4 - sizeof(struct iphdr); l 23 net/ipv4/ah4.c while (l > 0) { l 28 net/ipv4/ah4.c l--; l 33 net/ipv4/ah4.c if (optlen<2 || optlen>l) l 51 net/ipv4/ah4.c l -= optlen; l 210 net/ipv4/fib_trie.c return (l == 0) ? 0 : k >> (KEYLENGTH-l) << (KEYLENGTH-l); l 333 net/ipv4/fib_trie.c struct leaf *l = container_of(head, struct leaf, rcu); l 334 net/ipv4/fib_trie.c kmem_cache_free(trie_leaf_kmem, l); l 339 net/ipv4/fib_trie.c call_rcu_bh(&l->rcu, __leaf_free_rcu); l 390 net/ipv4/fib_trie.c struct leaf *l = kmem_cache_alloc(trie_leaf_kmem, GFP_KERNEL); l 391 net/ipv4/fib_trie.c if (l) { l 392 net/ipv4/fib_trie.c l->parent = T_LEAF; l 393 net/ipv4/fib_trie.c INIT_HLIST_HEAD(&l->list); l 395 net/ipv4/fib_trie.c return l; l 910 net/ipv4/fib_trie.c struct hlist_head *head = &l->list; l 923 net/ipv4/fib_trie.c struct leaf_info *li = find_leaf_info(l, plen); l 1020 net/ipv4/fib_trie.c struct leaf *l; l 1076 net/ipv4/fib_trie.c l = (struct leaf *) n; l 1083 net/ipv4/fib_trie.c insert_leaf_info(&l->list, li); l 1086 net/ipv4/fib_trie.c l = leaf_new(); l 1088 net/ipv4/fib_trie.c if (!l) l 1091 net/ipv4/fib_trie.c l->key = key; l 1095 net/ipv4/fib_trie.c free_leaf(l); l 1100 net/ipv4/fib_trie.c insert_leaf_info(&l->list, li); l 1105 net/ipv4/fib_trie.c node_set_parent((struct node *)l, tp); l 1108 net/ipv4/fib_trie.c put_child(t, (struct tnode *)tp, cindex, (struct node *)l); l 1131 net/ipv4/fib_trie.c free_leaf(l); l 1138 net/ipv4/fib_trie.c put_child(t, tn, missbit, (struct node *)l); l 1176 net/ipv4/fib_trie.c struct leaf *l; l 1198 net/ipv4/fib_trie.c l = fib_find_node(t, key); l 1201 net/ipv4/fib_trie.c if (l) { l 1202 net/ipv4/fib_trie.c fa_head = get_fa_head(l, plen); l 1339 net/ipv4/fib_trie.c struct hlist_head *hhead = &l->list; l 1347 net/ipv4/fib_trie.c if (l->key != (key & ntohl(mask))) l 1351 net/ipv4/fib_trie.c htonl(l->key), mask, plen); l 1571 net/ipv4/fib_trie.c struct tnode *tp = node_parent((struct node *) l); l 1573 net/ipv4/fib_trie.c pr_debug("entering trie_leaf_remove(%p)\n", l); l 1576 net/ipv4/fib_trie.c t_key cindex = tkey_extract_bits(l->key, tp->pos, tp->bits); l 1582 net/ipv4/fib_trie.c free_leaf(l); l 1596 net/ipv4/fib_trie.c struct leaf *l; l 1609 net/ipv4/fib_trie.c l = fib_find_node(t, key); l 1611 net/ipv4/fib_trie.c if (!l) l 1614 net/ipv4/fib_trie.c fa_head = get_fa_head(l, plen); l 1648 net/ipv4/fib_trie.c l = fib_find_node(t, key); l 1649 net/ipv4/fib_trie.c li = find_leaf_info(l, plen); l 1658 net/ipv4/fib_trie.c if (hlist_empty(&l->list)) l 1659 net/ipv4/fib_trie.c trie_leaf_remove(t, l); l 1690 net/ipv4/fib_trie.c struct hlist_head *lih = &l->list; l 1756 net/ipv4/fib_trie.c struct node *c = (struct node *) l; l 1767 net/ipv4/fib_trie.c struct leaf *l = trie_firstleaf(t); l 1769 net/ipv4/fib_trie.c while (l && index-- > 0) l 1770 net/ipv4/fib_trie.c l = trie_nextleaf(l); l 1772 net/ipv4/fib_trie.c return l; l 1782 net/ipv4/fib_trie.c struct leaf *l, *ll = NULL; l 1785 net/ipv4/fib_trie.c for (l = trie_firstleaf(t); l; l = trie_nextleaf(l)) { l 1786 net/ipv4/fib_trie.c found += trie_flush_leaf(l); l 1790 net/ipv4/fib_trie.c ll = l; l 1810 net/ipv4/fib_trie.c struct leaf *l; l 1818 net/ipv4/fib_trie.c l = fib_find_node(t, 0); l 1819 net/ipv4/fib_trie.c if (!l) l 1822 net/ipv4/fib_trie.c fa_head = get_fa_head(l, 0); l 1922 net/ipv4/fib_trie.c hlist_for_each_entry_rcu(li, node, 
&l->list, hlist) { l 1934 net/ipv4/fib_trie.c if (fn_trie_dump_fa(l->key, li->plen, &li->falh, tb, skb, cb) < 0) { l 1948 net/ipv4/fib_trie.c struct leaf *l; l 1958 net/ipv4/fib_trie.c l = trie_firstleaf(t); l 1963 net/ipv4/fib_trie.c l = fib_find_node(t, key); l 1964 net/ipv4/fib_trie.c if (!l) l 1965 net/ipv4/fib_trie.c l = trie_leafindex(t, count); l 1968 net/ipv4/fib_trie.c while (l) { l 1969 net/ipv4/fib_trie.c cb->args[2] = l->key; l 1970 net/ipv4/fib_trie.c if (fn_trie_dump_leaf(l, tb, skb, cb) < 0) { l 1977 net/ipv4/fib_trie.c l = trie_nextleaf(l); l 2119 net/ipv4/fib_trie.c struct leaf *l = (struct leaf *)n; l 2128 net/ipv4/fib_trie.c hlist_for_each_entry_rcu(li, tmp, &l->list, hlist) l 2407 net/ipv4/fib_trie.c struct leaf *l = (struct leaf *) n; l 2410 net/ipv4/fib_trie.c __be32 val = htonl(l->key); l 2415 net/ipv4/fib_trie.c hlist_for_each_entry_rcu(li, node, &l->list, hlist) { l 2467 net/ipv4/fib_trie.c struct leaf *l = NULL; l 2471 net/ipv4/fib_trie.c if (iter->pos > 0 && pos >= iter->pos && (l = fib_find_node(t, iter->key))) l 2475 net/ipv4/fib_trie.c l = trie_firstleaf(t); l 2478 net/ipv4/fib_trie.c while (l && pos-- > 0) { l 2480 net/ipv4/fib_trie.c l = trie_nextleaf(l); l 2483 net/ipv4/fib_trie.c if (l) l 2488 net/ipv4/fib_trie.c return l; l 2512 net/ipv4/fib_trie.c struct leaf *l = v; l 2517 net/ipv4/fib_trie.c l = trie_firstleaf(iter->main_trie); l 2520 net/ipv4/fib_trie.c l = trie_nextleaf(l); l 2523 net/ipv4/fib_trie.c if (l) l 2524 net/ipv4/fib_trie.c iter->key = l->key; l 2527 net/ipv4/fib_trie.c return l; l 2559 net/ipv4/fib_trie.c struct leaf *l = v; l 2570 net/ipv4/fib_trie.c hlist_for_each_entry_rcu(li, node, &l->list, hlist) { l 2575 net/ipv4/fib_trie.c prefix = htonl(l->key); l 188 net/ipv4/inetpeer.c struct inet_peer **nodep, *node, *l, *r; l 194 net/ipv4/inetpeer.c l = node->avl_left; l 196 net/ipv4/inetpeer.c lh = node_height(l); l 201 net/ipv4/inetpeer.c ll = l->avl_left; l 202 net/ipv4/inetpeer.c lr = l->avl_right; l 208 net/ipv4/inetpeer.c l->avl_left = ll; /* ll: RH+1 */ l 209 net/ipv4/inetpeer.c l->avl_right = node; /* node: RH+1 or RH+2 */ l 210 net/ipv4/inetpeer.c l->avl_height = node->avl_height + 1; l 211 net/ipv4/inetpeer.c *nodep = l; l 218 net/ipv4/inetpeer.c l->avl_left = ll; /* ll: RH */ l 219 net/ipv4/inetpeer.c l->avl_right = lrl; /* lrl: RH or RH-1 */ l 220 net/ipv4/inetpeer.c l->avl_height = rh + 1; /* l: RH+1 */ l 221 net/ipv4/inetpeer.c lr->avl_left = l; /* l: RH+1 */ l 234 net/ipv4/inetpeer.c node->avl_left = l; /* l: LH */ l 244 net/ipv4/inetpeer.c node->avl_left = l; /* l: LH */ l 215 net/ipv4/ip_options.c int l = opt->optlen; l 218 net/ipv4/ip_options.c while (l > 0) { l 223 net/ipv4/ip_options.c l--; l 228 net/ipv4/ip_options.c if (optlen<2 || optlen>l) l 232 net/ipv4/ip_options.c l -= optlen; l 252 net/ipv4/ip_options.c int l; l 266 net/ipv4/ip_options.c for (l = opt->optlen; l > 0; ) { l 269 net/ipv4/ip_options.c for (optptr++, l--; l>0; optptr++, l--) { l 277 net/ipv4/ip_options.c l--; l 282 net/ipv4/ip_options.c if (optlen<2 || optlen>l) { l 455 net/ipv4/ip_options.c l -= optlen; l 344 net/ipv4/netfilter/ipt_ULOG.c struct ipt_ulog_info l = { l 350 net/ipv4/netfilter/ipt_ULOG.c memcpy(l.prefix, cl->prefix, sizeof(l.prefix)); l 351 net/ipv4/netfilter/ipt_ULOG.c memcpy(dst, &l, sizeof(l)); l 356 net/ipv4/netfilter/ipt_ULOG.c const struct ipt_ulog_info *l = src; l 358 net/ipv4/netfilter/ipt_ULOG.c .nl_group = l->nl_group, l 359 net/ipv4/netfilter/ipt_ULOG.c .copy_range = l->copy_range, l 360 net/ipv4/netfilter/ipt_ULOG.c .qthreshold 
= l->qthreshold, l 363 net/ipv4/netfilter/ipt_ULOG.c memcpy(cl.prefix, l->prefix, sizeof(cl.prefix)); l 582 net/ipv4/netfilter/nf_nat_snmp_basic.c long l[0]; /* 32 bit signed */ l 682 net/ipv4/netfilter/nf_nat_snmp_basic.c long l; l 718 net/ipv4/netfilter/nf_nat_snmp_basic.c l = 0; l 722 net/ipv4/netfilter/nf_nat_snmp_basic.c if (!asn1_long_decode(ctx, end, &l)) { l 734 net/ipv4/netfilter/nf_nat_snmp_basic.c (*obj)->syntax.l[0] = l; l 48 net/ipv6/ip6_flowlabel.c #define FL_HASH(l) (ntohl(l)&FL_HASH_MASK) l 258 net/ipv6/ndisc.c int l; l 261 net/ipv6/ndisc.c l = nd_opt->nd_opt_len << 3; l 262 net/ipv6/ndisc.c if (opt_len < l || l == 0) l 307 net/ipv6/ndisc.c opt_len -= l; l 308 net/ipv6/ndisc.c nd_opt = ((void *)nd_opt) + l; l 808 net/ipx/af_ipx.c __be32 *l; l 829 net/ipx/af_ipx.c l = (__be32 *) c; l 833 net/ipx/af_ipx.c if (*l++ == intrfc->if_netnum) l 851 net/ipx/af_ipx.c l = (__be32 *) c; l 855 net/ipx/af_ipx.c if (ifcs->if_netnum == *l++) l 39 net/ipx/ipx_proc.c loff_t l = *pos; l 42 net/ipx/ipx_proc.c return l ? ipx_get_interface_idx(--l) : SEQ_START_TOKEN; l 124 net/ipx/ipx_proc.c loff_t l = *pos; l 126 net/ipx/ipx_proc.c return l ? ipx_get_route_idx(--l) : SEQ_START_TOKEN; l 195 net/ipx/ipx_proc.c loff_t l = *pos; l 198 net/ipx/ipx_proc.c return l ? ipx_get_socket_idx(--l) : SEQ_START_TOKEN; l 1360 net/irda/ircomm/ircomm_tty.c int count = 0, l; l 1371 net/irda/ircomm/ircomm_tty.c l = ircomm_tty_line_info(self, buf + count); l 1372 net/irda/ircomm/ircomm_tty.c count += l; l 262 net/iucv/af_iucv.c write_lock_bh(&l->lock); l 263 net/iucv/af_iucv.c sk_add_node(sk, &l->head); l 264 net/iucv/af_iucv.c write_unlock_bh(&l->lock); l 269 net/iucv/af_iucv.c write_lock_bh(&l->lock); l 271 net/iucv/af_iucv.c write_unlock_bh(&l->lock); l 59 net/llc/llc_proc.c loff_t l = *pos; l 62 net/llc/llc_proc.c return l ? 
llc_get_sk_idx(--l) : SEQ_START_TOKEN; l 18 net/mac80211/michael.c mctx->l ^= val; l 19 net/mac80211/michael.c mctx->r ^= rol32(mctx->l, 17); l 20 net/mac80211/michael.c mctx->l += mctx->r; l 21 net/mac80211/michael.c mctx->r ^= ((mctx->l & 0xff00ff00) >> 8) | l 22 net/mac80211/michael.c ((mctx->l & 0x00ff00ff) << 8); l 23 net/mac80211/michael.c mctx->l += mctx->r; l 24 net/mac80211/michael.c mctx->r ^= rol32(mctx->l, 3); l 25 net/mac80211/michael.c mctx->l += mctx->r; l 26 net/mac80211/michael.c mctx->r ^= ror32(mctx->l, 2); l 27 net/mac80211/michael.c mctx->l += mctx->r; l 42 net/mac80211/michael.c mctx->l = get_unaligned_le32(key); l 84 net/mac80211/michael.c put_unaligned_le32(mctx.l, mic); l 18 net/mac80211/michael.h u32 l, r; l 137 net/mac80211/rc80211_pid_algo.c for (i = 0; i < l - 1; i++) l 465 net/mac80211/rx.c #define msh_h_get(h, l) ((struct ieee80211s_hdr *) ((u8 *)h + l)) l 66 net/netfilter/ipvs/ip_vs_conn.c rwlock_t l; l 75 net/netfilter/ipvs/ip_vs_conn.c read_lock(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l); l 80 net/netfilter/ipvs/ip_vs_conn.c read_unlock(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l); l 85 net/netfilter/ipvs/ip_vs_conn.c write_lock(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l); l 90 net/netfilter/ipvs/ip_vs_conn.c write_unlock(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l); l 95 net/netfilter/ipvs/ip_vs_conn.c read_lock_bh(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l); l 100 net/netfilter/ipvs/ip_vs_conn.c read_unlock_bh(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l); l 105 net/netfilter/ipvs/ip_vs_conn.c write_lock_bh(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l); l 110 net/netfilter/ipvs/ip_vs_conn.c write_unlock_bh(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l); l 778 net/netfilter/ipvs/ip_vs_conn.c struct list_head *e, *l = seq->private; l 786 net/netfilter/ipvs/ip_vs_conn.c if ((e = cp->c_list.next) != l) l 789 net/netfilter/ipvs/ip_vs_conn.c idx = l - ip_vs_conn_tab; l 806 net/netfilter/ipvs/ip_vs_conn.c struct list_head *l = seq->private; l 808 net/netfilter/ipvs/ip_vs_conn.c if (l) l 809 net/netfilter/ipvs/ip_vs_conn.c ct_read_unlock_bh(l - ip_vs_conn_tab); l 1087 net/netfilter/ipvs/ip_vs_conn.c rwlock_init(&__ip_vs_conntbl_lock_array[idx].l); l 182 net/netfilter/nf_conntrack_h323_asn1.c unsigned int v, l; l 185 net/netfilter/nf_conntrack_h323_asn1.c l = b + bs->bit; l 187 net/netfilter/nf_conntrack_h323_asn1.c if (l < 8) { l 188 net/netfilter/nf_conntrack_h323_asn1.c v >>= 8 - l; l 189 net/netfilter/nf_conntrack_h323_asn1.c bs->bit = l; l 190 net/netfilter/nf_conntrack_h323_asn1.c } else if (l == 8) { l 197 net/netfilter/nf_conntrack_h323_asn1.c v >>= 16 - l; l 198 net/netfilter/nf_conntrack_h323_asn1.c bs->bit = l - 8; l 208 net/netfilter/nf_conntrack_h323_asn1.c unsigned int v, l, shift, bytes; l 213 net/netfilter/nf_conntrack_h323_asn1.c l = bs->bit + b; l 215 net/netfilter/nf_conntrack_h323_asn1.c if (l < 8) { l 217 net/netfilter/nf_conntrack_h323_asn1.c bs->bit = l; l 218 net/netfilter/nf_conntrack_h323_asn1.c } else if (l == 8) { l 222 net/netfilter/nf_conntrack_h323_asn1.c for (bytes = l >> 3, shift = 24, v = 0; bytes; l 226 net/netfilter/nf_conntrack_h323_asn1.c if (l < 32) { l 229 net/netfilter/nf_conntrack_h323_asn1.c } else if (l > 32) { l 234 net/netfilter/nf_conntrack_h323_asn1.c bs->bit = l & 0x7; l 464 net/netfilter/xt_hashlimit.c return l ? 
htonl(ntohl(a) & ~0 << (32 - l)) : 0; l 242 net/rose/rose_subr.c unsigned char l, lg, n = 0; l 268 net/rose/rose_subr.c l = p[1]; l 291 net/rose/rose_subr.c for (pt = p + 2, lg = 0 ; lg < l ; pt += AX25_ADDR_LEN, lg += AX25_ADDR_LEN) { l 298 net/rose/rose_subr.c p += l + 2; l 299 net/rose/rose_subr.c n += l + 2; l 300 net/rose/rose_subr.c len -= l + 2; l 310 net/rose/rose_subr.c unsigned char l, n = 0; l 334 net/rose/rose_subr.c l = p[1]; l 337 net/rose/rose_subr.c memcpy(callsign, p + 12, l - 10); l 338 net/rose/rose_subr.c callsign[l - 10] = '\0'; l 343 net/rose/rose_subr.c memcpy(callsign, p + 12, l - 10); l 344 net/rose/rose_subr.c callsign[l - 10] = '\0'; l 347 net/rose/rose_subr.c p += l + 2; l 348 net/rose/rose_subr.c n += l + 2; l 349 net/rose/rose_subr.c len -= l + 2; l 64 net/sched/cls_basic.c unsigned long l = 0UL; l 73 net/sched/cls_basic.c l = (unsigned long) f; l 75 net/sched/cls_basic.c return l; l 599 net/sunrpc/auth_gss/svcauth_gss.c int l; l 604 net/sunrpc/auth_gss/svcauth_gss.c l = round_up_to_quad(o->len); l 605 net/sunrpc/auth_gss/svcauth_gss.c if (argv->iov_len < l) l 608 net/sunrpc/auth_gss/svcauth_gss.c argv->iov_base += l; l 609 net/sunrpc/auth_gss/svcauth_gss.c argv->iov_len -= l; l 904 net/sunrpc/xdr.c unsigned int l = min(avail_page, l 919 net/sunrpc/xdr.c memcpy(c, elem + copied, l); l 920 net/sunrpc/xdr.c copied += l; l 924 net/sunrpc/xdr.c memcpy(elem + copied, c, l); l 925 net/sunrpc/xdr.c copied += l; l 933 net/sunrpc/xdr.c avail_page -= l; l 934 net/sunrpc/xdr.c c += l; l 944 net/sunrpc/xdr.c unsigned int l = min(avail_page, l 959 net/sunrpc/xdr.c memcpy(c, elem + copied, l); l 960 net/sunrpc/xdr.c copied += l; l 964 net/sunrpc/xdr.c memcpy(elem + copied, c, l); l 965 net/sunrpc/xdr.c copied += l; l 991 net/sunrpc/xdr.c unsigned int l = desc->elem_size - copied; l 994 net/sunrpc/xdr.c memcpy(c, elem + copied, l); l 996 net/sunrpc/xdr.c memcpy(elem + copied, c, l); l 1001 net/sunrpc/xdr.c todo -= l; l 1002 net/sunrpc/xdr.c c += l; l 1491 net/sunrpc/xprtrdma/verbs.c LIST_HEAD(l); l 1492 net/sunrpc/xprtrdma/verbs.c list_add(&seg->mr_chunk.rl_mw->r.fmr->list, &l); l 1493 net/sunrpc/xprtrdma/verbs.c rc = ib_unmap_fmr(&l); l 82 net/tipc/handler.c struct list_head *l, *n; l 85 net/tipc/handler.c list_for_each_safe(l, n, &signal_queue_head) { l 86 net/tipc/handler.c item = list_entry(l, struct queue_item, next_signal); l 112 net/tipc/handler.c struct list_head *l, *n; l 123 net/tipc/handler.c list_for_each_safe(l, n, &signal_queue_head) { l 124 net/tipc/handler.c item = list_entry(l, struct queue_item, next_signal); l 85 net/wanrouter/wanproc.c loff_t l = *pos; l 88 net/wanrouter/wanproc.c if (!l--) l 90 net/wanrouter/wanproc.c for (wandev = wanrouter_router_devlist; l-- && wandev; l 46 net/x25/x25_proc.c loff_t l = *pos; l 49 net/x25/x25_proc.c return l ? x25_get_route_idx(--l) : SEQ_START_TOKEN; l 112 net/x25/x25_proc.c loff_t l = *pos; l 115 net/x25/x25_proc.c return l ? x25_get_socket_idx(--l) : SEQ_START_TOKEN; l 192 net/x25/x25_proc.c loff_t l = *pos; l 195 net/x25/x25_proc.c return l ? 
x25_get_forward_idx(--l) : SEQ_START_TOKEN; l 2089 net/xfrm/xfrm_user.c size_t l = 0; l 2091 net/xfrm/xfrm_user.c l += nla_total_size(aead_len(x->aead)); l 2093 net/xfrm/xfrm_user.c l += nla_total_size(xfrm_alg_len(x->aalg)); l 2095 net/xfrm/xfrm_user.c l += nla_total_size(xfrm_alg_len(x->ealg)); l 2097 net/xfrm/xfrm_user.c l += nla_total_size(sizeof(*x->calg)); l 2099 net/xfrm/xfrm_user.c l += nla_total_size(sizeof(*x->encap)); l 2101 net/xfrm/xfrm_user.c l += nla_total_size(sizeof(struct xfrm_user_sec_ctx) + l 2104 net/xfrm/xfrm_user.c l += nla_total_size(sizeof(*x->coaddr)); l 2107 net/xfrm/xfrm_user.c l += nla_total_size(sizeof(u64)); l 2109 net/xfrm/xfrm_user.c return l; l 53 scripts/kconfig/conf.c int l; l 57 scripts/kconfig/conf.c l = strlen(p); l 59 scripts/kconfig/conf.c memmove(str, p, l + 1); l 60 scripts/kconfig/conf.c if (!l) l 62 scripts/kconfig/conf.c p = str + l - 1; l 399 scripts/kconfig/confdata.c int type, l; l 499 scripts/kconfig/confdata.c l = strcspn(str, "\"\\"); l 500 scripts/kconfig/confdata.c if (l) { l 501 scripts/kconfig/confdata.c fwrite(str, l, 1, out); l 502 scripts/kconfig/confdata.c str += l; l 676 scripts/kconfig/confdata.c int i, l; l 737 scripts/kconfig/confdata.c l = strcspn(str, "\"\\"); l 738 scripts/kconfig/confdata.c if (l) { l 739 scripts/kconfig/confdata.c fwrite(str, l, 1, out); l 740 scripts/kconfig/confdata.c fwrite(str, l, 1, out_h); l 741 scripts/kconfig/confdata.c str += l; l 52 scripts/kconfig/expr.h for (e = (l); e && (s = e->right.sym); e = e->left.expr) l 106 scripts/kconfig/util.c size_t l; l 108 scripts/kconfig/util.c l = strlen(gs->s) + strlen(s) + 1; l 109 scripts/kconfig/util.c if (l > gs->len) { l 110 scripts/kconfig/util.c gs->s = realloc(gs->s, l); l 111 scripts/kconfig/util.c gs->len = l; l 37 security/selinux/ss/mls.c int i, l, len, head, prev; l 46 security/selinux/ss/mls.c for (l = 0; l < 2; l++) { l 47 security/selinux/ss/mls.c int index_sens = context->range.level[l].sens; l 53 security/selinux/ss/mls.c e = &context->range.level[l].cat; l 71 security/selinux/ss/mls.c if (l == 0) { l 92 security/selinux/ss/mls.c int i, l, head, prev; l 104 security/selinux/ss/mls.c for (l = 0; l < 2; l++) { l 106 security/selinux/ss/mls.c policydb.p_sens_val_to_name[context->range.level[l].sens - 1]); l 112 security/selinux/ss/mls.c e = &context->range.level[l].cat; l 147 security/selinux/ss/mls.c if (l == 0) { l 166 security/selinux/ss/mls.c if (!l->sens || l->sens > p->p_levels.nprim) l 169 security/selinux/ss/mls.c p->p_sens_val_to_name[l->sens - 1]); l 173 security/selinux/ss/mls.c ebitmap_for_each_positive_bit(&l->cat, node, i) { l 254 security/selinux/ss/mls.c int l, rc = -EINVAL; l 289 security/selinux/ss/mls.c for (l = 0; l < 2; l++) { l 296 security/selinux/ss/mls.c context->range.level[l].sens = levdatum->level->sens; l 322 security/selinux/ss/mls.c rc = ebitmap_set_bit(&context->range.level[l].cat, l 343 security/selinux/ss/mls.c rc = ebitmap_set_bit(&context->range.level[l].cat, i, 1); l 366 security/selinux/ss/mls.c if (l == 0) { l 413 security/selinux/ss/mls.c int l, rc = 0; l 416 security/selinux/ss/mls.c for (l = 0; l < 2; l++) { l 417 security/selinux/ss/mls.c context->range.level[l].sens = range->level[l].sens; l 418 security/selinux/ss/mls.c rc = ebitmap_cpy(&context->range.level[l].cat, l 419 security/selinux/ss/mls.c &range->level[l].cat); l 478 security/selinux/ss/mls.c int l, i; l 483 security/selinux/ss/mls.c for (l = 0; l < 2; l++) { l 485 security/selinux/ss/mls.c oldp->p_sens_val_to_name[c->range.level[l].sens - 
1]); l 489 security/selinux/ss/mls.c c->range.level[l].sens = levdatum->level->sens; l 492 security/selinux/ss/mls.c ebitmap_for_each_positive_bit(&c->range.level[l].cat, node, i) { l 503 security/selinux/ss/mls.c ebitmap_destroy(&c->range.level[l].cat); l 504 security/selinux/ss/mls.c c->range.level[l].cat = bitmap; l 1651 security/selinux/ss/policydb.c struct ocontext *l, *c, *newc; l 1869 security/selinux/ss/policydb.c l = NULL; l 1876 security/selinux/ss/policydb.c if (l) l 1877 security/selinux/ss/policydb.c l->next = c; l 1880 security/selinux/ss/policydb.c l = c; l 2052 security/selinux/ss/policydb.c for (l = NULL, c = newgenfs->head; c; l 2053 security/selinux/ss/policydb.c l = c, c = c->next) { l 2069 security/selinux/ss/policydb.c if (l) l 2070 security/selinux/ss/policydb.c l->next = newc; l 122 sound/aoa/codecs/snd-aoa-codec-onyx.c s8 l, r; l 125 sound/aoa/codecs/snd-aoa-codec-onyx.c onyx_read_register(onyx, ONYX_REG_DAC_ATTEN_LEFT, &l); l 129 sound/aoa/codecs/snd-aoa-codec-onyx.c ucontrol->value.integer.value[0] = l + VOLUME_RANGE_SHIFT; l 139 sound/aoa/codecs/snd-aoa-codec-onyx.c s8 l, r; l 149 sound/aoa/codecs/snd-aoa-codec-onyx.c onyx_read_register(onyx, ONYX_REG_DAC_ATTEN_LEFT, &l); l 152 sound/aoa/codecs/snd-aoa-codec-onyx.c if (l + VOLUME_RANGE_SHIFT == ucontrol->value.integer.value[0] && l 526 sound/aoa/fabrics/snd-aoa-fabric-layout.c struct layout *l; l 528 sound/aoa/fabrics/snd-aoa-fabric-layout.c l = layouts; l 529 sound/aoa/fabrics/snd-aoa-fabric-layout.c while (l->layout_id) { l 530 sound/aoa/fabrics/snd-aoa-fabric-layout.c if (l->layout_id == id) l 531 sound/aoa/fabrics/snd-aoa-fabric-layout.c return l; l 532 sound/aoa/fabrics/snd-aoa-fabric-layout.c l++; l 542 sound/aoa/fabrics/snd-aoa-fabric-layout.c if (l->codecs[i].name) { l 543 sound/aoa/fabrics/snd-aoa-fabric-layout.c request_module("snd-aoa-codec-%s", l->codecs[i].name); l 1012 sound/core/pcm_lib.c snd_pcm_hw_rule_list, l, l 1088 sound/core/pcm_lib.c unsigned int l = (unsigned long) rule->private; l 1089 sound/core/pcm_lib.c int width = l & 0xffff; l 1090 sound/core/pcm_lib.c unsigned int msbits = l >> 16; l 1109 sound/core/pcm_lib.c unsigned long l = (msbits << 16) | width; l 1112 sound/core/pcm_lib.c (void*) l, l 49 sound/core/pcm_timer.c unsigned long rate, mult, fsize, l, post; l 56 sound/core/pcm_timer.c l = gcd(mult, rate); l 57 sound/core/pcm_timer.c mult /= l; l 58 sound/core/pcm_timer.c rate /= l; l 62 sound/core/pcm_timer.c l = gcd(rate, fsize); l 63 sound/core/pcm_timer.c rate /= l; l 64 sound/core/pcm_timer.c fsize /= l; l 210 sound/core/seq/oss/seq_oss_event.c if (q->l.chn >= 32) l 212 sound/core/seq/oss/seq_oss_event.c switch (q->l.cmd) { l 214 sound/core/seq/oss/seq_oss_event.c return set_control_event(dp, q->l.dev, SNDRV_SEQ_EVENT_PGMCHANGE, l 215 sound/core/seq/oss/seq_oss_event.c q->l.chn, 0, q->l.p1, ev); l 218 sound/core/seq/oss/seq_oss_event.c return set_control_event(dp, q->l.dev, SNDRV_SEQ_EVENT_CONTROLLER, l 219 sound/core/seq/oss/seq_oss_event.c q->l.chn, q->l.p1, q->l.val, ev); l 223 sound/core/seq/oss/seq_oss_event.c return set_control_event(dp, q->l.dev, SNDRV_SEQ_EVENT_PITCHBEND, l 224 sound/core/seq/oss/seq_oss_event.c q->l.chn, 0, q->l.val - 8192, ev); l 227 sound/core/seq/oss/seq_oss_event.c return set_control_event(dp, q->l.dev, SNDRV_SEQ_EVENT_CHANPRESS, l 228 sound/core/seq/oss/seq_oss_event.c q->l.chn, 0, q->l.val, ev); l 95 sound/core/seq/oss/seq_oss_event.h struct evrec_long l; l 562 sound/core/seq/oss/seq_oss_midi.c ossev.l.cmd = MIDI_CTL_CHANGE; break; l 564 
sound/core/seq/oss/seq_oss_midi.c ossev.l.cmd = MIDI_PGM_CHANGE; break; l 566 sound/core/seq/oss/seq_oss_midi.c ossev.l.cmd = MIDI_CHN_PRESSURE; break; l 568 sound/core/seq/oss/seq_oss_midi.c ossev.l.cmd = MIDI_PITCH_BEND; break; l 587 sound/core/seq/oss/seq_oss_midi.c ossev.l.code = EV_CHN_COMMON; l 588 sound/core/seq/oss/seq_oss_midi.c ossev.l.p1 = ev->data.control.param; l 589 sound/core/seq/oss/seq_oss_midi.c ossev.l.val = ev->data.control.value; l 590 sound/core/seq/oss/seq_oss_midi.c ossev.l.chn = ev->data.control.channel; l 593 sound/core/seq/oss/seq_oss_midi.c ossev.l.code = EV_CHN_COMMON; l 594 sound/core/seq/oss/seq_oss_midi.c ossev.l.val = ev->data.control.value + 8192; l 595 sound/core/seq/oss/seq_oss_midi.c ossev.l.chn = ev->data.control.channel; l 52 sound/drivers/vx/vx_mixer.c u32 l; l 56 sound/drivers/vx/vx_mixer.c u16 l; l 66 sound/drivers/vx/vx_mixer.c u16 l; l 81 sound/drivers/vx/vx_mixer.c #define SET_CDC_DATA_INIT(di) ((di).l = 0L, SET_CDC_DATA_SEL(di,XX_CODEC_SELECTOR)) l 96 sound/drivers/vx/vx_mixer.c vx_write_codec_reg(chip, codec, data.l); l 236 sound/isa/gus/gus_main.c int l, idx, local; l 259 sound/isa/gus/gus_main.c for (l = 0, local = gus->gf1.memory; l < 4; l++, local -= 256 * 1024) { l 260 sound/isa/gus/gus_main.c gus->gf1.mem_alloc.banks_8[l].address = l 261 sound/isa/gus/gus_main.c gus->gf1.mem_alloc.banks_8[l].size = 0; l 262 sound/isa/gus/gus_main.c gus->gf1.mem_alloc.banks_16[l].address = l << 18; l 263 sound/isa/gus/gus_main.c gus->gf1.mem_alloc.banks_16[l].size = local > 0 ? 256 * 1024 : 0; l 736 sound/isa/wavefront/wavefront_synth.c int i, x, l, cnt; l 749 sound/isa/wavefront/wavefront_synth.c for (l = 0; l < WF_NUM_LAYERS; l++) { l 750 sound/isa/wavefront/wavefront_synth.c if (prog.layer[l].mute) { l 752 sound/isa/wavefront/wavefront_synth.c [prog.layer[l].patch_number] |= l 202 sound/mips/hal2.c int l, r; l 208 sound/mips/hal2.c l = 0; l 211 sound/mips/hal2.c l = 31 - ((tmp >> H2I_C2_L_ATT_SHIFT) & 31); l 217 sound/mips/hal2.c l = (tmp >> H2I_C2_L_GAIN_SHIFT) & 15; l 221 sound/mips/hal2.c ucontrol->value.integer.value[0] = l; l 232 sound/mips/hal2.c int l, r; l 234 sound/mips/hal2.c l = ucontrol->value.integer.value[0]; l 241 sound/mips/hal2.c if (l | r) { l 242 sound/mips/hal2.c l = 31 - l; l 244 sound/mips/hal2.c new |= (l << H2I_C2_L_ATT_SHIFT); l 253 sound/mips/hal2.c new |= (l << H2I_C2_L_GAIN_SHIFT); l 417 sound/mips/sgio2audio.c s64 l, r; l 443 sound/mips/sgio2audio.c l = src[0]; /* sign extend */ l 446 sound/mips/sgio2audio.c *dst = ((l & 0x00ffffff) << CHANNEL_LEFT_SHIFT) | l 705 sound/oss/ac97_codec.c struct list_head *l; l 781 sound/oss/ac97_codec.c list_for_each(l, &codec_drivers) { l 782 sound/oss/ac97_codec.c d = list_entry(l, struct ac97_driver, list); l 138 sound/oss/audio.c int l; l 147 sound/oss/audio.c if ((l = dmap->user_counter % dmap->fragment_size) > 0) l 152 sound/oss/audio.c len = dmap->fragment_size - l; l 224 sound/oss/audio.c int c, p, l, buf_size, used, returned; l 259 sound/oss/audio.c l = c; l 261 sound/oss/audio.c if (l > buf_size) l 262 sound/oss/audio.c l = buf_size; l 264 sound/oss/audio.c returned = l; l 265 sound/oss/audio.c used = l; l 268 sound/oss/audio.c if ((dma_buf + l) > l 271 sound/oss/audio.c printk(KERN_ERR "audio: Buffer error 3 (%lx,%d), (%lx, %d)\n", (long) dma_buf, l, (long) audio_devs[dev]->dmap_out->raw_buf, (int) audio_devs[dev]->dmap_out->buffsize); l 279 sound/oss/audio.c if(copy_from_user(dma_buf, &(buf)[p], l)) l 287 sound/oss/audio.c l); l 288 sound/oss/audio.c l = returned; l 292 
sound/oss/audio.c translate_bytes(ulaw_dsp, (unsigned char *) dma_buf, l); l 296 sound/oss/audio.c DMAbuf_move_wrpointer(dev, l); l 305 sound/oss/audio.c int c, p, l; l 326 sound/oss/audio.c if ((buf_no = DMAbuf_getrdbuffer(dev, &dmabuf, &l, !!(file->f_flags & O_NONBLOCK))) < 0) l 340 sound/oss/audio.c if (l > c) l 341 sound/oss/audio.c l = c; l 349 sound/oss/audio.c translate_bytes(dsp_ulaw, (unsigned char *) dmabuf, l); l 355 sound/oss/audio.c if(copy_to_user(&(buf)[p], fixit, l)) l 359 sound/oss/audio.c DMAbuf_rmchars(dev, buf_no, l); l 361 sound/oss/audio.c p += l; l 362 sound/oss/audio.c c -= l; l 875 sound/oss/dmabuf.c dmap->user_counter += l; l 17 sound/oss/hex2hex.c int l=0, c, i; l 51 sound/oss/hex2hex.c if (addr > l) l 52 sound/oss/hex2hex.c l = addr; l 65 sound/oss/hex2hex.c return l; l 71 sound/oss/hex2hex.c int i,l; l 86 sound/oss/hex2hex.c l = loadhex(stdin, buf); l 89 sound/oss/hex2hex.c printf("static int %s_len = %d;\n", varline, l); l 92 sound/oss/hex2hex.c for (i=0;i<l;i++) l 143 sound/oss/msnd.h #define HIWORD(l) ((WORD)((((DWORD)(l)) >> 16) & 0xFFFF)) l 144 sound/oss/msnd.h #define LOWORD(l) ((WORD)(DWORD)(l)) l 714 sound/oss/sequencer.c int i, l = 0; l 724 sound/oss/sequencer.c l = 0; l 726 sound/oss/sequencer.c l = i + 1; l 730 sound/oss/sequencer.c if (l > 0) l 731 sound/oss/sequencer.c synth_devs[dev]->send_sysex(dev, buf, l); l 920 sound/oss/sscape.c int len,l; l 936 sound/oss/sscape.c if (len > devc -> buffsize) l = devc->buffsize; l 937 sound/oss/sscape.c else l = len; l 938 sound/oss/sscape.c len -= l; l 939 sound/oss/sscape.c memcpy(devc->raw_buf, dt, l); dt += l; l 940 sound/oss/sscape.c sscape_start_dma(devc->dma, devc->raw_buf_phys, l, 0x48); l 1172 sound/oss/swarm_cs4297a.c unsigned char l, r, rl, rr, vidx; l 1329 sound/oss/swarm_cs4297a.c l = val & 0xff; l 1330 sound/oss/swarm_cs4297a.c if (l > 100) l 1331 sound/oss/swarm_cs4297a.c l = 100; // Max soundcard.h vol is 100. l 1332 sound/oss/swarm_cs4297a.c if (l < 6) { l 1334 sound/oss/swarm_cs4297a.c l = 0; l 1336 sound/oss/swarm_cs4297a.c rl = attentbl[(10 * l) / 100]; // Convert 0-100 vol to 63-0 atten. l 1358 sound/oss/swarm_cs4297a.c s->mix.vol[8] = ((unsigned int) r << 8) | l; l 1367 sound/oss/swarm_cs4297a.c l = val & 0xff; l 1368 sound/oss/swarm_cs4297a.c if (l > 100) l 1369 sound/oss/swarm_cs4297a.c l = 100; l 1370 sound/oss/swarm_cs4297a.c if (l < 3) { l 1372 sound/oss/swarm_cs4297a.c l = 0; l 1374 sound/oss/swarm_cs4297a.c rl = (l * 2 - 5) / 13; // Convert 0-100 range to 0-15. l 1375 sound/oss/swarm_cs4297a.c l = (rl * 13 + 5) / 2; l 1388 sound/oss/swarm_cs4297a.c s->mix.vol[6] = l << 8; l 1397 sound/oss/swarm_cs4297a.c l = val & 0xff; l 1398 sound/oss/swarm_cs4297a.c if (l > 100) l 1399 sound/oss/swarm_cs4297a.c l = 100; l 1403 sound/oss/swarm_cs4297a.c rl = (l * 2 - 5) / 13; // Convert 0-100 scale to 0-15. l 1414 sound/oss/swarm_cs4297a.c s->mix.vol[7] = ((unsigned int) r << 8) | l; l 1423 sound/oss/swarm_cs4297a.c l = val & 0xff; l 1424 sound/oss/swarm_cs4297a.c if (l > 100) l 1425 sound/oss/swarm_cs4297a.c l = 100; l 1426 sound/oss/swarm_cs4297a.c if (l < 1) { l 1427 sound/oss/swarm_cs4297a.c l = 0; l 1430 sound/oss/swarm_cs4297a.c rl = ((unsigned) l * 5 - 4) / 16; // Convert 0-100 range to 0-31. l 1431 sound/oss/swarm_cs4297a.c l = (rl * 16 + 4) / 5; l 1454 sound/oss/swarm_cs4297a.c l = val & 0xff; l 1455 sound/oss/swarm_cs4297a.c if (l > 100) l 1456 sound/oss/swarm_cs4297a.c l = 100; l 1462 sound/oss/swarm_cs4297a.c rl = (l * 2 - 11) / 3; // Convert 0-100 range to 0-63. 
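In the sound/oss/swarm_cs4297a.c hits above, l holds a 0-100 OSS volume that is quantised into a small hardware gain field and then converted back, so the cached mixer value reflects the step the codec will actually apply. A stand-alone sketch of that round trip for the 0-15 field shown above; the helper names and the main() driver are illustrative and not part of the driver.

#include <stdio.h>

/* Map a 0-100 OSS volume onto a 0-15 hardware gain field, as in the
 * swarm_cs4297a.c snippets above (values below 3 collapse to mute). */
static unsigned vol_to_hw(unsigned l)
{
	if (l > 100)
		l = 100;		/* max soundcard.h volume is 100 */
	if (l < 3)
		return 0;
	return (l * 2 - 5) / 13;	/* 0-100 -> 0-15 */
}

/* Inverse mapping, so the cached value matches the hardware quantisation. */
static unsigned hw_to_vol(unsigned rl)
{
	return (rl * 13 + 5) / 2;	/* 0-15 -> 0-100 */
}

int main(void)
{
	unsigned l = 70;
	unsigned rl = vol_to_hw(l);

	printf("user %u -> hw %u -> cached %u\n", l, rl, hw_to_vol(rl));
	return 0;
}

Caching hw_to_vol(vol_to_hw(l)) rather than l itself is the design choice visible in the driver: a later read of the mixer reports the level the hardware is really using, not the unquantised request.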
l 1479 sound/oss/swarm_cs4297a.c s->mix.vol[4] = (r << 8) | l; l 1495 sound/oss/swarm_cs4297a.c l = val & 0xff; l 1496 sound/oss/swarm_cs4297a.c if (l > 100) l 1497 sound/oss/swarm_cs4297a.c l = 100; l 1498 sound/oss/swarm_cs4297a.c if (l < 1) { l 1499 sound/oss/swarm_cs4297a.c l = 0; l 1502 sound/oss/swarm_cs4297a.c rl = (attentbl[(l * 10) / 100]) >> 1; l 1520 sound/oss/swarm_cs4297a.c s->mix.vol[vidx - 1] = ((unsigned int) r << 8) | l; l 1934 sound/oss/waveartist.c unsigned int val, l, r; l 1939 sound/oss/waveartist.c l = val & 0x7f; l 1941 sound/oss/waveartist.c val = (l + r) / 2; l 815 sound/pci/au88x0/au88x0_a3d.c int l, r; l 817 sound/pci/au88x0/au88x0_a3d.c l = ucontrol->value.integer.value[0]; l 819 sound/pci/au88x0/au88x0_a3d.c vortex_a3d_coord2ild(a->ild, l, r); l 821 sound/pci/au88x0/au88x0_a3d.c a3dsrc_SetGainTarget(a, l, r); l 822 sound/pci/au88x0/au88x0_a3d.c a3dsrc_SetGainCurrent(a, l, r); l 1455 sound/pci/emu10k1/emufx.c int j, k, l, d; l 1458 sound/pci/emu10k1/emufx.c l = 0xe0 + (z * 8) + (j * 4); l 1469 sound/pci/emu10k1/emufx.c A_OP(icode, &ptr, iMACMV, A_GPR(l+1), A_GPR(l), A_GPR(l+1), A_GPR(TREBLE_GPR + 4 + j)); l 1470 sound/pci/emu10k1/emufx.c A_OP(icode, &ptr, iMACMV, A_GPR(l), A_GPR(k+2), A_GPR(l), A_GPR(TREBLE_GPR + 2 + j)); l 1471 sound/pci/emu10k1/emufx.c A_OP(icode, &ptr, iMACMV, A_GPR(l+3), A_GPR(l+2), A_GPR(l+3), A_GPR(TREBLE_GPR + 8 + j)); l 1472 sound/pci/emu10k1/emufx.c A_OP(icode, &ptr, iMAC0, A_GPR(l+2), A_GPR_ACCU, A_GPR(l+2), A_GPR(TREBLE_GPR + 6 + j)); l 1473 sound/pci/emu10k1/emufx.c A_OP(icode, &ptr, iMACINT0, A_GPR(l+2), A_C_00000000, A_GPR(l+2), A_C_00000010); l 1475 sound/pci/emu10k1/emufx.c A_OP(icode, &ptr, iACC3, A_GPR(d), A_GPR(l+2), A_C_00000000, A_C_00000000); l 2165 sound/pci/emu10k1/emufx.c int j, k, l, d; l 2168 sound/pci/emu10k1/emufx.c l = 0xd0 + (z * 8) + (j * 4); l 2179 sound/pci/emu10k1/emufx.c OP(icode, &ptr, iMACMV, GPR(l+1), GPR(l), GPR(l+1), GPR(TREBLE_GPR + 4 + j)); l 2180 sound/pci/emu10k1/emufx.c OP(icode, &ptr, iMACMV, GPR(l), GPR(k+2), GPR(l), GPR(TREBLE_GPR + 2 + j)); l 2181 sound/pci/emu10k1/emufx.c OP(icode, &ptr, iMACMV, GPR(l+3), GPR(l+2), GPR(l+3), GPR(TREBLE_GPR + 8 + j)); l 2182 sound/pci/emu10k1/emufx.c OP(icode, &ptr, iMAC0, GPR(l+2), GPR_ACCU, GPR(l+2), GPR(TREBLE_GPR + 6 + j)); l 2183 sound/pci/emu10k1/emufx.c OP(icode, &ptr, iMACINT0, GPR(l+2), C_00000000, GPR(l+2), C_00000010); l 2185 sound/pci/emu10k1/emufx.c OP(icode, &ptr, iACC3, GPR(d), GPR(l+2), C_00000000, C_00000000); l 77 sound/pci/emu10k1/memory.c #define get_emu10k1_memblk(l,member) list_entry(l, struct snd_emu10k1_memblk, member) l 307 sound/pci/korg1212/korg1212.c } l; l 955 sound/pci/korg1212/korg1212.c sensVals.l.v.leftChanId = SET_SENS_LEFTCHANID; l 957 sound/pci/korg1212/korg1212.c sensVals.l.v.leftChanVal = korg1212->leftADCInSens; l 976 sound/pci/korg1212/korg1212.c if (sensVals.l.leftSensBits & (0x0001 << bitPosition)) l 942 sound/ppc/pmac.c unsigned int l; l 1082 sound/ppc/pmac.c prop = of_get_property(sound, "sample-rates", &l); l 1084 sound/ppc/pmac.c prop = of_get_property(sound, "output-frame-rates", &l); l 1088 sound/ppc/pmac.c for (l /= sizeof(int); l > 0; --l) { l 179 sound/soc/codecs/wm9712.c u16 l, r, beep, line, phone, mic, pcm, aux; l 181 sound/soc/codecs/wm9712.c l = ac97_read(w->codec, HPL_MIXER); l 190 sound/soc/codecs/wm9712.c if (l & 0x1 || r & 0x1) l 195 sound/soc/codecs/wm9712.c if (l & 0x2 || r & 0x2) l 200 sound/soc/codecs/wm9712.c if (l & 0x4 || r & 0x4) l 205 sound/soc/codecs/wm9712.c if (l & 0x8 || r & 0x8) l 210 
sound/soc/codecs/wm9712.c if (l & 0x10 || r & 0x10) l 215 sound/soc/codecs/wm9712.c if (l & 0x20 || r & 0x20) l 217 sound/soc/codecs/wm9713.c u16 l, r, beep, tone, phone, rec, pcm, aux; l 219 sound/soc/codecs/wm9713.c l = ac97_read(w->codec, HPL_MIXER); l 230 sound/soc/codecs/wm9713.c if ((l & 0x1) || (r & 0x1)) l 235 sound/soc/codecs/wm9713.c if ((l & 0x2) || (r & 0x2)) l 240 sound/soc/codecs/wm9713.c if ((l & 0x4) || (r & 0x4)) l 245 sound/soc/codecs/wm9713.c if ((l & 0x8) || (r & 0x8)) l 250 sound/soc/codecs/wm9713.c if ((l & 0x10) || (r & 0x10)) l 255 sound/soc/codecs/wm9713.c if ((l & 0x20) || (r & 0x20)) l 13 sound/sound_firmware.c long l; l 23 sound/sound_firmware.c l = filp->f_path.dentry->d_inode->i_size; l 24 sound/sound_firmware.c if (l <= 0 || l > 131072) l 30 sound/sound_firmware.c dp = vmalloc(l); l 38 sound/sound_firmware.c if (vfs_read(filp, dp, l, &pos) != l) l 47 sound/sound_firmware.c return (int) l; l 36 sound/usb/usx2y/usb_stream.c unsigned l = 0; l 45 sound/usb/usx2y/usb_stream.c l = usb_stream_next_packet_size(sk); l 46 sound/usb/usx2y/usb_stream.c if (s->idle_outsize + urb->transfer_buffer_length + l > l 52 sound/usb/usx2y/usb_stream.c urb->iso_frame_desc[pack].length = l; l 53 sound/usb/usx2y/usb_stream.c urb->transfer_buffer_length += l; l 284 sound/usb/usx2y/usb_stream.c int p, l = 0; l 294 sound/usb/usx2y/usb_stream.c l = id->actual_length; l 296 sound/usb/usx2y/usb_stream.c od[p].length = l; l 298 sound/usb/usx2y/usb_stream.c io->transfer_buffer_length += l; l 304 sound/usb/usx2y/usb_stream.c l = inurb->iso_frame_desc[s->sync_packet].actual_length; l 306 sound/usb/usx2y/usb_stream.c if (s->idle_outsize + io->transfer_buffer_length + l > l 310 sound/usb/usx2y/usb_stream.c od[p].length = l; l 312 sound/usb/usx2y/usb_stream.c io->transfer_buffer_length += l; l 321 sound/usb/usx2y/usb_stream.c s->idle_outsize + io->transfer_buffer_length + l, l 322 sound/usb/usx2y/usb_stream.c s->idle_outsize, io->transfer_buffer_length, l, l 384 sound/usb/usx2y/usb_stream.c int il, ol, l, p; l 390 sound/usb/usx2y/usb_stream.c l = 0; l 399 sound/usb/usx2y/usb_stream.c for (; p < iu->number_of_packets && l < s->period_size; ++p) { l 402 sound/usb/usx2y/usb_stream.c if (l + il > s->period_size) l 403 sound/usb/usx2y/usb_stream.c il = s->period_size - l; l 416 sound/usb/usx2y/usb_stream.c l += il; l 419 sound/usb/usx2y/usb_stream.c if (l != s->period_size) l 421 sound/usb/usx2y/usb_stream.c l/(int)s->cfg.frame_size); l 442 sound/usb/usx2y/usb_stream.c int l, p; l 453 sound/usb/usx2y/usb_stream.c l = id[p].actual_length; l 454 sound/usb/usx2y/usb_stream.c if (unlikely(l == 0 || id[p].status)) { l 466 sound/usb/usx2y/usb_stream.c s->inpacket[s->inpacket_head].length = l; l 467 sound/usb/usx2y/usb_stream.c if (insize + l > s->period_size && l 472 sound/usb/usx2y/usb_stream.c insize += l; l 473 sound/usb/usx2y/usb_stream.c urb_size += l; l 483 sound/usb/usx2y/usb_stream.c l = s->idle_outsize; l 485 sound/usb/usx2y/usb_stream.c sk->write_page) - l; l 490 sound/usb/usx2y/usb_stream.c s->outpacket[0].length = sk->idle_outurb->transfer_buffer_length + l; l 525 sound/usb/usx2y/usb_stream.c int l, p, max_diff, max_diff_0; l 542 sound/usb/usx2y/usb_stream.c l = inurb->iso_frame_desc[p].actual_length; l 543 sound/usb/usx2y/usb_stream.c urb_size += l; l 568 sound/usb/usx2y/usb_stream.c l = s->idle_insize; l 569 sound/usb/usx2y/usb_stream.c while (l > s->inpacket[split].length) { l 570 sound/usb/usx2y/usb_stream.c l -= s->inpacket[split].length; l 578 sound/usb/usx2y/usb_stream.c 
s->inpacket[split].length - l; l 614 sound/usb/usx2y/usb_stream.c int l = id[p].actual_length; l 615 sound/usb/usx2y/usb_stream.c if (l < s->cfg.frame_size) { l 618 sound/usb/usx2y/usb_stream.c snd_printk(KERN_WARNING "%i\n", l); l 626 sound/usb/usx2y/usb_stream.c s->inpacket[s->inpacket_head].length = l; l 633 sound/usb/usx2y/usb_stream.c int l = urb->iso_frame_desc[pack].actual_length; l 634 sound/usb/usx2y/usb_stream.c printk(" %i", l);
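The sound/usb/usx2y/usb_stream.c entries that close this listing use l for the actual_length of one isochronous packet, summed until a full period of audio has been gathered. A simplified, self-contained sketch of that accumulation, with a stand-in struct instead of the real struct urb iso_frame_desc array:

#include <stdio.h>

struct packet {
	unsigned length;	/* stand-in for iso_frame_desc[].actual_length */
	int status;		/* stand-in for iso_frame_desc[].status */
};

/* Sum per-packet byte counts until one period_size worth of audio has
 * arrived, mirroring the usb_stream.c loops above. Returns the number of
 * packets consumed, 0 if the period is still incomplete, -1 on a dropped
 * or errored packet. */
static int packets_per_period(const struct packet *pkts, int npackets,
			      unsigned period_size)
{
	unsigned insize = 0;
	int p;

	for (p = 0; p < npackets; p++) {
		unsigned l = pkts[p].length;

		if (l == 0 || pkts[p].status)
			return -1;
		insize += l;
		if (insize >= period_size)
			return p + 1;
	}
	return 0;
}

int main(void)
{
	struct packet pkts[] = { {192, 0}, {192, 0}, {176, 0}, {192, 0} };

	printf("packets in first period: %d\n",
	       packets_per_period(pkts, 4, 512));
	return 0;
}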