h 328 arch/x86/kernel/amd_iommu_init.c u8 *p = (void *)h, *end = (void *)h;
h 331 arch/x86/kernel/amd_iommu_init.c p += sizeof(*h);
h 332 arch/x86/kernel/amd_iommu_init.c end += h->length;
h 334 arch/x86/kernel/amd_iommu_init.c find_last_devid_on_pci(PCI_BUS(h->devid),
h 335 arch/x86/kernel/amd_iommu_init.c PCI_SLOT(h->devid),
h 336 arch/x86/kernel/amd_iommu_init.c PCI_FUNC(h->devid),
h 337 arch/x86/kernel/amd_iommu_init.c h->cap_ptr);
h 369 arch/x86/kernel/amd_iommu_init.c struct ivhd_header *h;
h 385 arch/x86/kernel/amd_iommu_init.c h = (struct ivhd_header *)p;
h 386 arch/x86/kernel/amd_iommu_init.c switch (h->type) {
h 388 arch/x86/kernel/amd_iommu_init.c find_last_devid_from_ivhd(h);
h 393 arch/x86/kernel/amd_iommu_init.c p += h->length;
h 559 arch/x86/kernel/amd_iommu_init.c u8 *p = (u8 *)h;
h 570 arch/x86/kernel/amd_iommu_init.c h->flags & IVHD_FLAG_HT_TUN_EN ?
h 574 arch/x86/kernel/amd_iommu_init.c h->flags & IVHD_FLAG_PASSPW_EN ?
h 578 arch/x86/kernel/amd_iommu_init.c h->flags & IVHD_FLAG_RESPASSPW_EN ?
h 582 arch/x86/kernel/amd_iommu_init.c h->flags & IVHD_FLAG_ISOC_EN ?
h 595 arch/x86/kernel/amd_iommu_init.c end += h->length;
h 700 arch/x86/kernel/amd_iommu_init.c iommu->dev = pci_get_bus_and_slot(PCI_BUS(h->devid), h->devid & 0xff);
h 704 arch/x86/kernel/amd_iommu_init.c iommu->cap_ptr = h->cap_ptr;
h 705 arch/x86/kernel/amd_iommu_init.c iommu->pci_seg = h->pci_seg;
h 706 arch/x86/kernel/amd_iommu_init.c iommu->mmio_phys = h->mmio_phys;
h 707 arch/x86/kernel/amd_iommu_init.c iommu->mmio_base = iommu_map_mmio_space(h->mmio_phys);
h 723 arch/x86/kernel/amd_iommu_init.c init_iommu_from_acpi(iommu, h);
h 736 arch/x86/kernel/amd_iommu_init.c struct ivhd_header *h;
h 744 arch/x86/kernel/amd_iommu_init.c h = (struct ivhd_header *)p;
h 750 arch/x86/kernel/amd_iommu_init.c ret = init_iommu_one(iommu, h);
h 757 arch/x86/kernel/amd_iommu_init.c p += h->length;
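
The amd_iommu_init.c hits above are all one pattern: treat the ACPI table as a byte stream, cast the cursor to an ivhd_header, act on it, and advance the cursor by h->length. A minimal userspace sketch of that walk (the struct here is an illustrative stand-in, not the real IVHD layout):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct entry_header {            /* stand-in for struct ivhd_header */
        uint8_t  type;
        uint8_t  flags;
        uint16_t length;             /* size of the whole entry, header included */
    };

    static void walk_entries(const uint8_t *table, size_t size)
    {
        const uint8_t *p = table, *end = table + size;

        while (p + sizeof(struct entry_header) <= end) {
            const struct entry_header *h = (const void *)p;

            if (h->length < sizeof(*h) || h->length > (size_t)(end - p))
                break;               /* malformed entry: stop, don't overrun */
            printf("entry type %u, length %u\n", h->type, h->length);
            p += h->length;          /* same advance as the 'p += h->length' hits */
        }
    }
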
h 827 arch/x86/kernel/apic_32.c unsigned int l, h;
h 829 arch/x86/kernel/apic_32.c rdmsr(MSR_IA32_APICBASE, l, h);
h 831 arch/x86/kernel/apic_32.c wrmsr(MSR_IA32_APICBASE, l, h);
h 1185 arch/x86/kernel/apic_32.c u32 h, l, features;
h 1221 arch/x86/kernel/apic_32.c rdmsr(MSR_IA32_APICBASE, l, h);
h 1227 arch/x86/kernel/apic_32.c wrmsr(MSR_IA32_APICBASE, l, h);
h 1244 arch/x86/kernel/apic_32.c rdmsr(MSR_IA32_APICBASE, l, h);
h 1632 arch/x86/kernel/apic_32.c unsigned int l, h;
h 1655 arch/x86/kernel/apic_32.c rdmsr(MSR_IA32_APICBASE, l, h);
h 1658 arch/x86/kernel/apic_32.c wrmsr(MSR_IA32_APICBASE, l, h);
h 712 arch/x86/kernel/apic_64.c unsigned int l, h;
h 714 arch/x86/kernel/apic_64.c rdmsr(MSR_IA32_APICBASE, l, h);
h 716 arch/x86/kernel/apic_64.c wrmsr(MSR_IA32_APICBASE, l, h);
h 1575 arch/x86/kernel/apic_64.c unsigned int l, h;
h 1598 arch/x86/kernel/apic_64.c rdmsr(MSR_IA32_APICBASE, l, h);
h 1601 arch/x86/kernel/apic_64.c wrmsr(MSR_IA32_APICBASE, l, h);
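
In the apic_32.c/apic_64.c hits, rdmsr()/wrmsr() split the 64-bit MSR_IA32_APICBASE into a low half l and a high half h, and the code only ever edits the low half. A hedged sketch of that read-modify-write idiom (constants from asm/msr-index.h; the global enable is bit 11):

    /* Sketch, assuming the kernel's rdmsr/wrmsr macros, which fill and
     * consume two u32 halves of one 64-bit MSR. */
    unsigned int l, h;

    rdmsr(MSR_IA32_APICBASE, l, h);      /* l = bits 31:0, h = bits 63:32 */
    l &= ~MSR_IA32_APICBASE_BASE;        /* re-point the base ... */
    l |= MSR_IA32_APICBASE_ENABLE | APIC_DEFAULT_PHYS_BASE;  /* ... and enable */
    wrmsr(MSR_IA32_APICBASE, l, h);      /* high half written back untouched */
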
h 56 arch/x86/kernel/cpu/amd.c u32 l, h;
h 103 arch/x86/kernel/cpu/amd.c rdmsr(MSR_K6_WHCR, l, h);
h 109 arch/x86/kernel/cpu/amd.c wrmsr(MSR_K6_WHCR, l, h);
h 124 arch/x86/kernel/cpu/amd.c rdmsr(MSR_K6_WHCR, l, h);
h 130 arch/x86/kernel/cpu/amd.c wrmsr(MSR_K6_WHCR, l, h);
h 148 arch/x86/kernel/cpu/amd.c u32 l, h;
h 158 arch/x86/kernel/cpu/amd.c rdmsr(MSR_K7_HWCR, l, h);
h 160 arch/x86/kernel/cpu/amd.c wrmsr(MSR_K7_HWCR, l, h);
h 171 arch/x86/kernel/cpu/amd.c rdmsr(MSR_K7_CLK_CTL, l, h);
h 175 arch/x86/kernel/cpu/amd.c wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
h 154 arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c u32 h;
h 158 arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c rdmsr(cmd->addr.msr.reg, cmd->val, h);
h 296 arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c u32 h;
h 298 arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c h = max_t(u32, aperf_cur.split.hi, mperf_cur.split.hi);
h 299 arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c shift_count = fls(h);
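
The acpi-cpufreq.c pair of hits is a trick for forming an APERF/MPERF ratio without full 64-bit division: take h as the larger of the two high words and shift both counters right by fls(h), after which both fit in 32 bits. A worked sketch (fls_u32() stands in for the kernel's fls(); the per-mille scaling is illustrative):

    #include <stdint.h>

    static int fls_u32(uint32_t x)       /* 1-based index of the top set bit */
    {
        return x ? 32 - __builtin_clz(x) : 0;
    }

    static unsigned ratio_permille(uint64_t aperf, uint64_t mperf)
    {
        uint32_t ha = (uint32_t)(aperf >> 32);
        uint32_t hm = (uint32_t)(mperf >> 32);
        int shift = fls_u32(ha > hm ? ha : hm);   /* max_t(u32, ...) + fls() */

        aperf >>= shift;                 /* both values now fit in 32 bits */
        mperf >>= shift;
        return mperf ? (unsigned)(aperf * 1000 / mperf) : 0;
    }
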
h 59 arch/x86/kernel/cpu/cpufreq/p4-clockmod.c u32 l, h;
h 64 arch/x86/kernel/cpu/cpufreq/p4-clockmod.c rdmsr_on_cpu(cpu, MSR_IA32_THERM_STATUS, &l, &h);
h 72 arch/x86/kernel/cpu/cpufreq/p4-clockmod.c rdmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, &l, &h);
h 75 arch/x86/kernel/cpu/cpufreq/p4-clockmod.c wrmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, l & ~(1<<4), h);
h 86 arch/x86/kernel/cpu/cpufreq/p4-clockmod.c wrmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, l, h);
h 247 arch/x86/kernel/cpu/cpufreq/p4-clockmod.c u32 l, h;
h 249 arch/x86/kernel/cpu/cpufreq/p4-clockmod.c rdmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, &l, &h);
h 324 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c unsigned l, h;
h 333 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c rdmsr(MSR_IA32_PERF_STATUS, l, h);
h 343 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c rdmsr(MSR_IA32_PERF_CTL, l, h);
h 356 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c unsigned l, h;
h 391 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c rdmsr(MSR_IA32_MISC_ENABLE, l, h);
h 396 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c wrmsr(MSR_IA32_MISC_ENABLE, l, h);
h 399 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c rdmsr(MSR_IA32_MISC_ENABLE, l, h);
h 473 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c unsigned int msr, oldmsr = 0, h = 0, cpu = policy->cpu;
h 537 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c rdmsr(MSR_IA32_PERF_CTL, oldmsr, h);
h 564 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c wrmsr(MSR_IA32_PERF_CTL, oldmsr, h);
h 591 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c wrmsr(MSR_IA32_PERF_CTL, oldmsr, h);
h 75 arch/x86/kernel/cpu/mcheck/k7.c u32 l, h;
h 85 arch/x86/kernel/cpu/mcheck/k7.c rdmsr(MSR_IA32_MCG_CAP, l, h);
h 35 arch/x86/kernel/cpu/mcheck/mce_intel_64.c u32 l, h;
h 49 arch/x86/kernel/cpu/mcheck/mce_intel_64.c rdmsr(MSR_IA32_MISC_ENABLE, l, h);
h 50 arch/x86/kernel/cpu/mcheck/mce_intel_64.c h = apic_read(APIC_LVTTHMR);
h 51 arch/x86/kernel/cpu/mcheck/mce_intel_64.c if ((l & (1 << 3)) && (h & APIC_DM_SMI)) {
h 60 arch/x86/kernel/cpu/mcheck/mce_intel_64.c if (h & APIC_VECTOR_MASK) {
h 63 arch/x86/kernel/cpu/mcheck/mce_intel_64.c "installed\n", cpu, (h & APIC_VECTOR_MASK));
h 67 arch/x86/kernel/cpu/mcheck/mce_intel_64.c h = THERMAL_APIC_VECTOR;
h 68 arch/x86/kernel/cpu/mcheck/mce_intel_64.c h |= (APIC_DM_FIXED | APIC_LVT_MASKED);
h 69 arch/x86/kernel/cpu/mcheck/mce_intel_64.c apic_write(APIC_LVTTHMR, h);
h 71 arch/x86/kernel/cpu/mcheck/mce_intel_64.c rdmsr(MSR_IA32_THERM_INTERRUPT, l, h);
h 72 arch/x86/kernel/cpu/mcheck/mce_intel_64.c wrmsr(MSR_IA32_THERM_INTERRUPT, l | 0x03, h);
h 74 arch/x86/kernel/cpu/mcheck/mce_intel_64.c rdmsr(MSR_IA32_MISC_ENABLE, l, h);
h 75 arch/x86/kernel/cpu/mcheck/mce_intel_64.c wrmsr(MSR_IA32_MISC_ENABLE, l | (1 << 3), h);
h 71 arch/x86/kernel/cpu/mcheck/p4.c u32 l, h;
h 86 arch/x86/kernel/cpu/mcheck/p4.c rdmsr(MSR_IA32_MISC_ENABLE, l, h);
h 87 arch/x86/kernel/cpu/mcheck/p4.c h = apic_read(APIC_LVTTHMR);
h 88 arch/x86/kernel/cpu/mcheck/p4.c if ((l & (1<<3)) && (h & APIC_DM_SMI)) {
h 95 arch/x86/kernel/cpu/mcheck/p4.c if (h & APIC_VECTOR_MASK) {
h 98 arch/x86/kernel/cpu/mcheck/p4.c cpu, (h & APIC_VECTOR_MASK));
h 103 arch/x86/kernel/cpu/mcheck/p4.c h = THERMAL_APIC_VECTOR; /* our delivery vector */
h 104 arch/x86/kernel/cpu/mcheck/p4.c h |= (APIC_DM_FIXED | APIC_LVT_MASKED); /* we'll mask till we're ready */
h 105 arch/x86/kernel/cpu/mcheck/p4.c apic_write(APIC_LVTTHMR, h);
h 107 arch/x86/kernel/cpu/mcheck/p4.c rdmsr(MSR_IA32_THERM_INTERRUPT, l, h);
h 108 arch/x86/kernel/cpu/mcheck/p4.c wrmsr(MSR_IA32_THERM_INTERRUPT, l | 0x03, h);
h 113 arch/x86/kernel/cpu/mcheck/p4.c rdmsr(MSR_IA32_MISC_ENABLE, l, h);
h 114 arch/x86/kernel/cpu/mcheck/p4.c wrmsr(MSR_IA32_MISC_ENABLE, l | (1<<3), h);
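
mce_intel_64.c and p4.c share this thermal-monitor bring-up almost line for line; note how h is first an MSR high word and is then reused to build the APIC_LVTTHMR value. A condensed, hedged sketch of the sequence (bit 3 of MSR_IA32_MISC_ENABLE is the thermal-monitor enable):

    u32 l, h;

    rdmsr(MSR_IA32_MISC_ENABLE, l, h);
    h = apic_read(APIC_LVTTHMR);              /* h reused: now the LVT entry */
    if ((l & (1 << 3)) && (h & APIC_DM_SMI))
        return;                               /* BIOS already routed it to SMI */
    if (h & APIC_VECTOR_MASK)
        return;                               /* someone else owns the vector */

    h = THERMAL_APIC_VECTOR | APIC_DM_FIXED | APIC_LVT_MASKED;
    apic_write(APIC_LVTTHMR, h);              /* program it, still masked */

    rdmsr(MSR_IA32_THERM_INTERRUPT, l, h);
    wrmsr(MSR_IA32_THERM_INTERRUPT, l | 0x03, h);  /* both thresholds interrupt */

    rdmsr(MSR_IA32_MISC_ENABLE, l, h);
    wrmsr(MSR_IA32_MISC_ENABLE, l | (1 << 3), h);  /* finally set the enable bit */
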
h 130 arch/x86/kernel/cpu/mcheck/p4.c u32 h;
h 132 arch/x86/kernel/cpu/mcheck/p4.c rdmsr(MSR_IA32_MCG_EAX, r->eax, h);
h 133 arch/x86/kernel/cpu/mcheck/p4.c rdmsr(MSR_IA32_MCG_EBX, r->ebx, h);
h 134 arch/x86/kernel/cpu/mcheck/p4.c rdmsr(MSR_IA32_MCG_ECX, r->ecx, h);
h 135 arch/x86/kernel/cpu/mcheck/p4.c rdmsr(MSR_IA32_MCG_EDX, r->edx, h);
h 136 arch/x86/kernel/cpu/mcheck/p4.c rdmsr(MSR_IA32_MCG_ESI, r->esi, h);
h 137 arch/x86/kernel/cpu/mcheck/p4.c rdmsr(MSR_IA32_MCG_EDI, r->edi, h);
h 138 arch/x86/kernel/cpu/mcheck/p4.c rdmsr(MSR_IA32_MCG_EBP, r->ebp, h);
h 139 arch/x86/kernel/cpu/mcheck/p4.c rdmsr(MSR_IA32_MCG_ESP, r->esp, h);
h 140 arch/x86/kernel/cpu/mcheck/p4.c rdmsr(MSR_IA32_MCG_EFLAGS, r->eflags, h);
h 141 arch/x86/kernel/cpu/mcheck/p4.c rdmsr(MSR_IA32_MCG_EIP, r->eip, h);
h 223 arch/x86/kernel/cpu/mcheck/p4.c u32 l, h;
h 230 arch/x86/kernel/cpu/mcheck/p4.c rdmsr(MSR_IA32_MCG_CAP, l, h);
h 245 arch/x86/kernel/cpu/mcheck/p4.c rdmsr(MSR_IA32_MCG_CAP, l, h);
h 33 arch/x86/kernel/cpu/mcheck/p5.c u32 l, h;
h 46 arch/x86/kernel/cpu/mcheck/p5.c rdmsr(MSR_IA32_P5_MC_ADDR, l, h);
h 47 arch/x86/kernel/cpu/mcheck/p5.c rdmsr(MSR_IA32_P5_MC_TYPE, l, h);
h 87 arch/x86/kernel/cpu/mcheck/p6.c u32 l, h;
h 103 arch/x86/kernel/cpu/mcheck/p6.c rdmsr(MSR_IA32_MCG_CAP, l, h);
h 1518 arch/x86/kernel/cpu/mtrr/main.c u32 l, h;
h 1525 arch/x86/kernel/cpu/mtrr/main.c if (rdmsr_safe(MSR_K8_SYSCFG, &l, &h) < 0)
h 565 arch/x86/kernel/vmi_32.c unsigned l, h;
h 566 arch/x86/kernel/vmi_32.c rdmsr(MSR_EFER, l, h);
h 567 arch/x86/kernel/vmi_32.c ap.efer = (unsigned long long) h << 32 | l;
h 8 arch/x86/lib/msr-on-cpu.c u32 l, h;
h 16 arch/x86/lib/msr-on-cpu.c rdmsr(rv->msr_no, rv->l, rv->h);
h 23 arch/x86/lib/msr-on-cpu.c wrmsr(rv->msr_no, rv->l, rv->h);
h 34 arch/x86/lib/msr-on-cpu.c *h = rv.h;
h 46 arch/x86/lib/msr-on-cpu.c rv.h = h;
h 58 arch/x86/lib/msr-on-cpu.c rv->err = rdmsr_safe(rv->msr_no, &rv->l, &rv->h);
h 65 arch/x86/lib/msr-on-cpu.c rv->err = wrmsr_safe(rv->msr_no, rv->l, rv->h);
h 76 arch/x86/lib/msr-on-cpu.c *h = rv.h;
h 88 arch/x86/lib/msr-on-cpu.c rv.h = h;
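
msr-on-cpu.c just ferries the l/h halves through a small struct so the actual rdmsr/wrmsr can run on the target CPU; the `*h = rv.h` and `rv.h = h` hits are that copy-out/copy-in. A hedged caller-side sketch:

    /* Read a 64-bit MSR on another CPU and recombine the halves;
     * MSR_IA32_APICBASE is only an example register here. */
    u32 l, h;
    u64 val = 0;

    if (rdmsr_on_cpu(cpu, MSR_IA32_APICBASE, &l, &h) == 0)
        val = ((u64)h << 32) | l;

    /* the write side splits before crossing CPUs: */
    wrmsr_on_cpu(cpu, MSR_IA32_APICBASE, (u32)val, (u32)(val >> 32));
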
h 19 arch/x86/math-emu/reg_constant.c #define MAKE_REG(s, e, l, h) { l, h, \
h 263 arch/x86/mm/hugetlbpage.c struct hstate *h = hstate_file(file);
h 276 arch/x86/mm/hugetlbpage.c addr = ALIGN(start_addr, huge_page_size(h));
h 298 arch/x86/mm/hugetlbpage.c addr = ALIGN(vma->vm_end, huge_page_size(h));
h 306 arch/x86/mm/hugetlbpage.c struct hstate *h = hstate_file(file);
h 327 arch/x86/mm/hugetlbpage.c addr = (mm->free_area_cache - len) & huge_page_mask(h);
h 358 arch/x86/mm/hugetlbpage.c addr = (vma->vm_start - len) & huge_page_mask(h);
h 396 arch/x86/mm/hugetlbpage.c struct hstate *h = hstate_file(file);
h 400 arch/x86/mm/hugetlbpage.c if (len & ~huge_page_mask(h))
h 412 arch/x86/mm/hugetlbpage.c addr = ALIGN(addr, huge_page_size(h));
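
Every hugetlbpage.c hit is power-of-two arithmetic on the per-hstate size: huge_page_size(h) is the size, huge_page_mask(h) is ~(size - 1), and ALIGN rounds up. A worked standalone example with 2 MB pages (numbers illustrative):

    #include <assert.h>

    #define HPAGE_SIZE  (2UL * 1024 * 1024)   /* huge_page_size(h) */
    #define HPAGE_MASK  (~(HPAGE_SIZE - 1))   /* huge_page_mask(h) */
    #define ALIGN_UP(x) (((x) + HPAGE_SIZE - 1) & HPAGE_MASK)

    int main(void)
    {
        unsigned long addr = 0x40000000UL + 12345;

        assert(ALIGN_UP(addr) == 0x40200000UL);       /* up to the next 2 MB */
        assert((addr & HPAGE_MASK) == 0x40000000UL);  /* down to this one */
        /* the 'len & ~huge_page_mask(h)' checks above test multiple-of-2MB: */
        assert(((3 * HPAGE_SIZE) & ~HPAGE_MASK) == 0);
        return 0;
    }
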
h 598 arch/x86/mm/init_32.c unsigned int v[4], l, h;
h 604 arch/x86/mm/init_32.c rdmsr(MSR_EFER, l, h);
h 606 arch/x86/mm/init_32.c wrmsr(MSR_EFER, l, h);
h 30 arch/x86/oprofile/op_model_amd.c #define CTR_READ(l, h, msrs, c) do {rdmsr(msrs->counters[(c)].addr, (l), (h)); } while (0)
h 35 arch/x86/oprofile/op_model_amd.c #define CTRL_READ(l, h, msrs, c) do {rdmsr(msrs->controls[(c)].addr, (l), (h)); } while (0)
h 36 arch/x86/oprofile/op_model_amd.c #define CTRL_WRITE(l, h, msrs, c) do {wrmsr(msrs->controls[(c)].addr, (l), (h)); } while (0)
h 47 arch/x86/oprofile/op_model_amd.c #define CTRL_SET_HOST_ONLY(val, h) (val |= ((h & 1) << 9))
h 48 arch/x86/oprofile/op_model_amd.c #define CTRL_SET_GUEST_ONLY(val, h) (val |= ((h & 1) << 8))
h 371 arch/x86/oprofile/op_model_p4.c #define CTR_READ(l, h, i) do {rdmsr(p4_counters[(i)].counter_address, (l), (h)); } while (0)
h 26 arch/x86/oprofile/op_model_ppro.c #define CTR_READ(l, h, msrs, c) do {rdmsr(msrs->counters[(c)].addr, (l), (h)); } while (0)
h 32 arch/x86/oprofile/op_model_ppro.c #define CTRL_READ(l, h, msrs, c) do {rdmsr((msrs->controls[(c)].addr), (l), (h)); } while (0)
h 33 arch/x86/oprofile/op_model_ppro.c #define CTRL_WRITE(l, h, msrs, c) do {wrmsr((msrs->controls[(c)].addr), (l), (h)); } while (0)
h 810 arch/x86/pci/irq.c struct irq_router_handler *h;
h 836 arch/x86/pci/irq.c for (h = pirq_routers; h->vendor; h++) {
h 838 arch/x86/pci/irq.c if (rt->rtr_vendor == h->vendor &&
h 839 arch/x86/pci/irq.c h->probe(r, pirq_router_dev, rt->rtr_device))
h 842 arch/x86/pci/irq.c if (pirq_router_dev->vendor == h->vendor &&
h 843 arch/x86/pci/irq.c h->probe(r, pirq_router_dev, pirq_router_dev->device))
h 50 arch/x86/xen/time.c u32 h, l;
h 59 arch/x86/xen/time.c h = p32[1];
h 63 arch/x86/xen/time.c } while (p32[1] != h);
h 65 arch/x86/xen/time.c ret = (((u64)h) << 32) | l;
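
The xen/time.c loop is the classic torn-read guard for a 64-bit value updated behind the reader's back on a 32-bit build: sample the high word, read the low word, retry if the high word moved. A userspace sketch of the same loop (the real code also needs memory barriers, elided here):

    #include <stdint.h>

    /* p32[0] is the low word, p32[1] the high word, as in xen/time.c. */
    static uint64_t read_u64_unlocked(const volatile uint32_t *p32)
    {
        uint32_t h, l;

        do {
            h = p32[1];            /* sample the high word first */
            l = p32[0];
        } while (p32[1] != h);     /* a carry raced us: retry */

        return ((uint64_t)h << 32) | l;
    }
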
h 61 crypto/sha256_generic.c u32 a, b, c, d, e, f, g, h, t1, t2;
h 75 crypto/sha256_generic.c e=state[4]; f=state[5]; g=state[6]; h=state[7];
h 78 crypto/sha256_generic.c t1 = h + e1(e) + Ch(e,f,g) + 0x428a2f98 + W[ 0];
h 79 crypto/sha256_generic.c t2 = e0(a) + Maj(a,b,c); d+=t1; h=t1+t2;
h 81 crypto/sha256_generic.c t2 = e0(h) + Maj(h,a,b); c+=t1; g=t1+t2;
h 83 crypto/sha256_generic.c t2 = e0(g) + Maj(g,h,a); b+=t1; f=t1+t2;
h 85 crypto/sha256_generic.c t2 = e0(f) + Maj(f,g,h); a+=t1; e=t1+t2;
h 87 crypto/sha256_generic.c t2 = e0(e) + Maj(e,f,g); h+=t1; d=t1+t2;
h 88 crypto/sha256_generic.c t1 = c + e1(h) + Ch(h,a,b) + 0x59f111f1 + W[ 5];
h 90 crypto/sha256_generic.c t1 = b + e1(g) + Ch(g,h,a) + 0x923f82a4 + W[ 6];
h 92 crypto/sha256_generic.c t1 = a + e1(f) + Ch(f,g,h) + 0xab1c5ed5 + W[ 7];
h 95 crypto/sha256_generic.c t1 = h + e1(e) + Ch(e,f,g) + 0xd807aa98 + W[ 8];
h 96 crypto/sha256_generic.c t2 = e0(a) + Maj(a,b,c); d+=t1; h=t1+t2;
h 98 crypto/sha256_generic.c t2 = e0(h) + Maj(h,a,b); c+=t1; g=t1+t2;
h 100 crypto/sha256_generic.c t2 = e0(g) + Maj(g,h,a); b+=t1; f=t1+t2;
h 102 crypto/sha256_generic.c t2 = e0(f) + Maj(f,g,h); a+=t1; e=t1+t2;
h 104 crypto/sha256_generic.c t2 = e0(e) + Maj(e,f,g); h+=t1; d=t1+t2;
h 105 crypto/sha256_generic.c t1 = c + e1(h) + Ch(h,a,b) + 0x80deb1fe + W[13];
h 107 crypto/sha256_generic.c t1 = b + e1(g) + Ch(g,h,a) + 0x9bdc06a7 + W[14];
h 109 crypto/sha256_generic.c t1 = a + e1(f) + Ch(f,g,h) + 0xc19bf174 + W[15];
h 112 crypto/sha256_generic.c t1 = h + e1(e) + Ch(e,f,g) + 0xe49b69c1 + W[16];
h 113 crypto/sha256_generic.c t2 = e0(a) + Maj(a,b,c); d+=t1; h=t1+t2;
h 115 crypto/sha256_generic.c t2 = e0(h) + Maj(h,a,b); c+=t1; g=t1+t2;
h 117 crypto/sha256_generic.c t2 = e0(g) + Maj(g,h,a); b+=t1; f=t1+t2;
h 119 crypto/sha256_generic.c t2 = e0(f) + Maj(f,g,h); a+=t1; e=t1+t2;
h 121 crypto/sha256_generic.c t2 = e0(e) + Maj(e,f,g); h+=t1; d=t1+t2;
h 122 crypto/sha256_generic.c t1 = c + e1(h) + Ch(h,a,b) + 0x4a7484aa + W[21];
h 124 crypto/sha256_generic.c t1 = b + e1(g) + Ch(g,h,a) + 0x5cb0a9dc + W[22];
h 126 crypto/sha256_generic.c t1 = a + e1(f) + Ch(f,g,h) + 0x76f988da + W[23];
h 129 crypto/sha256_generic.c t1 = h + e1(e) + Ch(e,f,g) + 0x983e5152 + W[24];
h 130 crypto/sha256_generic.c t2 = e0(a) + Maj(a,b,c); d+=t1; h=t1+t2;
h 132 crypto/sha256_generic.c t2 = e0(h) + Maj(h,a,b); c+=t1; g=t1+t2;
h 134 crypto/sha256_generic.c t2 = e0(g) + Maj(g,h,a); b+=t1; f=t1+t2;
h 136 crypto/sha256_generic.c t2 = e0(f) + Maj(f,g,h); a+=t1; e=t1+t2;
h 138 crypto/sha256_generic.c t2 = e0(e) + Maj(e,f,g); h+=t1; d=t1+t2;
h 139 crypto/sha256_generic.c t1 = c + e1(h) + Ch(h,a,b) + 0xd5a79147 + W[29];
h 141 crypto/sha256_generic.c t1 = b + e1(g) + Ch(g,h,a) + 0x06ca6351 + W[30];
h 143 crypto/sha256_generic.c t1 = a + e1(f) + Ch(f,g,h) + 0x14292967 + W[31];
h 146 crypto/sha256_generic.c t1 = h + e1(e) + Ch(e,f,g) + 0x27b70a85 + W[32];
h 147 crypto/sha256_generic.c t2 = e0(a) + Maj(a,b,c); d+=t1; h=t1+t2;
h 149 crypto/sha256_generic.c t2 = e0(h) + Maj(h,a,b); c+=t1; g=t1+t2;
h 151 crypto/sha256_generic.c t2 = e0(g) + Maj(g,h,a); b+=t1; f=t1+t2;
h 153 crypto/sha256_generic.c t2 = e0(f) + Maj(f,g,h); a+=t1; e=t1+t2;
h 155 crypto/sha256_generic.c t2 = e0(e) + Maj(e,f,g); h+=t1; d=t1+t2;
h 156 crypto/sha256_generic.c t1 = c + e1(h) + Ch(h,a,b) + 0x766a0abb + W[37];
h 158 crypto/sha256_generic.c t1 = b + e1(g) + Ch(g,h,a) + 0x81c2c92e + W[38];
h 160 crypto/sha256_generic.c t1 = a + e1(f) + Ch(f,g,h) + 0x92722c85 + W[39];
h 163 crypto/sha256_generic.c t1 = h + e1(e) + Ch(e,f,g) + 0xa2bfe8a1 + W[40];
h 164 crypto/sha256_generic.c t2 = e0(a) + Maj(a,b,c); d+=t1; h=t1+t2;
h 166 crypto/sha256_generic.c t2 = e0(h) + Maj(h,a,b); c+=t1; g=t1+t2;
h 168 crypto/sha256_generic.c t2 = e0(g) + Maj(g,h,a); b+=t1; f=t1+t2;
h 170 crypto/sha256_generic.c t2 = e0(f) + Maj(f,g,h); a+=t1; e=t1+t2;
h 172 crypto/sha256_generic.c t2 = e0(e) + Maj(e,f,g); h+=t1; d=t1+t2;
h 173 crypto/sha256_generic.c t1 = c + e1(h) + Ch(h,a,b) + 0xd6990624 + W[45];
h 175 crypto/sha256_generic.c t1 = b + e1(g) + Ch(g,h,a) + 0xf40e3585 + W[46];
h 177 crypto/sha256_generic.c t1 = a + e1(f) + Ch(f,g,h) + 0x106aa070 + W[47];
h 180 crypto/sha256_generic.c t1 = h + e1(e) + Ch(e,f,g) + 0x19a4c116 + W[48];
h 181 crypto/sha256_generic.c t2 = e0(a) + Maj(a,b,c); d+=t1; h=t1+t2;
h 183 crypto/sha256_generic.c t2 = e0(h) + Maj(h,a,b); c+=t1; g=t1+t2;
h 185 crypto/sha256_generic.c t2 = e0(g) + Maj(g,h,a); b+=t1; f=t1+t2;
h 187 crypto/sha256_generic.c t2 = e0(f) + Maj(f,g,h); a+=t1; e=t1+t2;
h 189 crypto/sha256_generic.c t2 = e0(e) + Maj(e,f,g); h+=t1; d=t1+t2;
h 190 crypto/sha256_generic.c t1 = c + e1(h) + Ch(h,a,b) + 0x4ed8aa4a + W[53];
h 192 crypto/sha256_generic.c t1 = b + e1(g) + Ch(g,h,a) + 0x5b9cca4f + W[54];
h 194 crypto/sha256_generic.c t1 = a + e1(f) + Ch(f,g,h) + 0x682e6ff3 + W[55];
h 197 crypto/sha256_generic.c t1 = h + e1(e) + Ch(e,f,g) + 0x748f82ee + W[56];
h 198 crypto/sha256_generic.c t2 = e0(a) + Maj(a,b,c); d+=t1; h=t1+t2;
h 200 crypto/sha256_generic.c t2 = e0(h) + Maj(h,a,b); c+=t1; g=t1+t2;
h 202 crypto/sha256_generic.c t2 = e0(g) + Maj(g,h,a); b+=t1; f=t1+t2;
h 204 crypto/sha256_generic.c t2 = e0(f) + Maj(f,g,h); a+=t1; e=t1+t2;
h 206 crypto/sha256_generic.c t2 = e0(e) + Maj(e,f,g); h+=t1; d=t1+t2;
h 207 crypto/sha256_generic.c t1 = c + e1(h) + Ch(h,a,b) + 0xa4506ceb + W[61];
h 209 crypto/sha256_generic.c t1 = b + e1(g) + Ch(g,h,a) + 0xbef9a3f7 + W[62];
h 211 crypto/sha256_generic.c t1 = a + e1(f) + Ch(f,g,h) + 0xc67178f2 + W[63];
h 215 crypto/sha256_generic.c state[4] += e; state[5] += f; state[6] += g; state[7] += h;
h 218 crypto/sha256_generic.c a = b = c = d = e = f = g = h = t1 = t2 = 0;
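
The wall of sha256_generic.c hits is one round function unrolled 64 times; each round the eight working variables a..h logically rotate one position, and the unrolling renames them instead of moving them. The rolled equivalent makes the pattern visible (a sketch: e0/e1/Ch/Maj as in the file, and a sha256_K[] table like sha512_generic.c's sha512_K is assumed, since sha256_generic.c inlines its constants):

    /* One SHA-256 round per iteration, rotating the variables explicitly. */
    for (i = 0; i < 64; i++) {
        t1 = h + e1(e) + Ch(e, f, g) + sha256_K[i] + W[i];
        t2 = e0(a) + Maj(a, b, c);
        h = g; g = f; f = e; e = d + t1;
        d = c; c = b; b = a; a = t1 + t2;
    }
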
h 94 crypto/sha512_generic.c u64 a, b, c, d, e, f, g, h, t1, t2;
h 108 crypto/sha512_generic.c e=state[4]; f=state[5]; g=state[6]; h=state[7];
h 112 crypto/sha512_generic.c t1 = h + e1(e) + Ch(e,f,g) + sha512_K[i ] + W[i ];
h 113 crypto/sha512_generic.c t2 = e0(a) + Maj(a,b,c); d+=t1; h=t1+t2;
h 115 crypto/sha512_generic.c t2 = e0(h) + Maj(h,a,b); c+=t1; g=t1+t2;
h 117 crypto/sha512_generic.c t2 = e0(g) + Maj(g,h,a); b+=t1; f=t1+t2;
h 119 crypto/sha512_generic.c t2 = e0(f) + Maj(f,g,h); a+=t1; e=t1+t2;
h 121 crypto/sha512_generic.c t2 = e0(e) + Maj(e,f,g); h+=t1; d=t1+t2;
h 122 crypto/sha512_generic.c t1 = c + e1(h) + Ch(h,a,b) + sha512_K[i+5] + W[i+5];
h 124 crypto/sha512_generic.c t1 = b + e1(g) + Ch(g,h,a) + sha512_K[i+6] + W[i+6];
h 126 crypto/sha512_generic.c t1 = a + e1(f) + Ch(f,g,h) + sha512_K[i+7] + W[i+7];
h 131 crypto/sha512_generic.c state[4] += e; state[5] += f; state[6] += g; state[7] += h;
h 134 crypto/sha512_generic.c a = b = c = d = e = f = g = h = t1 = t2 = 0;
h 25 fs/adfs/dir_fplus.c struct adfs_bigdirheader *h;
h 44 fs/adfs/dir_fplus.c h = (struct adfs_bigdirheader *)dir->bh[0]->b_data;
h 45 fs/adfs/dir_fplus.c size = le32_to_cpu(h->bigdirsize);
h 51 fs/adfs/dir_fplus.c if (h->bigdirversion[0] != 0 || h->bigdirversion[1] != 0 ||
h 52 fs/adfs/dir_fplus.c h->bigdirversion[2] != 0 || size & 2047 ||
h 53 fs/adfs/dir_fplus.c h->bigdirstartname != cpu_to_le32(BIGDIRSTARTNAME))
h 73 fs/adfs/dir_fplus.c t->bigdirendmasseq != h->startmasseq ||
h 77 fs/adfs/dir_fplus.c dir->parent_id = le32_to_cpu(h->bigdirparent);
h 90 fs/adfs/dir_fplus.c struct adfs_bigdirheader *h = (struct adfs_bigdirheader *)dir->bh[0]->b_data;
h 93 fs/adfs/dir_fplus.c if (fpos <= le32_to_cpu(h->bigdirentries)) {
h 127 fs/adfs/dir_fplus.c struct adfs_bigdirheader *h = (struct adfs_bigdirheader *)dir->bh[0]->b_data;
h 132 fs/adfs/dir_fplus.c if (dir->pos >= le32_to_cpu(h->bigdirentries))
h 136 fs/adfs/dir_fplus.c offset += ((le32_to_cpu(h->bigdirnamelen) + 4) & ~3);
h 149 fs/adfs/dir_fplus.c offset += ((le32_to_cpu(h->bigdirnamelen) + 4) & ~3);
h 150 fs/adfs/dir_fplus.c offset += le32_to_cpu(h->bigdirentries) * sizeof(struct adfs_bigdirentry);
h 67 fs/autofs/autofs_i.h struct autofs_dir_ent *h[AUTOFS_HASH_SIZE];
h 107 fs/autofs/dirhash.c memset(&dh->h, 0, AUTOFS_HASH_SIZE*sizeof(struct autofs_dir_ent *));
h 118 fs/autofs/dirhash.c for ( dhn = dh->h[(unsigned) name->hash % AUTOFS_HASH_SIZE] ; dhn ; dhn = dhn->next ) {
h 139 fs/autofs/dirhash.c dhnp = &dh->h[(unsigned) ent->hash % AUTOFS_HASH_SIZE];
h 190 fs/autofs/dirhash.c ent = dh->h[bucket];
h 224 fs/autofs/dirhash.c for ( ent = dh->h[i] ; ent ; ent = ent->next ) {
h 241 fs/autofs/dirhash.c for ( ent = sbi->dirhash.h[i] ; ent ; ent = nent ) {
h 1457 fs/compat.c unsigned long h, l;
h 1458 fs/compat.c if (__get_user(l, ufdset) || __get_user(h, ufdset+1))
h 1461 fs/compat.c *fdset++ = h << 32 | l;
h 1489 fs/compat.c unsigned long h, l;
h 1491 fs/compat.c h = l >> 32;
h 1492 fs/compat.c if (__put_user(l, ufdset) || __put_user(h, ufdset+1))
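
fs/compat.c is converting between the 32-bit userland fd_set layout (pairs of 32-bit words) and the kernel's native 64-bit words; l and h are exactly that split. A standalone round-trip sketch:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t user_words[2] = { 0xdeadbeefu, 0x00c0ffeeu };  /* compat side */
        uint64_t word;
        uint32_t l, h;

        l = user_words[0];                   /* compat -> native, as in */
        h = user_words[1];                   /* '*fdset++ = h << 32 | l' */
        word = (uint64_t)h << 32 | l;

        l = (uint32_t)word;                  /* native -> compat, as in */
        h = (uint32_t)(word >> 32);          /* 'h = l >> 32' above */
        assert(l == user_words[0] && h == user_words[1]);
        return 0;
    }
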
h 179 fs/compat_ioctl.c err |= put_user(kevent.u.size.h, &up->u.size.h);
h 279 fs/ext3/namei.c struct dx_hash_info h = *hinfo;
h 291 fs/ext3/namei.c ext3fs_dirhash(de->name, de->name_len, &h);
h 292 fs/ext3/namei.c printk(":%x.%u ", h.hash,
h 716 fs/ext3/namei.c struct dx_hash_info h = *hinfo;
h 721 fs/ext3/namei.c ext3fs_dirhash(de->name, de->name_len, &h);
h 723 fs/ext3/namei.c map_tail->hash = h.hash;
h 2203 fs/ext4/mballoc.c struct ext4_mb_history h;
h 2211 fs/ext4/mballoc.c h.op = ac->ac_op;
h 2212 fs/ext4/mballoc.c h.pid = current->pid;
h 2213 fs/ext4/mballoc.c h.ino = ac->ac_inode ? ac->ac_inode->i_ino : 0;
h 2214 fs/ext4/mballoc.c h.orig = ac->ac_o_ex;
h 2215 fs/ext4/mballoc.c h.result = ac->ac_b_ex;
h 2216 fs/ext4/mballoc.c h.flags = ac->ac_flags;
h 2217 fs/ext4/mballoc.c h.found = ac->ac_found;
h 2218 fs/ext4/mballoc.c h.groups = ac->ac_groups_scanned;
h 2219 fs/ext4/mballoc.c h.cr = ac->ac_criteria;
h 2220 fs/ext4/mballoc.c h.tail = ac->ac_tail;
h 2221 fs/ext4/mballoc.c h.buddy = ac->ac_buddy;
h 2222 fs/ext4/mballoc.c h.merged = 0;
h 2226 fs/ext4/mballoc.c h.merged = 1;
h 2227 fs/ext4/mballoc.c h.goal = ac->ac_g_ex;
h 2228 fs/ext4/mballoc.c h.result = ac->ac_f_ex;
h 2232 fs/ext4/mballoc.c memcpy(sbi->s_mb_history + sbi->s_mb_history_cur, &h, sizeof(h));
h 282 fs/ext4/namei.c struct dx_hash_info h = *hinfo;
h 294 fs/ext4/namei.c ext4fs_dirhash(de->name, de->name_len, &h);
h 295 fs/ext4/namei.c printk(":%x.%u ", h.hash,
h 720 fs/ext4/namei.c struct dx_hash_info h = *hinfo;
h 725 fs/ext4/namei.c ext4fs_dirhash(de->name, de->name_len, &h);
h 727 fs/ext4/namei.c map_tail->hash = h.hash;
h 90 fs/fuse/dev.c req->in.h.uid = current->fsuid;
h 91 fs/fuse/dev.c req->in.h.gid = current->fsgid;
h 92 fs/fuse/dev.c req->in.h.pid = current->pid;
h 235 fs/fuse/dev.c req->in.h.unique = fuse_get_unique(fc);
h 236 fs/fuse/dev.c req->in.h.len = sizeof(struct fuse_in_header) +
h 353 fs/fuse/dev.c req->out.h.error = -EINTR;
h 388 fs/fuse/dev.c req->out.h.error = -ENOTCONN;
h 390 fs/fuse/dev.c req->out.h.error = -ECONNREFUSED;
h 424 fs/fuse/dev.c req->out.h.error = -ENOTCONN;
h 709 fs/fuse/dev.c arg.unique = req->in.h.unique;
h 772 fs/fuse/dev.c reqsize = in->h.len;
h 775 fs/fuse/dev.c req->out.h.error = -EIO;
h 777 fs/fuse/dev.c if (in->h.opcode == FUSE_SETXATTR)
h 778 fs/fuse/dev.c req->out.h.error = -E2BIG;
h 784 fs/fuse/dev.c err = fuse_copy_one(&cs, &in->h, sizeof(in->h));
h 796 fs/fuse/dev.c req->out.h.error = -EIO;
h 824 fs/fuse/dev.c if (req->in.h.unique == unique || req->intr_unique == unique)
h 835 fs/fuse/dev.c if (out->h.error)
h 918 fs/fuse/dev.c req->out.h = oh;
h 932 fs/fuse/dev.c req->out.h.error = -EIO;
h 973 fs/fuse/dev.c req->out.h.error = -ECONNABORTED;
h 999 fs/fuse/dev.c req->out.h.error = -ECONNABORTED;
h 120 fs/fuse/dir.c req->in.h.opcode = FUSE_LOOKUP;
h 121 fs/fuse/dir.c req->in.h.nodeid = nodeid;
h 194 fs/fuse/dir.c err = req->out.h.error;
h 287 fs/fuse/dir.c err = req->out.h.error;
h 420 fs/fuse/dir.c req->in.h.opcode = FUSE_CREATE;
h 421 fs/fuse/dir.c req->in.h.nodeid = get_node_id(dir);
h 436 fs/fuse/dir.c err = req->out.h.error;
h 498 fs/fuse/dir.c req->in.h.nodeid = get_node_id(dir);
h 506 fs/fuse/dir.c err = req->out.h.error;
h 563 fs/fuse/dir.c req->in.h.opcode = FUSE_MKNOD;
h 594 fs/fuse/dir.c req->in.h.opcode = FUSE_MKDIR;
h 612 fs/fuse/dir.c req->in.h.opcode = FUSE_SYMLINK;
h 629 fs/fuse/dir.c req->in.h.opcode = FUSE_UNLINK;
h 630 fs/fuse/dir.c req->in.h.nodeid = get_node_id(dir);
h 635 fs/fuse/dir.c err = req->out.h.error;
h 660 fs/fuse/dir.c req->in.h.opcode = FUSE_RMDIR;
h 661 fs/fuse/dir.c req->in.h.nodeid = get_node_id(dir);
h 666 fs/fuse/dir.c err = req->out.h.error;
h 689 fs/fuse/dir.c req->in.h.opcode = FUSE_RENAME;
h 690 fs/fuse/dir.c req->in.h.nodeid = get_node_id(olddir);
h 699 fs/fuse/dir.c err = req->out.h.error;
h 739 fs/fuse/dir.c req->in.h.opcode = FUSE_LINK;
h 803 fs/fuse/dir.c req->in.h.opcode = FUSE_GETATTR;
h 804 fs/fuse/dir.c req->in.h.nodeid = get_node_id(inode);
h 815 fs/fuse/dir.c err = req->out.h.error;
h 902 fs/fuse/dir.c req->in.h.opcode = FUSE_ACCESS;
h 903 fs/fuse/dir.c req->in.h.nodeid = get_node_id(inode);
h 908 fs/fuse/dir.c err = req->out.h.error;
h 1031 fs/fuse/dir.c err = req->out.h.error;
h 1057 fs/fuse/dir.c req->in.h.opcode = FUSE_READLINK;
h 1058 fs/fuse/dir.c req->in.h.nodeid = get_node_id(inode);
h 1064 fs/fuse/dir.c if (req->out.h.error) {
h 1066 fs/fuse/dir.c link = ERR_PTR(req->out.h.error);
h 1258 fs/fuse/dir.c req->in.h.opcode = FUSE_SETATTR;
h 1259 fs/fuse/dir.c req->in.h.nodeid = get_node_id(inode);
h 1270 fs/fuse/dir.c err = req->out.h.error;
h 1354 fs/fuse/dir.c req->in.h.opcode = FUSE_SETXATTR;
h 1355 fs/fuse/dir.c req->in.h.nodeid = get_node_id(inode);
h 1364 fs/fuse/dir.c err = req->out.h.error;
h 1392 fs/fuse/dir.c req->in.h.opcode = FUSE_GETXATTR;
h 1393 fs/fuse/dir.c req->in.h.nodeid = get_node_id(inode);
h 1410 fs/fuse/dir.c ret = req->out.h.error;
h 1444 fs/fuse/dir.c req->in.h.opcode = FUSE_LISTXATTR;
h 1445 fs/fuse/dir.c req->in.h.nodeid = get_node_id(inode);
h 1460 fs/fuse/dir.c ret = req->out.h.error;
h 1487 fs/fuse/dir.c req->in.h.opcode = FUSE_REMOVEXATTR;
h 1488 fs/fuse/dir.c req->in.h.nodeid = get_node_id(inode);
h 1493 fs/fuse/dir.c err = req->out.h.error;
h 34 fs/fuse/file.c req->in.h.opcode = isdir ? FUSE_OPENDIR : FUSE_OPEN;
h 35 fs/fuse/file.c req->in.h.nodeid = get_node_id(inode);
h 43 fs/fuse/file.c err = req->out.h.error;
h 145 fs/fuse/file.c req->in.h.opcode = opcode;
h 146 fs/fuse/file.c req->in.h.nodeid = nodeid;
h 275 fs/fuse/file.c req->in.h.opcode = FUSE_FLUSH;
h 276 fs/fuse/file.c req->in.h.nodeid = get_node_id(inode);
h 282 fs/fuse/file.c err = req->out.h.error;
h 340 fs/fuse/file.c req->in.h.opcode = isdir ? FUSE_FSYNCDIR : FUSE_FSYNC;
h 341 fs/fuse/file.c req->in.h.nodeid = get_node_id(inode);
h 346 fs/fuse/file.c err = req->out.h.error;
h 373 fs/fuse/file.c req->in.h.opcode = opcode;
h 374 fs/fuse/file.c req->in.h.nodeid = get_node_id(inode);
h 448 fs/fuse/file.c err = req->out.h.error;
h 477 fs/fuse/file.c if (!req->out.h.error && num_read < count) {
h 486 fs/fuse/file.c if (!req->out.h.error)
h 611 fs/fuse/file.c req->in.h.opcode = FUSE_WRITE;
h 612 fs/fuse/file.c req->in.h.nodeid = get_node_id(inode);
h 691 fs/fuse/file.c err = req->out.h.error;
h 738 fs/fuse/file.c if (!req->out.h.error && !offset && count >= PAGE_CACHE_SIZE)
h 845 fs/fuse/file.c err = req->out.h.error;
h 987 fs/fuse/file.c if (req->out.h.error) {
h 989 fs/fuse/file.c res = req->out.h.error;
h 1114 fs/fuse/file.c mapping_set_error(inode->i_mapping, req->out.h.error);
h 1303 fs/fuse/file.c req->in.h.opcode = opcode;
h 1304 fs/fuse/file.c req->in.h.nodeid = get_node_id(inode);
h 1327 fs/fuse/file.c err = req->out.h.error;
h 1359 fs/fuse/file.c err = req->out.h.error;
h 1426 fs/fuse/file.c req->in.h.opcode = FUSE_BMAP;
h 1427 fs/fuse/file.c req->in.h.nodeid = get_node_id(inode);
h 1435 fs/fuse/file.c err = req->out.h.error;
h 119 fs/fuse/fuse_i.h struct fuse_in_header h;
h 140 fs/fuse/fuse_i.h struct fuse_out_header h;
h 92 fs/fuse/inode.c req->in.h.opcode = FUSE_FORGET;
h 93 fs/fuse/inode.c req->in.h.nodeid = nodeid;
h 270 fs/fuse/inode.c req->in.h.opcode = FUSE_DESTROY;
h 331 fs/fuse/inode.c req->in.h.opcode = FUSE_STATFS;
h 332 fs/fuse/inode.c req->in.h.nodeid = get_node_id(dentry->d_inode);
h 338 fs/fuse/inode.c err = req->out.h.error;
h 740 fs/fuse/inode.c if (req->out.h.error || arg->major != FUSE_KERNEL_VERSION)
h 785 fs/fuse/inode.c req->in.h.opcode = FUSE_INIT;
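
Every fuse hit above is one request shape: fill req->in.h with an opcode and target node id, send the request to the userspace server, then read its status back out of req->out.h.error. A hedged sketch following that pattern (argument marshalling elided; the send helper is request_send() in this tree):

    struct fuse_req *req = fuse_get_req(fc);
    if (IS_ERR(req))
        return PTR_ERR(req);

    req->in.h.opcode = FUSE_GETATTR;         /* which operation */
    req->in.h.nodeid = get_node_id(inode);   /* which inode it targets */
    /* ... req->in.args[] / req->out.args[] set up here ... */
    request_send(fc, req);                   /* wait for the userspace server */
    err = req->out.h.error;                  /* 0 or a negative errno */
    fuse_put_request(fc, req);
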
h 80 fs/gfs2/dir.c #define gfs2_disk_hash2offset(h) (((u64)(h)) >> 1)
h 456 fs/gfs2/dir.c const struct gfs2_meta_header *h = buf;
h 461 fs/gfs2/dir.c switch(be32_to_cpu(h->mh_type)) {
h 474 fs/gfs2/dir.c be32_to_cpu(h->mh_type));
h 544 fs/gfs2/dir.c struct gfs2_meta_header *h = (struct gfs2_meta_header *)bh->b_data;
h 546 fs/gfs2/dir.c if (be32_to_cpu(h->mh_type) == GFS2_METATYPE_LF) {
h 133 fs/gfs2/glock.c unsigned int h;
h 135 fs/gfs2/glock.c h = jhash(&name->ln_number, sizeof(u64), 0);
h 136 fs/gfs2/glock.c h = jhash(&name->ln_type, sizeof(unsigned int), h);
h 137 fs/gfs2/glock.c h = jhash(&sdp, sizeof(struct gfs2_sbd *), h);
h 138 fs/gfs2/glock.c h &= GFS2_GL_HASH_MASK;
h 140 fs/gfs2/glock.c return h;
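
gl_hash() above chains jhash() calls, feeding each result back in as the next call's initval so the lock number, lock type and superblock pointer all perturb the final bucket, then masks down to the table size. A standalone sketch with a stand-in mixer:

    #include <stdint.h>

    /* Stand-in for the kernel's jhash(key, len, initval): any seeded
     * 32-bit mixer has the same shape (this one is seeded FNV-1a). */
    static uint32_t hash32(const void *key, uint32_t len, uint32_t seed)
    {
        const unsigned char *p = key;
        uint32_t h = seed ^ 2166136261u;

        while (len--)
            h = (h ^ *p++) * 16777619u;
        return h;
    }

    #define GL_HASH_MASK 0xffu               /* stand-in for GFS2_GL_HASH_MASK */

    static unsigned gl_bucket(uint64_t number, unsigned int type, const void *sdp)
    {
        uint32_t h;

        h = hash32(&number, sizeof(number), 0);   /* first field seeds ... */
        h = hash32(&type, sizeof(type), h);       /* ... each later one */
        h = hash32(&sdp, sizeof(sdp), h);
        return h & GL_HASH_MASK;                  /* power-of-two table */
    }
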
h 215 fs/gfs2/glock.c struct hlist_node *h;
h 217 fs/gfs2/glock.c hlist_for_each_entry(gl, h, &gl_hash_table[hash].hb_list, gl_list) {
h 1719 fs/gfs2/glock.c struct hlist_node *h;
h 1727 fs/gfs2/glock.c hlist_for_each_entry(gl, h, &gl_hash_table[x].hb_list, gl_list) {
h 94 fs/hfs/hfs.h __be16 h;
h 207 fs/hfsplus/hfsplus_raw.h __be16 h;
h 235 fs/hpfs/dnode.c int h;
h 291 fs/hpfs/dnode.c h = ((char *)dnode_last_de(nd) - (char *)nd) / 2 + 10;
h 302 fs/hpfs/dnode.c for (de = dnode_first_de(nd); (char *)de_next_de(de) - (char *)nd < h; de = de_next_de(de)) {
h 196 fs/hpfs/ea.c unsigned char h[4];
h 347 fs/hpfs/ea.c h[0] = 0;
h 348 fs/hpfs/ea.c h[1] = strlen(key);
h 349 fs/hpfs/ea.c h[2] = size & 0xff;
h 350 fs/hpfs/ea.c h[3] = size >> 8;
h 351 fs/hpfs/ea.c if (hpfs_ea_write(s, fnode->ea_secno, fnode->ea_anode, fnode->ea_size_l, 4, h)) goto bail;
h 352 fs/hpfs/ea.c if (hpfs_ea_write(s, fnode->ea_secno, fnode->ea_anode, fnode->ea_size_l + 4, h[1] + 1, key)) goto bail;
h 353 fs/hpfs/ea.c if (hpfs_ea_write(s, fnode->ea_secno, fnode->ea_anode, fnode->ea_size_l + 5 + h[1], size, data)) goto bail;
h 85 fs/hugetlbfs/inode.c struct hstate *h = hstate_file(file);
h 98 fs/hugetlbfs/inode.c if (vma->vm_pgoff & ~(huge_page_mask(h) >> PAGE_SHIFT))
h 110 fs/hugetlbfs/inode.c vma->vm_pgoff >> huge_page_order(h),
h 111 fs/hugetlbfs/inode.c len >> huge_page_shift(h), vma))
h 136 fs/hugetlbfs/inode.c struct hstate *h = hstate_file(file);
h 138 fs/hugetlbfs/inode.c if (len & ~huge_page_mask(h))
h 150 fs/hugetlbfs/inode.c addr = ALIGN(addr, huge_page_size(h));
h 163 fs/hugetlbfs/inode.c addr = ALIGN(start_addr, huge_page_size(h));
h 181 fs/hugetlbfs/inode.c addr = ALIGN(vma->vm_end, huge_page_size(h));
h 232 fs/hugetlbfs/inode.c struct hstate *h = hstate_file(filp);
h 235 fs/hugetlbfs/inode.c unsigned long index = *ppos >> huge_page_shift(h);
h 236 fs/hugetlbfs/inode.c unsigned long offset = *ppos & ~huge_page_mask(h);
h 251 fs/hugetlbfs/inode.c end_index = (isize - 1) >> huge_page_shift(h);
h 257 fs/hugetlbfs/inode.c nr = huge_page_size(h);
h 261 fs/hugetlbfs/inode.c nr = ((isize - 1) & ~huge_page_mask(h)) + 1;
h 295 fs/hugetlbfs/inode.c index += offset >> huge_page_shift(h);
h 296 fs/hugetlbfs/inode.c offset &= ~huge_page_mask(h);
h 306 fs/hugetlbfs/inode.c *ppos = ((loff_t)index << huge_page_shift(h)) + offset;
h 347 fs/hugetlbfs/inode.c struct hstate *h = hstate_inode(inode);
h 349 fs/hugetlbfs/inode.c const pgoff_t start = lstart >> huge_page_shift(h);
h 458 fs/hugetlbfs/inode.c struct hstate *h = hstate_inode(inode);
h 460 fs/hugetlbfs/inode.c BUG_ON(offset & ~huge_page_mask(h));
h 475 fs/hugetlbfs/inode.c struct hstate *h = hstate_inode(inode);
h 487 fs/hugetlbfs/inode.c if (!(attr->ia_size & ~huge_page_mask(h)))
h 621 fs/hugetlbfs/inode.c struct hstate *h = hstate_inode(dentry->d_inode);
h 624 fs/hugetlbfs/inode.c buf->f_bsize = huge_page_size(h);
h 827 fs/hugetlbfs/inode.c struct hstate *h = pconfig->hstate;
h 829 fs/hugetlbfs/inode.c size <<= huge_page_shift(h);
h 830 fs/hugetlbfs/inode.c size *= h->max_huge_pages;
h 833 fs/hugetlbfs/inode.c pconfig->nr_blocks = (size >> huge_page_shift(h));
h 3898 fs/jfs/jfs_dtree.c struct dtslot *h, *t;
h 3917 fs/jfs/jfs_dtree.c h = &p->slot[fsi];
h 3918 fs/jfs/jfs_dtree.c p->header.freelist = h->next;
h 3930 fs/jfs/jfs_dtree.c lh = (struct ldtentry *) h;
h 3931 fs/jfs/jfs_dtree.c lh->next = h->next;
h 3945 fs/jfs/jfs_dtree.c ih = (struct idtentry *) h;
h 3946 fs/jfs/jfs_dtree.c ih->next = h->next;
h 3960 fs/jfs/jfs_dtree.c t = h;
h 4003 fs/jfs/jfs_dtree.c if (h == t) {
h 4064 fs/jfs/jfs_dtree.c struct dtslot *h, *s, *d;
h 4115 fs/jfs/jfs_dtree.c h = d = &dp->slot[dsi];
h 4122 fs/jfs/jfs_dtree.c dlh = (struct ldtentry *) h;
h 4143 fs/jfs/jfs_dtree.c dih = (struct idtentry *) h;
h 4211 fs/jfs/jfs_dtree.c if (h == d) {
h 584 fs/jfs/jfs_logmgr.c lspn = le32_to_cpu(lp->h.page);
h 634 fs/jfs/jfs_logmgr.c lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_ceor);
h 655 fs/jfs/jfs_logmgr.c lp->h.page = lp->t.page = cpu_to_le32(lspn + 1);
h 656 fs/jfs/jfs_logmgr.c lp->h.eor = lp->t.eor = cpu_to_le16(LOGPHDRSIZE);
h 795 fs/jfs/jfs_logmgr.c lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_ceor);
h 803 fs/jfs/jfs_logmgr.c lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_ceor);
h 887 fs/jfs/jfs_logmgr.c lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_eor);
h 1370 fs/jfs/jfs_logmgr.c le16_to_cpu(lp->h.eor));
h 1399 fs/jfs/jfs_logmgr.c lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_eor);
h 1688 fs/jfs/jfs_logmgr.c lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_eor);
h 2465 fs/jfs/jfs_logmgr.c lp->h.page = lp->t.page = cpu_to_le32(npages - 3);
h 2466 fs/jfs/jfs_logmgr.c lp->h.eor = lp->t.eor = cpu_to_le16(LOGPHDRSIZE + LOGRDSIZE);
h 2485 fs/jfs/jfs_logmgr.c lp->h.page = lp->t.page = cpu_to_le32(lspn);
h 2486 fs/jfs/jfs_logmgr.c lp->h.eor = lp->t.eor = cpu_to_le16(LOGPHDRSIZE);
h 127 fs/jfs/jfs_logmgr.h } h;
h 257 fs/ncpfs/sock.c h->conn_low = server->connection;
h 258 fs/ncpfs/sock.c h->conn_high = server->connection >> 8;
h 259 fs/ncpfs/sock.c h->sequence = ++server->sequence;
h 265 fs/ncpfs/sock.c struct ncp_request_header* h;
h 269 fs/ncpfs/sock.c h = req->tx_iov[1].iov_base;
h 270 fs/ncpfs/sock.c ncp_init_header(server, req, h);
h 294 fs/ncpfs/sock.c struct ncp_request_header* h;
h 297 fs/ncpfs/sock.c h = req->tx_iov[1].iov_base;
h 298 fs/ncpfs/sock.c ncp_init_header(server, req, h);
h 796 fs/ncpfs/sock.c struct ncp_request_header *h;
h 800 fs/ncpfs/sock.c h = (struct ncp_request_header *) (server->packet);
h 802 fs/ncpfs/sock.c *(__u16 *)&(h->data[0]) = htons(server->current_size - sizeof(*h) - 2);
h 804 fs/ncpfs/sock.c h->type = NCP_REQUEST;
h 809 fs/ncpfs/sock.c h->task = 2; /* (current->pid) & 0xff; */
h 810 fs/ncpfs/sock.c h->function = function;
h 832 fs/ncpfs/sock.c struct ncp_request_header *h;
h 838 fs/ncpfs/sock.c h = (struct ncp_request_header *) (server->packet);
h 839 fs/ncpfs/sock.c h->type = NCP_ALLOC_SLOT_REQUEST;
h 840 fs/ncpfs/sock.c h->task = 2; /* see above */
h 841 fs/ncpfs/sock.c h->function = 0;
h 843 fs/ncpfs/sock.c result = ncp_do_request(server, sizeof(*h), server->packet, server->packet_size);
h 846 fs/ncpfs/sock.c server->connection = h->conn_low + (h->conn_high * 256);
h 854 fs/ncpfs/sock.c struct ncp_request_header *h;
h 856 fs/ncpfs/sock.c h = (struct ncp_request_header *) (server->packet);
h 857 fs/ncpfs/sock.c h->type = NCP_DEALLOC_SLOT_REQUEST;
h 858 fs/ncpfs/sock.c h->task = 2; /* see above */
h 859 fs/ncpfs/sock.c h->function = 0;
h 861 fs/ncpfs/sock.c return ncp_do_request(server, sizeof(*h), server->packet, server->packet_size);
h 158 fs/nfs/idmap.c return &h->h_entries[fnvhash32(name, len) % IDMAP_HASH_SZ];
h 164 fs/nfs/idmap.c struct idmap_hashent *he = idmap_name_hash(h, name, len);
h 176 fs/nfs/idmap.c return &h->h_entries[fnvhash32(&id, sizeof(id)) % IDMAP_HASH_SZ];
h 182 fs/nfs/idmap.c struct idmap_hashent *he = idmap_id_hash(h, id);
h 198 fs/nfs/idmap.c return idmap_name_hash(h, name, len);
h 204 fs/nfs/idmap.c return idmap_id_hash(h, id);
h 250 fs/nfs/idmap.c he = idmap_lookup_name(h, name, namelen);
h 260 fs/nfs/idmap.c im->im_type = h->h_type;
h 311 fs/nfs/idmap.c he = idmap_lookup_id(h, id);
h 319 fs/nfs/idmap.c im->im_type = h->h_type;
h 382 fs/nfs/idmap.c struct idmap_hashtable *h;
h 411 fs/nfs/idmap.c h = &idmap->idmap_user_hash;
h 414 fs/nfs/idmap.c h = &idmap->idmap_group_hash;
h 431 fs/nfs/idmap.c he = idmap_alloc_id(h, im_in.im_id);
h 442 fs/nfs/idmap.c he = idmap_alloc_name(h, im_in.im_name, namelen_in);
h 64 fs/nfsd/export.c struct svc_expkey *key = container_of(ref, struct svc_expkey, h.ref);
h 66 fs/nfsd/export.c if (test_bit(CACHE_VALID, &key->h.flags) &&
h 67 fs/nfsd/export.c !test_bit(CACHE_NEGATIVE, &key->h.flags))
h 78 fs/nfsd/export.c struct svc_expkey *ek = container_of(h, struct svc_expkey, h);
h 138 fs/nfsd/export.c key.h.flags = 0;
h 139 fs/nfsd/export.c key.h.expiry_time = get_expiry(&mesg);
h 140 fs/nfsd/export.c if (key.h.expiry_time == 0)
h 159 fs/nfsd/export.c set_bit(CACHE_NEGATIVE, &key.h.flags);
h 162 fs/nfsd/export.c cache_put(&ek->h, &svc_expkey_cache);
h 175 fs/nfsd/export.c cache_put(&ek->h, &svc_expkey_cache);
h 195 fs/nfsd/export.c if (h == NULL) {
h 199 fs/nfsd/export.c ek = container_of(h, struct svc_expkey, h);
h 204 fs/nfsd/export.c if (test_bit(CACHE_VALID, &h->flags) &&
h 205 fs/nfsd/export.c !test_bit(CACHE_NEGATIVE, &h->flags)) {
h 215 fs/nfsd/export.c struct svc_expkey *orig = container_of(a, struct svc_expkey, h);
h 216 fs/nfsd/export.c struct svc_expkey *new = container_of(b, struct svc_expkey, h);
h 228 fs/nfsd/export.c struct svc_expkey *new = container_of(cnew, struct svc_expkey, h);
h 229 fs/nfsd/export.c struct svc_expkey *item = container_of(citem, struct svc_expkey, h);
h 241 fs/nfsd/export.c struct svc_expkey *new = container_of(cnew, struct svc_expkey, h);
h 242 fs/nfsd/export.c struct svc_expkey *item = container_of(citem, struct svc_expkey, h);
h 252 fs/nfsd/export.c return &i->h;
h 284 fs/nfsd/export.c ch = sunrpc_cache_lookup(&svc_expkey_cache, &item->h,
h 287 fs/nfsd/export.c return container_of(ch, struct svc_expkey, h);
h 304 fs/nfsd/export.c ch = sunrpc_cache_update(&svc_expkey_cache, &new->h,
h 305 fs/nfsd/export.c &old->h, hash);
h 307 fs/nfsd/export.c return container_of(ch, struct svc_expkey, h);
h 332 fs/nfsd/export.c struct svc_export *exp = container_of(ref, struct svc_export, h.ref);
h 345 fs/nfsd/export.c struct svc_export *exp = container_of(h, struct svc_export, h);
h 545 fs/nfsd/export.c exp.h.flags = 0;
h 556 fs/nfsd/export.c exp.h.expiry_time = get_expiry(&mesg);
h 557 fs/nfsd/export.c if (exp.h.expiry_time == 0)
h 564 fs/nfsd/export.c set_bit(CACHE_NEGATIVE, &exp.h.flags);
h 648 fs/nfsd/export.c if (h == NULL) {
h 652 fs/nfsd/export.c exp = container_of(h, struct svc_export, h);
h 657 fs/nfsd/export.c if (test_bit(CACHE_VALID, &h->flags) &&
h 658 fs/nfsd/export.c !test_bit(CACHE_NEGATIVE, &h->flags)) {
h 677 fs/nfsd/export.c struct svc_export *orig = container_of(a, struct svc_export, h);
h 678 fs/nfsd/export.c struct svc_export *new = container_of(b, struct svc_export, h);
h 686 fs/nfsd/export.c struct svc_export *new = container_of(cnew, struct svc_export, h);
h 687 fs/nfsd/export.c struct svc_export *item = container_of(citem, struct svc_export, h);
h 701 fs/nfsd/export.c struct svc_export *new = container_of(cnew, struct svc_export, h);
h 702 fs/nfsd/export.c struct svc_export *item = container_of(citem, struct svc_export, h);
h 729 fs/nfsd/export.c return &i->h;
h 758 fs/nfsd/export.c ch = sunrpc_cache_lookup(&svc_export_cache, &exp->h,
h 761 fs/nfsd/export.c return container_of(ch, struct svc_export, h);
h 775 fs/nfsd/export.c ch = sunrpc_cache_update(&svc_export_cache, &new->h,
h 776 fs/nfsd/export.c &old->h,
h 779 fs/nfsd/export.c return container_of(ch, struct svc_export, h);
h 801 fs/nfsd/export.c err = cache_check(&svc_expkey_cache, &ek->h, reqp);
h 816 fs/nfsd/export.c key.h.expiry_time = NEVER;
h 817 fs/nfsd/export.c key.h.flags = 0;
h 823 fs/nfsd/export.c cache_put(&ek->h, &svc_expkey_cache);
h 875 fs/nfsd/export.c err = cache_check(&svc_export_cache, &exp->h, reqp);
h 947 fs/nfsd/export.c ek->h.expiry_time = get_seconds()-1;
h 948 fs/nfsd/export.c cache_put(&ek->h, &svc_expkey_cache);
h 985 fs/nfsd/export.c ek->h.expiry_time = get_seconds()-1;
h 986 fs/nfsd/export.c cache_put(&ek->h, &svc_expkey_cache);
h 1061 fs/nfsd/export.c new.h.expiry_time = NEVER;
h 1062 fs/nfsd/export.c new.h.flags = 0;
h 1092 fs/nfsd/export.c cache_put(&fsid_key->h, &svc_expkey_cache);
h 1109 fs/nfsd/export.c unexp->h.expiry_time = get_seconds()-1;
h 1219 fs/nfsd/export.c cache_put(&ek->h, &svc_expkey_cache);
h 1520 fs/nfsd/export.c struct svc_export *exp = container_of(cp, struct svc_export, h);
h 1528 fs/nfsd/export.c cache_get(&exp->h);
h 1529 fs/nfsd/export.c if (cache_check(&svc_export_cache, &exp->h, NULL))
h 1531 fs/nfsd/export.c cache_put(&exp->h, &svc_export_cache);
h 69 fs/nfsd/nfs4idmap.c struct cache_head h;
h 85 fs/nfsd/nfs4idmap.c struct ent *new = container_of(cnew, struct ent, h);
h 86 fs/nfsd/nfs4idmap.c struct ent *itm = container_of(citm, struct ent, h);
h 98 fs/nfsd/nfs4idmap.c struct ent *map = container_of(ref, struct ent, h.ref);
h 107 fs/nfsd/nfs4idmap.c return &e->h;
h 137 fs/nfsd/nfs4idmap.c struct ent *ent = container_of(ch, struct ent, h);
h 151 fs/nfsd/nfs4idmap.c struct ent *a = container_of(ca, struct ent, h);
h 152 fs/nfsd/nfs4idmap.c struct ent *b = container_of(cb, struct ent, h);
h 163 fs/nfsd/nfs4idmap.c if (h == NULL) {
h 167 fs/nfsd/nfs4idmap.c ent = container_of(h, struct ent, h);
h 171 fs/nfsd/nfs4idmap.c if (test_bit(CACHE_VALID, &h->flags))
h 242 fs/nfsd/nfs4idmap.c ent.h.expiry_time = get_expiry(&buf);
h 243 fs/nfsd/nfs4idmap.c if (ent.h.expiry_time == 0)
h 257 fs/nfsd/nfs4idmap.c set_bit(CACHE_NEGATIVE, &ent.h.flags);
h 267 fs/nfsd/nfs4idmap.c cache_put(&res->h, &idtoname_cache);
h 281 fs/nfsd/nfs4idmap.c &item->h,
h 284 fs/nfsd/nfs4idmap.c return container_of(ch, struct ent, h);
h 293 fs/nfsd/nfs4idmap.c &new->h, &old->h,
h 296 fs/nfsd/nfs4idmap.c return container_of(ch, struct ent, h);
h 318 fs/nfsd/nfs4idmap.c struct ent *ent = container_of(ch, struct ent, h);
h 330 fs/nfsd/nfs4idmap.c struct ent *a = container_of(ca, struct ent, h);
h 331 fs/nfsd/nfs4idmap.c struct ent *b = container_of(cb, struct ent, h);
h 342 fs/nfsd/nfs4idmap.c if (h == NULL) {
h 346 fs/nfsd/nfs4idmap.c ent = container_of(h, struct ent, h);
h 350 fs/nfsd/nfs4idmap.c if (test_bit(CACHE_VALID, &h->flags))
h 411 fs/nfsd/nfs4idmap.c ent.h.expiry_time = get_expiry(&buf);
h 412 fs/nfsd/nfs4idmap.c if (ent.h.expiry_time == 0)
h 420 fs/nfsd/nfs4idmap.c set_bit(CACHE_NEGATIVE, &ent.h.flags);
h 430 fs/nfsd/nfs4idmap.c cache_put(&res->h, &nametoid_cache);
h 443 fs/nfsd/nfs4idmap.c &item->h,
h 446 fs/nfsd/nfs4idmap.c return container_of(ch, struct ent, h);
h 455 fs/nfsd/nfs4idmap.c &new->h, &old->h,
h 458 fs/nfsd/nfs4idmap.c return container_of(ch, struct ent, h);
h 541 fs/nfsd/nfs4idmap.c return cache_check(detail, &(*item)->h, &mdr->req);
h 555 fs/nfsd/nfs4idmap.c if (!test_bit(CACHE_VALID, &(*item)->h.flags)
h 556 fs/nfsd/nfs4idmap.c || (*item)->h.expiry_time < get_seconds()
h 557 fs/nfsd/nfs4idmap.c || detail->flush_time > (*item)->h.last_refresh)
h 560 fs/nfsd/nfs4idmap.c if (test_bit(CACHE_NEGATIVE, &(*item)->h.flags))
h 564 fs/nfsd/nfs4idmap.c cache_put(&(*item)->h, detail);
h 587 fs/nfsd/nfs4idmap.c test_bit(CACHE_VALID, &(*item)->h.flags), 1 * HZ);
h 623 fs/nfsd/nfs4idmap.c cache_put(&item->h, &nametoid_cache);
h 645 fs/nfsd/nfs4idmap.c cache_put(&item->h, &idtoname_cache);
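
All the nfsd hits are one idiom: embed a struct cache_head named h inside svc_export/svc_expkey/ent, hand only &obj->h to the sunrpc cache, and recover the outer object with container_of. A minimal standalone sketch of that embedding:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct cache_head { int flags; };        /* trimmed-down stand-in */

    struct ent {                             /* shaped like nfsd's struct ent */
        struct cache_head h;                 /* generic handle for the cache */
        int id;                              /* type-specific payload */
    };

    static void show(struct cache_head *ch)  /* callbacks see only cache_head */
    {
        struct ent *e = container_of(ch, struct ent, h);

        printf("id=%d\n", e->id);
    }

    int main(void)
    {
        struct ent e = { .h = { 0 }, .id = 42 };

        show(&e.h);                          /* pass the embedded handle */
        return 0;
    }
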
h 60 fs/nfsd/nfs4proc.c cache_get(&src->fh_export->h);
h 498 fs/nfsd/nfsfh.c cache_get(&exp->h);
h 592 fs/nfsd/nfsfh.c cache_put(&exp->h, &svc_export_cache);
h 19 fs/nls/nls_euc-jp.c #define IS_SJIS_JISX0208(h, l) ((((0x81 <= (h)) && ((h) <= 0x9F)) \
h 20 fs/nls/nls_euc-jp.c || ((0xE0 <= (h)) && ((h) <= 0xEA))) \
h 23 fs/nls/nls_euc-jp.c #define IS_SJIS_UDC_LOW(h, l) (((0xF0 <= (h)) && ((h) <= 0xF4)) \
h 25 fs/nls/nls_euc-jp.c #define IS_SJIS_UDC_HI(h, l) (((0xF5 <= (h)) && ((h) <= 0xF9)) \
h 27 fs/nls/nls_euc-jp.c #define IS_SJIS_IBM(h, l) (((0xFA <= (h)) && ((h) <= 0xFC)) \
h 29 fs/nls/nls_euc-jp.c #define IS_SJIS_NECIBM(h, l) (((0xED <= (h)) && ((h) <= 0xEE)) \
h 44 fs/nls/nls_euc-jp.c #define IS_EUC_JISX0208(h, l) (IS_EUC_BYTE(h) && IS_EUC_BYTE(l))
h 45 fs/nls/nls_euc-jp.c #define IS_EUC_JISX0201KANA(h, l) (((h) == SS2) && (0xA1 <= (l) && (l) <= 0xDF))
h 46 fs/nls/nls_euc-jp.c #define IS_EUC_UDC_LOW(h, l) (((0xF5 <= (h)) && ((h) <= 0xFE)) \
h 48 fs/nls/nls_euc-jp.c #define IS_EUC_UDC_HI(h, l) IS_EUC_UDC_LOW(h, l) /* G3 block */
h 142 fs/nls/nls_euc-jp.c (((h) == 0xA2 && (l) == 0xCC) || ((h) == 0xA2 && (l) == 0xE8))
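
The nls_euc-jp.c macros classify a two-byte code purely by high/low byte ranges. Note they expand their h and l arguments several times, so callers must pass side-effect-free expressions. A usage sketch (convert_jisx0208() is a hypothetical handler; 0x82 0xA0 is a Shift-JIS JIS X 0208 pair):

    unsigned char h = 0x82, l = 0xa0;

    if (IS_SJIS_JISX0208(h, l))
        convert_jisx0208(h, l);      /* hypothetical conversion step */
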
h 77 fs/partitions/ldm.c int h;
h 80 fs/partitions/ldm.c if ((x = src[0] - '0') <= '9'-'0') h = x;
h 81 fs/partitions/ldm.c else if ((x = src[0] - 'a') <= 'f'-'a') h = x+10;
h 82 fs/partitions/ldm.c else if ((x = src[0] - 'A') <= 'F'-'A') h = x+10;
h 84 fs/partitions/ldm.c h <<= 4;
h 87 fs/partitions/ldm.c if ((x = src[1] - '0') <= '9'-'0') return h | x;
h 88 fs/partitions/ldm.c if ((x = src[1] - 'a') <= 'f'-'a') return h | (x+10);
h 89 fs/partitions/ldm.c if ((x = src[1] - 'A') <= 'F'-'A') return h | (x+10);
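
ldm_parse_hexbyte() above builds h from two ASCII nibbles; the `<=` range checks are safe only because x is declared unsigned in the original, so characters below '0' wrap to huge values and fail. A standalone equivalent:

    #include <stdio.h>

    /* Two ASCII hex digits -> one byte, or -1 on a bad character. */
    static int parse_hexbyte(const unsigned char *src)
    {
        int h = 0, i;

        for (i = 0; i < 2; i++) {
            unsigned int x;              /* unsigned: bad chars wrap huge */

            if ((x = src[i] - '0') <= 9)
                ;                        /* decimal digit, x is the value */
            else if ((x = src[i] - 'a') <= 'f' - 'a')
                x += 10;
            else if ((x = src[i] - 'A') <= 'F' - 'A')
                x += 10;
            else
                return -1;
            h = h << 4 | x;
        }
        return h;
    }

    int main(void)
    {
        printf("%d\n", parse_hexbyte((const unsigned char *)"3f"));  /* 63 */
        return 0;
    }
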
h 82 fs/proc/proc_sysctl.c struct ctl_table_header *h = NULL;
h 100 fs/proc/proc_sysctl.c for (h = sysctl_head_next(NULL); h; h = sysctl_head_next(h)) {
h 101 fs/proc/proc_sysctl.c if (h->attached_to != table)
h 103 fs/proc/proc_sysctl.c p = find_in_table(h->attached_by, name);
h 113 fs/proc/proc_sysctl.c inode = proc_sys_make_inode(dir->i_sb, h ? h : head, p);
h 114 fs/proc/proc_sysctl.c if (h)
h 115 fs/proc/proc_sysctl.c sysctl_head_finish(h);
h 246 fs/proc/proc_sysctl.c struct ctl_table_header *h = NULL;
h 280 fs/proc/proc_sysctl.c for (h = sysctl_head_next(NULL); h; h = sysctl_head_next(h)) {
h 281 fs/proc/proc_sysctl.c if (h->attached_to != table)
h 283 fs/proc/proc_sysctl.c ret = scan(h, h->attached_by, &pos, filp, dirent, filldir);
h 285 fs/proc/proc_sysctl.c sysctl_head_finish(h);
h 1828 fs/reiserfs/do_balan.c int Sh_position = PATH_H_POSITION(tb->tb_path, h + 1);
h 1830 fs/reiserfs/do_balan.c RFALSE(PATH_H_PPARENT(tb->tb_path, h) == NULL || tb->FL[h] == NULL,
h 1832 fs/reiserfs/do_balan.c h, tb->FL[h], h, PATH_H_PPARENT(tb->tb_path, h));
h 1835 fs/reiserfs/do_balan.c return B_NR_ITEMS(tb->FL[h]);
h 1842 fs/reiserfs/do_balan.c int Sh_position = PATH_H_POSITION(tb->tb_path, h + 1);
h 1844 fs/reiserfs/do_balan.c RFALSE(PATH_H_PPARENT(tb->tb_path, h) == NULL || tb->FR[h] == NULL,
h 1846 fs/reiserfs/do_balan.c h, PATH_H_PPARENT(tb->tb_path, h), h, tb->FR[h]);
h 1848 fs/reiserfs/do_balan.c if (Sh_position == B_NR_ITEMS(PATH_H_PPARENT(tb->tb_path, h)))
h 1983 fs/reiserfs/do_balan.c int h;
h 1986 fs/reiserfs/do_balan.c for (h = 1; tb->insert_size[h]; h++) {
h 1987 fs/reiserfs/do_balan.c check_internal_node(tb->tb_sb, PATH_H_PBUFFER(tb->tb_path, h),
h 1989 fs/reiserfs/do_balan.c if (tb->lnum[h])
h 1990 fs/reiserfs/do_balan.c check_internal_node(tb->tb_sb, tb->L[h], "BAD L");
h 1991 fs/reiserfs/do_balan.c if (tb->rnum[h])
h 1992 fs/reiserfs/do_balan.c check_internal_node(tb->tb_sb, tb->R[h], "BAD R");
h 2090 fs/reiserfs/do_balan.c h; /* level of the tree being processed */
h 2131 fs/reiserfs/do_balan.c for (h = 1; h < MAX_HEIGHT && tb->insert_size[h]; h++)
h 2133 fs/reiserfs/do_balan.c balance_internal(tb, h, child_pos, insert_key, insert_ptr);
h 84 fs/reiserfs/fix_node.c Sh = PATH_H_PBUFFER(tb->tb_path, h);
h 88 fs/reiserfs/fix_node.c MAX_CHILD_SIZE(Sh) - B_FREE_SPACE(Sh) + tb->insert_size[h];
h 91 fs/reiserfs/fix_node.c if (h) {
h 212 fs/reiserfs/fix_node.c if (h > 0) {
h 213 fs/reiserfs/fix_node.c tb->lnum[h] = cur_free / (DC_SIZE + KEY_SIZE);
h 221 fs/reiserfs/fix_node.c tb->lnum[h] = 0;
h 292 fs/reiserfs/fix_node.c if (h > 0) {
h 293 fs/reiserfs/fix_node.c tb->rnum[h] = cur_free / (DC_SIZE + KEY_SIZE);
h 301 fs/reiserfs/fix_node.c tb->rnum[h] = 0;
h 318 fs/reiserfs/fix_node.c tb->rnum[h] = vn->vn_nr_item;
h 395 fs/reiserfs/fix_node.c RFALSE(tb->insert_size[h] < 0 || (mode != M_INSERT && mode != M_PASTE),
h 398 fs/reiserfs/fix_node.c max_node_size = MAX_CHILD_SIZE(PATH_H_PBUFFER(tb->tb_path, h));
h 406 fs/reiserfs/fix_node.c if (h > 0) {
h 591 fs/reiserfs/fix_node.c tb->lnum[h] = lnum;
h 592 fs/reiserfs/fix_node.c tb->rnum[h] = rnum;
h 593 fs/reiserfs/fix_node.c tb->blknum[h] = blk_num;
h 595 fs/reiserfs/fix_node.c if (h == 0) { /* only for leaf level */
h 605 fs/reiserfs/fix_node.c PROC_INFO_ADD(tb->tb_sb, lnum[h], lnum);
h 606 fs/reiserfs/fix_node.c PROC_INFO_ADD(tb->tb_sb, rnum[h], rnum);
h 608 fs/reiserfs/fix_node.c PROC_INFO_ADD(tb->tb_sb, lbytes[h], lb);
h 609 fs/reiserfs/fix_node.c PROC_INFO_ADD(tb->tb_sb, rbytes[h], rb);
h 714 fs/reiserfs/fix_node.c if (h)\
h 721 fs/reiserfs/fix_node.c set_parameters (tb, h, to_l, 0, lnver, NULL, -1, -1);\
h 726 fs/reiserfs/fix_node.c set_parameters (tb, h, lpar, 0, lnver, snum012+lset,\
h 729 fs/reiserfs/fix_node.c set_parameters (tb, h, lpar - (tb->lbytes!=-1), 0, lnver, snum012+lset,\
h 734 fs/reiserfs/fix_node.c if (h)\
h 740 fs/reiserfs/fix_node.c set_parameters (tb, h, 0, to_r, rnver, NULL, -1, -1);\
h 745 fs/reiserfs/fix_node.c set_parameters (tb, h, 0, rpar, rnver, snum012+rset,\
h 748 fs/reiserfs/fix_node.c set_parameters (tb, h, 0, rpar - (tb->rbytes!=-1), rnver, snum012+rset,\
h 860 fs/reiserfs/fix_node.c if ((f = PATH_H_PPARENT(tb->tb_path, h)) == NULL ||
h 861 fs/reiserfs/fix_node.c (l = tb->FL[h]) == NULL)
h 865 fs/reiserfs/fix_node.c order = PATH_H_B_ITEM_ORDER(tb->tb_path, h) - 1;
h 882 fs/reiserfs/fix_node.c if ((f = PATH_H_PPARENT(tb->tb_path, h)) == NULL ||
h 883 fs/reiserfs/fix_node.c (r = tb->FR[h]) == NULL)
h 887 fs/reiserfs/fix_node.c order = PATH_H_B_ITEM_ORDER(tb->tb_path, h) + 1;
h 1166 fs/reiserfs/fix_node.c struct buffer_head *Sh = PATH_H_PBUFFER(tb->tb_path, h);
h 1167 fs/reiserfs/fix_node.c int levbytes = tb->insert_size[h];
h 1172 fs/reiserfs/fix_node.c if (tb->CFR[h])
h 1173 fs/reiserfs/fix_node.c r_key = B_N_PDELIM_KEY(tb->CFR[h], tb->rkey[h]);
h 1178 fs/reiserfs/fix_node.c ((!h
h 1181 fs/reiserfs/fix_node.c ((!h && r_key
h 1183 fs/reiserfs/fix_node.c + ((h) ? KEY_SIZE : 0)) {
h 1186 fs/reiserfs/fix_node.c if (!h)
h 1190 fs/reiserfs/fix_node.c set_parameters(tb, h, 0, 0, 1, NULL, -1, -1);
h 1194 fs/reiserfs/fix_node.c PROC_INFO_INC(tb->tb_sb, can_node_be_removed[h]);
h 1252 fs/reiserfs/fix_node.c Sh = PATH_H_PBUFFER(tb->tb_path, h);
h 1253 fs/reiserfs/fix_node.c levbytes = tb->insert_size[h];
h 1257 fs/reiserfs/fix_node.c if (!h)
h 1260 fs/reiserfs/fix_node.c switch (n_ret_value = get_empty_nodes(tb, h)) {
h 1262 fs/reiserfs/fix_node.c set_parameters(tb, h, 0, 0, 1, NULL, -1, -1);
h 1274 fs/reiserfs/fix_node.c if ((n_ret_value = get_parents(tb, h)) != CARRY_ON) /* get parents of S[h] neighbors. */
h 1280 fs/reiserfs/fix_node.c rfree = get_rfree(tb, h);
h 1281 fs/reiserfs/fix_node.c lfree = get_lfree(tb, h);
h 1283 fs/reiserfs/fix_node.c if (can_node_be_removed(vn->vn_mode, lfree, sfree, rfree, tb, h) ==
h 1288 fs/reiserfs/fix_node.c create_virtual_node(tb, h);
h 1295 fs/reiserfs/fix_node.c check_left(tb, h, lfree);
h 1302 fs/reiserfs/fix_node.c check_right(tb, h, rfree);
h 1306 fs/reiserfs/fix_node.c if (h && (tb->rnum[h] + tb->lnum[h] >= vn->vn_nr_item + 1)) {
h 1316 fs/reiserfs/fix_node.c ((MAX_NR_KEY(Sh) << 1) + 2 - tb->lnum[h] - tb->rnum[h] +
h 1318 fs/reiserfs/fix_node.c tb->rnum[h]);
h 1319 fs/reiserfs/fix_node.c set_parameters(tb, h, vn->vn_nr_item + 1 - to_r, to_r, 0, NULL,
h 1325 fs/reiserfs/fix_node.c RFALSE(h &&
h 1326 fs/reiserfs/fix_node.c (tb->lnum[h] >= vn->vn_nr_item + 1 ||
h 1327 fs/reiserfs/fix_node.c tb->rnum[h] >= vn->vn_nr_item + 1),
h 1329 fs/reiserfs/fix_node.c RFALSE(!h && ((tb->lnum[h] >= vn->vn_nr_item && (tb->lbytes == -1)) ||
h 1330 fs/reiserfs/fix_node.c (tb->rnum[h] >= vn->vn_nr_item && (tb->rbytes == -1))),
h 1335 fs/reiserfs/fix_node.c if (!h && is_leaf_removable(tb))
h 1343 fs/reiserfs/fix_node.c if (!h)
h 1345 fs/reiserfs/fix_node.c set_parameters(tb, h, 0, 0, 1, NULL, -1, -1);
h 1372 fs/reiserfs/fix_node.c lpar = tb->lnum[h];
h 1373 fs/reiserfs/fix_node.c rpar = tb->rnum[h];
h 1380 fs/reiserfs/fix_node.c nver = get_num_ver(vn->vn_mode, tb, h,
h 1381 fs/reiserfs/fix_node.c 0, -1, h ? vn->vn_nr_item : 0, -1,
h 1384 fs/reiserfs/fix_node.c if (!h) {
h 1388 fs/reiserfs/fix_node.c nver1 = get_num_ver(vn->vn_mode, tb, h,
h 1402 fs/reiserfs/fix_node.c lnver = get_num_ver(vn->vn_mode, tb, h,
h 1403 fs/reiserfs/fix_node.c lpar - ((h || tb->lbytes == -1) ? 0 : 1),
h 1404 fs/reiserfs/fix_node.c -1, h ? vn->vn_nr_item : 0, -1,
h 1406 fs/reiserfs/fix_node.c if (!h) {
h 1409 fs/reiserfs/fix_node.c lnver1 = get_num_ver(vn->vn_mode, tb, h,
h 1425 fs/reiserfs/fix_node.c rnver = get_num_ver(vn->vn_mode, tb, h,
h 1427 fs/reiserfs/fix_node.c h ? (vn->vn_nr_item - rpar) : (rpar -
h 1433 fs/reiserfs/fix_node.c if (!h) {
h 1436 fs/reiserfs/fix_node.c rnver1 = get_num_ver(vn->vn_mode, tb, h,
h 1453 fs/reiserfs/fix_node.c lrnver = get_num_ver(vn->vn_mode, tb, h,
h 1454 fs/reiserfs/fix_node.c lpar - ((h || tb->lbytes == -1) ? 0 : 1),
h 1456 fs/reiserfs/fix_node.c h ? (vn->vn_nr_item - rpar) : (rpar -
h 1462 fs/reiserfs/fix_node.c if (!h) {
h 1465 fs/reiserfs/fix_node.c lrnver1 = get_num_ver(vn->vn_mode, tb, h,
h 1484 fs/reiserfs/fix_node.c RFALSE(h &&
h 1485 fs/reiserfs/fix_node.c (tb->lnum[h] != 1 ||
h 1486 fs/reiserfs/fix_node.c tb->rnum[h] != 1 ||
h 1488 fs/reiserfs/fix_node.c || h != 1), "vs-8230: bad h");
h 1490 fs/reiserfs/fix_node.c set_parameters(tb, h, tb->lnum[h], tb->rnum[h],
h 1494 fs/reiserfs/fix_node.c set_parameters(tb, h,
h 1495 fs/reiserfs/fix_node.c tb->lnum[h] -
h 1497 fs/reiserfs/fix_node.c tb->rnum[h] -
h 1506 fs/reiserfs/fix_node.c set_parameters(tb, h, 0, 0, nver, snum012 + nset, -1,
h 1528 fs/reiserfs/fix_node.c if (is_left_neighbor_in_cache(tb, h)) {
h 1565 fs/reiserfs/fix_node.c Sh = PATH_H_PBUFFER(tb->tb_path, h);
h 1566 fs/reiserfs/fix_node.c Fh = PATH_H_PPARENT(tb->tb_path, h);
h 1573 fs/reiserfs/fix_node.c create_virtual_node(tb, h);
h 1577 fs/reiserfs/fix_node.c set_parameters(tb, h, 0, 0, 1, NULL, -1, -1);
h 1583 fs/reiserfs/fix_node.c set_parameters(tb, h, 0, 0, 0, NULL, -1, -1);
h 1587 fs/reiserfs/fix_node.c if ((n_ret_value = get_parents(tb, h)) != CARRY_ON)
h 1591 fs/reiserfs/fix_node.c rfree = get_rfree(tb, h);
h 1592 fs/reiserfs/fix_node.c lfree = get_lfree(tb, h);
h 1595 fs/reiserfs/fix_node.c check_left(tb, h, lfree);
h 1596 fs/reiserfs/fix_node.c check_right(tb, h, rfree);
h 1602 fs/reiserfs/fix_node.c if (tb->lnum[h] >= vn->vn_nr_item + 1) {
h 1610 fs/reiserfs/fix_node.c h)) ==
h 1611 fs/reiserfs/fix_node.c 0) ? B_NR_ITEMS(tb->FL[h]) : n - 1;
h 1612 fs/reiserfs/fix_node.c n = dc_size(B_N_CHILD(tb->FL[h], order_L)) /
h 1614 fs/reiserfs/fix_node.c set_parameters(tb, h, -n - 1, 0, 0, NULL, -1,
h 1619 fs/reiserfs/fix_node.c if (tb->rnum[h] >= vn->vn_nr_item + 1) {
h 1627 fs/reiserfs/fix_node.c h)) ==
h 1629 fs/reiserfs/fix_node.c n = dc_size(B_N_CHILD(tb->FR[h], order_R)) /
h 1631 fs/reiserfs/fix_node.c set_parameters(tb, h, 0, -n - 1, 0, NULL, -1,
h 1637 fs/reiserfs/fix_node.c if (tb->rnum[h] + tb->lnum[h] >= vn->vn_nr_item + 1) {
h 1642 fs/reiserfs/fix_node.c ((MAX_NR_KEY(Sh) << 1) + 2 - tb->lnum[h] -
h 1643 fs/reiserfs/fix_node.c tb->rnum[h] + vn->vn_nr_item + 1) / 2 -
h 1644 fs/reiserfs/fix_node.c (MAX_NR_KEY(Sh) + 1 - tb->rnum[h]);
h 1645 fs/reiserfs/fix_node.c set_parameters(tb, h, vn->vn_nr_item + 1 - to_r, to_r,
h 1651 fs/reiserfs/fix_node.c set_parameters(tb, h, 0, 0, 1, NULL, -1, -1);
h 1657 fs/reiserfs/fix_node.c if (tb->lnum[h] >= vn->vn_nr_item + 1)
h 1658 fs/reiserfs/fix_node.c if (is_left_neighbor_in_cache(tb, h)
h 1659 fs/reiserfs/fix_node.c || tb->rnum[h] < vn->vn_nr_item + 1 || !tb->FR[h]) {
h 1666 fs/reiserfs/fix_node.c h)) ==
h 1667 fs/reiserfs/fix_node.c 0) ? B_NR_ITEMS(tb->FL[h]) : n - 1;
h 1668 fs/reiserfs/fix_node.c n = dc_size(B_N_CHILD(tb->FL[h], order_L)) / (DC_SIZE +
h 1670 fs/reiserfs/fix_node.c set_parameters(tb, h, -n - 1, 0, 0, NULL, -1, -1);
h 1675 fs/reiserfs/fix_node.c if (tb->rnum[h] >= vn->vn_nr_item + 1) {
h 1682 fs/reiserfs/fix_node.c h)) == B_NR_ITEMS(Fh)) ? 0 : (n + 1);
h 1683 fs/reiserfs/fix_node.c n = dc_size(B_N_CHILD(tb->FR[h], order_R)) / (DC_SIZE +
h 1685 fs/reiserfs/fix_node.c set_parameters(tb, h, 0, -n - 1, 0, NULL, -1, -1);
h 1690 fs/reiserfs/fix_node.c if (tb->rnum[h] + tb->lnum[h] >= vn->vn_nr_item + 1) {
h 1694 fs/reiserfs/fix_node.c ((MAX_NR_KEY(Sh) << 1) + 2 - tb->lnum[h] - tb->rnum[h] +
h 1696 fs/reiserfs/fix_node.c tb->rnum[h]);
h 1697 fs/reiserfs/fix_node.c set_parameters(tb, h, vn->vn_nr_item + 1 - to_r, to_r, 0, NULL,
h 1703 fs/reiserfs/fix_node.c RFALSE(!tb->FL[h] && !tb->FR[h], "vs-8235: trying to borrow for root");
h 1706 fs/reiserfs/fix_node.c if (is_left_neighbor_in_cache(tb, h) || !tb->FR[h]) {
h 1710 fs/reiserfs/fix_node.c (MAX_NR_KEY(Sh) + 1 - tb->lnum[h] + vn->vn_nr_item +
h 1712 fs/reiserfs/fix_node.c set_parameters(tb, h, -from_l, 0, 1, NULL, -1, -1);
h 1716 fs/reiserfs/fix_node.c set_parameters(tb, h, 0,
h 1717 fs/reiserfs/fix_node.c -((MAX_NR_KEY(Sh) + 1 - tb->rnum[h] + vn->vn_nr_item +
h 1754 fs/reiserfs/fix_node.c levbytes = tb->insert_size[h];
h 1763 fs/reiserfs/fix_node.c set_parameters(tb, h, 0, 0, 1, NULL, -1, -1);
h 1767 fs/reiserfs/fix_node.c if ((n_ret_value = get_parents(tb, h)) != CARRY_ON)
h 1771 fs/reiserfs/fix_node.c rfree = get_rfree(tb, h);
h 1772 fs/reiserfs/fix_node.c lfree = get_lfree(tb, h);
h 1774 fs/reiserfs/fix_node.c create_virtual_node(tb, h);
h 1784 fs/reiserfs/fix_node.c check_left(tb, h, lfree);
h 1785 fs/reiserfs/fix_node.c check_right(tb, h, rfree);
h 1789 fs/reiserfs/fix_node.c if (is_left_neighbor_in_cache(tb, h) || ((tb->rnum[0] - ((tb->rbytes == -1) ? 0 : 1)) < vn->vn_nr_item) || /* S can not be merged with R */
h 1790 fs/reiserfs/fix_node.c !tb->FR[h]) {
h 1792 fs/reiserfs/fix_node.c RFALSE(!tb->FL[h],
h 1796 fs/reiserfs/fix_node.c set_parameters(tb, h, -1, 0, 0, NULL, -1, -1);
h 1802 fs/reiserfs/fix_node.c set_parameters(tb, h, 0, -1, 0, NULL, -1, -1);
h 1812 fs/reiserfs/fix_node.c set_parameters(tb, h, 0, 0, 1, NULL, -1, -1);
h 1831 fs/reiserfs/fix_node.c RFALSE(!(PATH_H_PBUFFER(tb->tb_path, h)),
h 1834 fs/reiserfs/fix_node.c if (h)
h 1835 fs/reiserfs/fix_node.c return dc_check_balance_internal(tb, h);
h 1837 fs/reiserfs/fix_node.c return dc_check_balance_leaf(tb, h);
h 1878 fs/reiserfs/fix_node.c if (tb->insert_size[h] > 0)
h 1880 fs/reiserfs/fix_node.c return ip_check_balance(tb, h);
h 1883 fs/reiserfs/fix_node.c return dc_check_balance(tb, h);
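The fs/reiserfs/fix_node.c hits above end with a two-level dispatch: a positive insert_size[h] routes to ip_check_balance (insert/paste), anything else to dc_check_balance, which in turn splits on the level h between internal and leaf variants. A minimal standalone sketch of that sign-and-level dispatch; all function names below are illustrative stand-ins, not the reiserfs code itself:

    /* Hedged sketch of the dispatch visible in the fix_node.c hits:
     * positive insert_size means insert/paste balancing, otherwise
     * delete/cut, which further splits on tree level h. */
    #include <stdio.h>

    static int ip_check_balance(int h)          { return printf("insert/paste, level %d\n", h); }
    static int dc_check_balance_internal(int h) { return printf("delete/cut, internal level %d\n", h); }
    static int dc_check_balance_leaf(int h)     { return printf("delete/cut, leaf level %d\n", h); }

    static int dc_check_balance(int h)
    {
        /* h == 0 is the leaf level; everything above is internal */
        if (h)
            return dc_check_balance_internal(h);
        return dc_check_balance_leaf(h);
    }

    static int check_balance(int insert_size, int h)
    {
        if (insert_size > 0)
            return ip_check_balance(h);
        return dc_check_balance(h);
    }

    int main(void)
    {
        check_balance(48, 0);   /* paste into a leaf */
        check_balance(-16, 2);  /* cut that reaches an internal level */
        return 0;
    }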
h 37 fs/reiserfs/ibalance.c src_bi->bi_bh = PATH_H_PBUFFER(tb->tb_path, h);
h 38 fs/reiserfs/ibalance.c src_bi->bi_parent = PATH_H_PPARENT(tb->tb_path, h);
h 39 fs/reiserfs/ibalance.c src_bi->bi_position = PATH_H_POSITION(tb->tb_path, h + 1);
h 41 fs/reiserfs/ibalance.c dest_bi->bi_bh = tb->L[h];
h 42 fs/reiserfs/ibalance.c dest_bi->bi_parent = tb->FL[h];
h 43 fs/reiserfs/ibalance.c dest_bi->bi_position = get_left_neighbor_position(tb, h);
h 44 fs/reiserfs/ibalance.c *d_key = tb->lkey[h];
h 45 fs/reiserfs/ibalance.c *cf = tb->CFL[h];
h 49 fs/reiserfs/ibalance.c src_bi->bi_bh = tb->L[h];
h 50 fs/reiserfs/ibalance.c src_bi->bi_parent = tb->FL[h];
h 51 fs/reiserfs/ibalance.c src_bi->bi_position = get_left_neighbor_position(tb, h);
h 53 fs/reiserfs/ibalance.c dest_bi->bi_bh = PATH_H_PBUFFER(tb->tb_path, h);
h 54 fs/reiserfs/ibalance.c dest_bi->bi_parent = PATH_H_PPARENT(tb->tb_path, h);
h 55 fs/reiserfs/ibalance.c dest_bi->bi_position = PATH_H_POSITION(tb->tb_path, h + 1); /* dest position is analog of dest->b_item_order */
h 56 fs/reiserfs/ibalance.c *d_key = tb->lkey[h];
h 57 fs/reiserfs/ibalance.c *cf = tb->CFL[h];
h 62 fs/reiserfs/ibalance.c src_bi->bi_bh = tb->R[h];
h 63 fs/reiserfs/ibalance.c src_bi->bi_parent = tb->FR[h];
h 64 fs/reiserfs/ibalance.c src_bi->bi_position = get_right_neighbor_position(tb, h);
h 66 fs/reiserfs/ibalance.c dest_bi->bi_bh = PATH_H_PBUFFER(tb->tb_path, h);
h 67 fs/reiserfs/ibalance.c dest_bi->bi_parent = PATH_H_PPARENT(tb->tb_path, h);
h 68 fs/reiserfs/ibalance.c dest_bi->bi_position = PATH_H_POSITION(tb->tb_path, h + 1);
h 69 fs/reiserfs/ibalance.c *d_key = tb->rkey[h];
h 70 fs/reiserfs/ibalance.c *cf = tb->CFR[h];
h 75 fs/reiserfs/ibalance.c src_bi->bi_bh = PATH_H_PBUFFER(tb->tb_path, h);
h 76 fs/reiserfs/ibalance.c src_bi->bi_parent = PATH_H_PPARENT(tb->tb_path, h);
h 77 fs/reiserfs/ibalance.c src_bi->bi_position = PATH_H_POSITION(tb->tb_path, h + 1);
h 79 fs/reiserfs/ibalance.c dest_bi->bi_bh = tb->R[h];
h 80 fs/reiserfs/ibalance.c dest_bi->bi_parent = tb->FR[h];
h 81 fs/reiserfs/ibalance.c dest_bi->bi_position = get_right_neighbor_position(tb, h);
h 82 fs/reiserfs/ibalance.c *d_key = tb->rkey[h];
h 83 fs/reiserfs/ibalance.c *cf = tb->CFR[h];
h 88 fs/reiserfs/ibalance.c dest_bi->bi_bh = tb->L[h];
h 89 fs/reiserfs/ibalance.c dest_bi->bi_parent = tb->FL[h];
h 90 fs/reiserfs/ibalance.c dest_bi->bi_position = get_left_neighbor_position(tb, h);
h 95 fs/reiserfs/ibalance.c dest_bi->bi_bh = PATH_H_PBUFFER(tb->tb_path, h);
h 96 fs/reiserfs/ibalance.c dest_bi->bi_parent = PATH_H_PPARENT(tb->tb_path, h);
h 97 fs/reiserfs/ibalance.c dest_bi->bi_position = PATH_H_POSITION(tb->tb_path, h + 1);
h 102 fs/reiserfs/ibalance.c dest_bi->bi_bh = tb->R[h];
h 103 fs/reiserfs/ibalance.c dest_bi->bi_parent = tb->FR[h];
h 104 fs/reiserfs/ibalance.c dest_bi->bi_position = get_right_neighbor_position(tb, h);
h 470 fs/reiserfs/ibalance.c internal_define_dest_src_infos(mode, tb, h, &dest_bi, &src_bi,
h 507 fs/reiserfs/ibalance.c internal_define_dest_src_infos(INTERNAL_SHIFT_FROM_S_TO_L, tb, h,
h 535 fs/reiserfs/ibalance.c internal_define_dest_src_infos(mode, tb, h, &dest_bi, &src_bi,
h 544 fs/reiserfs/ibalance.c RFALSE(src_bi.bi_bh != PATH_H_PBUFFER(tb->tb_path, h) /*tb->S[h] */ ||
h 545 fs/reiserfs/ibalance.c dest_bi.bi_bh != tb->R[h],
h 547 fs/reiserfs/ibalance.c src_bi.bi_bh, PATH_H_PBUFFER(tb->tb_path, h));
h 549 fs/reiserfs/ibalance.c if (tb->CFL[h])
h 550 fs/reiserfs/ibalance.c replace_key(tb, cf, d_key_position, tb->CFL[h],
h 551 fs/reiserfs/ibalance.c tb->lkey[h]);
h 574 fs/reiserfs/ibalance.c internal_define_dest_src_infos(INTERNAL_SHIFT_FROM_S_TO_R, tb, h,
h 594 fs/reiserfs/ibalance.c struct buffer_head *tbSh = PATH_H_PBUFFER(tb->tb_path, h);
h 597 fs/reiserfs/ibalance.c insert_num = tb->insert_size[h] / ((int)(DC_SIZE + KEY_SIZE));
h 602 fs/reiserfs/ibalance.c bi.bi_parent = PATH_H_PPARENT(tb->tb_path, h);
h 603 fs/reiserfs/ibalance.c bi.bi_position = PATH_H_POSITION(tb->tb_path, h + 1);
h 607 fs/reiserfs/ibalance.c RFALSE(tb->blknum[h] > 1,
h 608 fs/reiserfs/ibalance.c "tb->blknum[%d]=%d when insert_size < 0", h, tb->blknum[h]);
h 612 fs/reiserfs/ibalance.c if (tb->lnum[h] == 0 && tb->rnum[h] == 0) {
h 613 fs/reiserfs/ibalance.c if (tb->blknum[h] == 0) {
h 625 fs/reiserfs/ibalance.c if (!tb->L[h - 1] || !B_NR_ITEMS(tb->L[h - 1]))
h 626 fs/reiserfs/ibalance.c new_root = tb->R[h - 1];
h 628 fs/reiserfs/ibalance.c new_root = tb->L[h - 1];
h 639 fs/reiserfs/ibalance.c if (h > 1)
h 651 fs/reiserfs/ibalance.c if (tb->L[h] && tb->lnum[h] == -B_NR_ITEMS(tb->L[h]) - 1) { /* join S[h] with L[h] */
h 653 fs/reiserfs/ibalance.c RFALSE(tb->rnum[h] != 0,
h 655 fs/reiserfs/ibalance.c h, tb->rnum[h]);
h 657 fs/reiserfs/ibalance.c internal_shift_left(INTERNAL_SHIFT_FROM_S_TO_L, tb, h, n + 1);
h 663 fs/reiserfs/ibalance.c if (tb->R[h] && tb->rnum[h] == -B_NR_ITEMS(tb->R[h]) - 1) { /* join S[h] with R[h] */
h 664 fs/reiserfs/ibalance.c RFALSE(tb->lnum[h] != 0,
h 666 fs/reiserfs/ibalance.c h, tb->lnum[h]);
h 668 fs/reiserfs/ibalance.c internal_shift_right(INTERNAL_SHIFT_FROM_S_TO_R, tb, h, n + 1);
h 674 fs/reiserfs/ibalance.c if (tb->lnum[h] < 0) { /* borrow from left neighbor L[h] */
h 675 fs/reiserfs/ibalance.c RFALSE(tb->rnum[h] != 0,
h 676 fs/reiserfs/ibalance.c "wrong tb->rnum[%d]==%d when borrow from L[h]", h,
h 677 fs/reiserfs/ibalance.c tb->rnum[h]);
h 679 fs/reiserfs/ibalance.c internal_shift_right(INTERNAL_SHIFT_FROM_L_TO_S, tb, h,
h 680 fs/reiserfs/ibalance.c -tb->lnum[h]);
h 684 fs/reiserfs/ibalance.c if (tb->rnum[h] < 0) { /* borrow from right neighbor R[h] */
h 685 fs/reiserfs/ibalance.c RFALSE(tb->lnum[h] != 0,
h 687 fs/reiserfs/ibalance.c h, tb->lnum[h]);
h 688 fs/reiserfs/ibalance.c internal_shift_left(INTERNAL_SHIFT_FROM_R_TO_S, tb, h, -tb->rnum[h]); /*tb->S[h], tb->CFR[h], tb->rkey[h], tb->R[h], -tb->rnum[h]); */
h 692 fs/reiserfs/ibalance.c if (tb->lnum[h] > 0) { /* split S[h] into two parts and put them into neighbors */
h 693 fs/reiserfs/ibalance.c RFALSE(tb->rnum[h] == 0 || tb->lnum[h] + tb->rnum[h] != n + 1,
h 695 fs/reiserfs/ibalance.c h, tb->lnum[h], h, tb->rnum[h], n);
h 697 fs/reiserfs/ibalance.c internal_shift_left(INTERNAL_SHIFT_FROM_S_TO_L, tb, h, tb->lnum[h]); /*tb->L[h], tb->CFL[h], tb->lkey[h], tb->S[h], tb->lnum[h]); */
h 698 fs/reiserfs/ibalance.c internal_shift_right(INTERNAL_SHIFT_FROM_S_TO_R, tb, h,
h 699 fs/reiserfs/ibalance.c tb->rnum[h]);
h 707 fs/reiserfs/ibalance.c h, tb->lnum[h], h, tb->rnum[h]);
h 713 fs/reiserfs/ibalance.c RFALSE(tb->L[h] == NULL || tb->CFL[h] == NULL,
h 715 fs/reiserfs/ibalance.c tb->L[h], tb->CFL[h]);
h 717 fs/reiserfs/ibalance.c if (B_NR_ITEMS(PATH_H_PBUFFER(tb->tb_path, h)) == 0)
h 720 fs/reiserfs/ibalance.c memcpy(B_N_PDELIM_KEY(tb->CFL[h], tb->lkey[h]), key, KEY_SIZE);
h 722 fs/reiserfs/ibalance.c do_balance_mark_internal_dirty(tb, tb->CFL[h], 0);
h 728 fs/reiserfs/ibalance.c RFALSE(tb->R[h] == NULL || tb->CFR[h] == NULL,
h 730 fs/reiserfs/ibalance.c tb->R[h], tb->CFR[h]);
h 731 fs/reiserfs/ibalance.c RFALSE(B_NR_ITEMS(tb->R[h]) == 0,
h 733 fs/reiserfs/ibalance.c B_NR_ITEMS(tb->R[h]));
h 735 fs/reiserfs/ibalance.c memcpy(B_N_PDELIM_KEY(tb->CFR[h], tb->rkey[h]), key, KEY_SIZE);
h 737 fs/reiserfs/ibalance.c do_balance_mark_internal_dirty(tb, tb->CFR[h], 0);
h 760 fs/reiserfs/ibalance.c struct buffer_head *tbSh = PATH_H_PBUFFER(tb->tb_path, h);
h 769 fs/reiserfs/ibalance.c RFALSE(h < 1, "h (%d) can not be < 1 on internal level", h);
h 771 fs/reiserfs/ibalance.c PROC_INFO_INC(tb->tb_sb, balance_at[h]);
h 775 fs/reiserfs/ibalance.c h + 1) /*tb->S[h]->b_item_order */ : 0;
h 779 fs/reiserfs/ibalance.c insert_num = tb->insert_size[h] / ((int)(KEY_SIZE + DC_SIZE));
h 785 fs/reiserfs/ibalance.c RFALSE(h > 1 && (insert_num > 1 || insert_num < -1),
h 787 fs/reiserfs/ibalance.c insert_num, h);
h 791 fs/reiserfs/ibalance.c balance_internal_when_delete(tb, h, child_pos);
h 796 fs/reiserfs/ibalance.c if (tb->lnum[h] > 0) {
h 800 fs/reiserfs/ibalance.c n = B_NR_ITEMS(tb->L[h]); /* number of items in L[h] */
h 801 fs/reiserfs/ibalance.c if (tb->lnum[h] <= child_pos) {
h 803 fs/reiserfs/ibalance.c internal_shift_left(INTERNAL_SHIFT_FROM_S_TO_L, tb, h,
h 804 fs/reiserfs/ibalance.c tb->lnum[h]);
h 806 fs/reiserfs/ibalance.c child_pos -= tb->lnum[h];
h 807 fs/reiserfs/ibalance.c } else if (tb->lnum[h] > child_pos + insert_num) {
h 809 fs/reiserfs/ibalance.c internal_shift_left(INTERNAL_SHIFT_FROM_S_TO_L, tb, h,
h 810 fs/reiserfs/ibalance.c tb->lnum[h] - insert_num);
h 816 fs/reiserfs/ibalance.c bi.bi_bh = tb->L[h];
h 817 fs/reiserfs/ibalance.c bi.bi_parent = tb->FL[h];
h 818 fs/reiserfs/ibalance.c bi.bi_position = get_left_neighbor_position(tb, h);
h 830 fs/reiserfs/ibalance.c internal_shift1_left(tb, h, child_pos + 1);
h 832 fs/reiserfs/ibalance.c k = tb->lnum[h] - child_pos - 1;
h 834 fs/reiserfs/ibalance.c bi.bi_bh = tb->L[h];
h 835 fs/reiserfs/ibalance.c bi.bi_parent = tb->FL[h];
h 836 fs/reiserfs/ibalance.c bi.bi_position = get_left_neighbor_position(tb, h);
h 842 fs/reiserfs/ibalance.c replace_lkey(tb, h, insert_key + k);
h 861 fs/reiserfs/ibalance.c if (tb->rnum[h] > 0) {
h 865 fs/reiserfs/ibalance.c if (n - tb->rnum[h] >= child_pos)
h 868 fs/reiserfs/ibalance.c internal_shift_right(INTERNAL_SHIFT_FROM_S_TO_R, tb, h,
h 869 fs/reiserfs/ibalance.c tb->rnum[h]);
h 870 fs/reiserfs/ibalance.c else if (n + insert_num - tb->rnum[h] < child_pos) {
h 874 fs/reiserfs/ibalance.c internal_shift_right(INTERNAL_SHIFT_FROM_S_TO_R, tb, h,
h 875 fs/reiserfs/ibalance.c tb->rnum[h] - insert_num);
h 879 fs/reiserfs/ibalance.c bi.bi_bh = tb->R[h];
h 880 fs/reiserfs/ibalance.c bi.bi_parent = tb->FR[h];
h 881 fs/reiserfs/ibalance.c bi.bi_position = get_right_neighbor_position(tb, h);
h 885 fs/reiserfs/ibalance.c tb->rnum[h] - 1,
h 893 fs/reiserfs/ibalance.c internal_shift1_right(tb, h, n - child_pos + 1);
h 895 fs/reiserfs/ibalance.c k = tb->rnum[h] - n + child_pos - 1;
h 897 fs/reiserfs/ibalance.c bi.bi_bh = tb->R[h];
h 898 fs/reiserfs/ibalance.c bi.bi_parent = tb->FR[h];
h 899 fs/reiserfs/ibalance.c bi.bi_position = get_right_neighbor_position(tb, h);
h 905 fs/reiserfs/ibalance.c replace_rkey(tb, h, insert_key + insert_num - k - 1);
h 908 fs/reiserfs/ibalance.c dc = B_N_CHILD(tb->R[h], 0);
h 918 fs/reiserfs/ibalance.c do_balance_mark_internal_dirty(tb, tb->R[h], 0);
h 925 fs/reiserfs/ibalance.c RFALSE(tb->blknum[h] > 2, "blknum can not be > 2 for internal level");
h 926 fs/reiserfs/ibalance.c RFALSE(tb->blknum[h] < 0, "blknum can not be < 0");
h 928 fs/reiserfs/ibalance.c if (!tb->blknum[h]) { /* node S[h] is empty now */
h 939 fs/reiserfs/ibalance.c struct buffer_head *tbSh_1 = PATH_H_PBUFFER(tb->tb_path, h - 1);
h 942 fs/reiserfs/ibalance.c if (tb->blknum[h] != 1)
h 948 fs/reiserfs/ibalance.c set_blkh_level(blkh, h + 1);
h 957 fs/reiserfs/ibalance.c tb->insert_size[h] -= DC_SIZE;
h 976 fs/reiserfs/ibalance.c if (tb->blknum[h] == 2) {
h 983 fs/reiserfs/ibalance.c set_blkh_level(B_BLK_HEAD(S_new), h + 1);
h 991 fs/reiserfs/ibalance.c src_bi.bi_parent = PATH_H_PPARENT(tb->tb_path, h);
h 992 fs/reiserfs/ibalance.c src_bi.bi_position = PATH_H_POSITION(tb->tb_path, h + 1);
h 1077 fs/reiserfs/ibalance.c bi.bi_parent = PATH_H_PPARENT(tb->tb_path, h);
h 1078 fs/reiserfs/ibalance.c bi.bi_position = PATH_H_POSITION(tb->tb_path, h + 1);
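The fs/reiserfs/ibalance.c hits keep filling the same triple before every shift: the node buffer itself, its parent, and the position of the node's pointer inside that parent. A tiny sketch of that pattern with simplified stand-in types (not the reiserfs structures):

    #include <stdio.h>

    struct buf {
        int blocknr;
    };

    struct buf_info {
        struct buf *bi_bh;      /* the node itself */
        struct buf *bi_parent;  /* its parent; NULL for the root */
        int bi_position;        /* index of the node's pointer in the parent */
    };

    static void fill_info(struct buf_info *bi, struct buf *node,
                          struct buf *parent, int pos)
    {
        bi->bi_bh = node;
        bi->bi_parent = parent;
        bi->bi_position = pos;
    }

    int main(void)
    {
        struct buf parent = { 10 }, child = { 42 };
        struct buf_info dest;

        fill_info(&dest, &child, &parent, 3);
        printf("node %d is slot %d of node %d\n",
               dest.bi_bh->blocknr, dest.bi_position,
               dest.bi_parent->blocknr);
        return 0;
    }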
h 57 fs/reiserfs/journal.c #define JOURNAL_LIST_ENTRY(h) (list_entry((h), struct reiserfs_journal_list, \
h 59 fs/reiserfs/journal.c #define JOURNAL_WORK_ENTRY(h) (list_entry((h), struct reiserfs_journal_list, \
h 587 fs/reiserfs/prints.c int h = 0;
h 603 fs/reiserfs/prints.c for (h = 0; h < ARRAY_SIZE(tb->insert_size); h++) {
h 604 fs/reiserfs/prints.c if (PATH_H_PATH_OFFSET(tb->tb_path, h) <=
h 607 fs/reiserfs/prints.c h) > ILLEGAL_PATH_ELEMENT_OFFSET) {
h 608 fs/reiserfs/prints.c tbSh = PATH_H_PBUFFER(tb->tb_path, h);
h 609 fs/reiserfs/prints.c tbFh = PATH_H_PPARENT(tb->tb_path, h);
h 616 fs/reiserfs/prints.c h,
h 619 fs/reiserfs/prints.c (tb->L[h]) ? (long long)(tb->L[h]->b_blocknr) : (-1LL),
h 620 fs/reiserfs/prints.c (tb->L[h]) ? atomic_read(&(tb->L[h]->b_count)) : -1,
h 621 fs/reiserfs/prints.c (tb->R[h]) ? (long long)(tb->R[h]->b_blocknr) : (-1LL),
h 622 fs/reiserfs/prints.c (tb->R[h]) ? atomic_read(&(tb->R[h]->b_count)) : -1,
h 624 fs/reiserfs/prints.c (tb->FL[h]) ? (long long)(tb->FL[h]->
h 626 fs/reiserfs/prints.c (tb->FR[h]) ? (long long)(tb->FR[h]->
h 628 fs/reiserfs/prints.c (tb->CFL[h]) ? (long long)(tb->CFL[h]->
h 630 fs/reiserfs/prints.c (tb->CFR[h]) ? (long long)(tb->CFR[h]->
h 644 fs/reiserfs/prints.c h = 0;
h 646 fs/reiserfs/prints.c h++;
h 649 fs/reiserfs/prints.c h, tb->insert_size[h], tb->lnum[h], tb->rnum[h],
h 650 fs/reiserfs/prints.c tb->blknum[h]);
h 651 fs/reiserfs/prints.c } while (tb->insert_size[h]);
h 658 fs/reiserfs/prints.c h = 0;
h 122 fs/ubifs/log.c long long h, t;
h 124 fs/ubifs/log.c h = (long long)c->lhead_lnum * c->leb_size + c->lhead_offs;
h 127 fs/ubifs/log.c if (h >= t)
h 128 fs/ubifs/log.c return c->log_bytes - h + t;
h 130 fs/ubifs/log.c return t - h;
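The fs/ubifs/log.c hits above compute free space in a circular log: the head chases the tail around a fixed-size area, so the free space wraps whenever the head is at or past the tail. A standalone sketch of the same arithmetic:

    #include <stdio.h>

    static long long log_free(long long head, long long tail, long long size)
    {
        if (head >= tail)
            return size - head + tail;  /* free space wraps around the end */
        return tail - head;             /* contiguous gap before the tail */
    }

    int main(void)
    {
        printf("%lld\n", log_free(700, 200, 1000)); /* 500: wraps */
        printf("%lld\n", log_free(200, 700, 1000)); /* 500: contiguous */
        return 0;
    }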
h 1413 fs/ubifs/lpt.c int err, i, h, iip, shft;
h 1425 fs/ubifs/lpt.c for (h = 1; h < c->lpt_hght; h++) {
h 1547 fs/ubifs/lpt.c int err, i, h, iip, shft;
h 1562 fs/ubifs/lpt.c for (h = 1; h < c->lpt_hght; h++) {
h 1884 fs/ubifs/lpt.c int err = 0, i, h, iip, shft;
h 1916 fs/ubifs/lpt.c for (h = 1; h < c->lpt_hght; h++) {
h 1919 fs/ubifs/lpt.c nnode = scan_get_nnode(c, path + h, nnode, iip);
h 1927 fs/ubifs/lpt.c pnode = scan_get_pnode(c, path + h, nnode, iip);
h 1939 fs/ubifs/lpt.c ret = scan_cb(c, lprops, path[h].in_tree, data);
h 1946 fs/ubifs/lpt.c for (h = 1; h < c->lpt_hght; h++) {
h 1950 fs/ubifs/lpt.c if (path[h].in_tree)
h 1957 fs/ubifs/lpt.c memcpy(nnode, &path[h].nnode, sz);
h 1960 fs/ubifs/lpt.c path[h].ptr.nnode = nnode;
h 1961 fs/ubifs/lpt.c path[h].in_tree = 1;
h 1962 fs/ubifs/lpt.c path[h + 1].cnode.parent = nnode;
h 1964 fs/ubifs/lpt.c if (path[h].in_tree)
h 1975 fs/ubifs/lpt.c memcpy(pnode, &path[h].pnode, sz);
h 1978 fs/ubifs/lpt.c path[h].ptr.pnode = pnode;
h 1979 fs/ubifs/lpt.c path[h].in_tree = 1;
h 2017 fs/ubifs/lpt.c h -= 1;
h 2018 fs/ubifs/lpt.c ubifs_assert(h >= 0);
h 2019 fs/ubifs/lpt.c nnode = path[h].ptr.nnode;
h 2027 fs/ubifs/lpt.c h += 1;
h 2028 fs/ubifs/lpt.c for (; h < c->lpt_hght; h++) {
h 2029 fs/ubifs/lpt.c nnode = scan_get_nnode(c, path + h, nnode, iip);
h 2036 fs/ubifs/lpt.c pnode = scan_get_pnode(c, path + h, nnode, iip);
h 567 fs/ubifs/lpt_commit.c int err, h, iip, shft;
h 578 fs/ubifs/lpt_commit.c for (h = 1; h < c->lpt_hght; h++) {
h 1306 fs/ubifs/lpt_commit.c int h, i, found;
h 1312 fs/ubifs/lpt_commit.c for (h = 1; h < c->lpt_hght; h++) {
h 1318 fs/ubifs/lpt_commit.c *hght = h;
h 1341 fs/ubifs/lpt_commit.c int iip, h, i, found;
h 1359 fs/ubifs/lpt_commit.c for (h = *hght + 1; h < c->lpt_hght; h++) {
h 1365 fs/ubifs/lpt_commit.c *hght = h;
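The fs/ubifs/lpt.c and lpt_commit.c hits all share one shape: a tree of known height is walked level by level with `for (h = 1; h < c->lpt_hght; h++)`, recording each visited node in a path array so the walk can later back up (`h -= 1`) and resume. A minimal descent sketch with hypothetical types, not the UBIFS ones:

    #include <stdio.h>

    #define FANOUT 4
    #define HEIGHT 3

    struct node {
        struct node *child[FANOUT];
        int leaf_val;               /* meaningful at the bottom level only */
    };

    static int lookup(struct node *root, const int *iip /* index per level */)
    {
        struct node *path[HEIGHT];
        struct node *n = root;
        int h;

        for (h = 1; h < HEIGHT; h++) {  /* levels 1..HEIGHT-1, as in lpt.c */
            path[h] = n;                /* remembered so we could back up */
            n = n->child[iip[h]];
        }
        (void)path;                     /* a resumable scan would reuse this */
        return n->leaf_val;
    }

    int main(void)
    {
        struct node leaf = { .leaf_val = 7 };
        struct node mid  = { .child = { &leaf } };
        struct node root = { .child = { &mid } };
        const int iip[HEIGHT] = { 0, 0, 0 };

        printf("%d\n", lookup(&root, iip));
        return 0;
    }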
h 913 fs/xfs/quota/xfs_dquot.c xfs_dqhash_t *h;
h 923 fs/xfs/quota/xfs_dquot.c h = XFS_DQ_HASH(mp, id, type);
h 949 fs/xfs/quota/xfs_dquot.c XFS_DQ_HASH_LOCK(h);
h 955 fs/xfs/quota/xfs_dquot.c if (xfs_qm_dqlookup(mp, id, h, O_dqpp) == 0) {
h 964 fs/xfs/quota/xfs_dquot.c XFS_DQ_HASH_UNLOCK(h);
h 983 fs/xfs/quota/xfs_dquot.c version = h->qh_version;
h 984 fs/xfs/quota/xfs_dquot.c XFS_DQ_HASH_UNLOCK(h);
h 1049 fs/xfs/quota/xfs_dquot.c XFS_DQ_HASH_LOCK(h);
h 1050 fs/xfs/quota/xfs_dquot.c if (version != h->qh_version) {
h 1059 fs/xfs/quota/xfs_dquot.c if (xfs_qm_dqlookup(mp, id, h, &tmpdqp) == 0) {
h 1065 fs/xfs/quota/xfs_dquot.c XFS_DQ_HASH_UNLOCK(h);
h 1076 fs/xfs/quota/xfs_dquot.c ASSERT(XFS_DQ_IS_HASH_LOCKED(h));
h 1077 fs/xfs/quota/xfs_dquot.c dqp->q_hash = h;
h 1078 fs/xfs/quota/xfs_dquot.c XQM_HASHLIST_INSERT(h, dqp);
h 1095 fs/xfs/quota/xfs_dquot.c XFS_DQ_HASH_UNLOCK(h);
h 1170 fs/xfs/quota/xfs_qm_syscalls.c if (((d) = (h)->qh_next))
h 1173 fs/xfs/quota/xfs_qm_syscalls.c (dqp)->HL_PREVP = &((h)->qh_next);
h 1174 fs/xfs/quota/xfs_qm_syscalls.c (h)->qh_next = (xfs_dquot_t *)dqp;
h 1175 fs/xfs/quota/xfs_qm_syscalls.c (h)->qh_version++;
h 1176 fs/xfs/quota/xfs_qm_syscalls.c (h)->qh_nelems++;
h 1285 fs/xfs/quota/xfs_qm_syscalls.c xfs_dqhash_t *h;
h 1287 fs/xfs/quota/xfs_qm_syscalls.c h = DQTEST_HASH(mp, id, type);
h 1288 fs/xfs/quota/xfs_qm_syscalls.c for (d = (xfs_dqtest_t *) h->qh_next; d != NULL;
h 1300 fs/xfs/quota/xfs_qm_syscalls.c d->q_hash = h;
h 1301 fs/xfs/quota/xfs_qm_syscalls.c xfs_qm_hashinsert(h, d);
h 49 fs/xfs/quota/xfs_quota_priv.h #define XQMLCK(h) (mutex_lock(&((h)->qh_lock)))
h 50 fs/xfs/quota/xfs_quota_priv.h #define XQMUNLCK(h) (mutex_unlock(&((h)->qh_lock)))
h 55 fs/xfs/quota/xfs_quota_priv.h if (mutex_trylock(&h->qh_lock)) {
h 56 fs/xfs/quota/xfs_quota_priv.h mutex_unlock(&h->qh_lock);
h 63 fs/xfs/quota/xfs_quota_priv.h #define XFS_DQ_HASH_LOCK(h) XQMLCK(h)
h 64 fs/xfs/quota/xfs_quota_priv.h #define XFS_DQ_HASH_UNLOCK(h) XQMUNLCK(h)
h 65 fs/xfs/quota/xfs_quota_priv.h #define XFS_DQ_IS_HASH_LOCKED(h) XQMISLCKD(h)
h 113 fs/xfs/quota/xfs_quota_priv.h (h)->qh_version++; \
h 114 fs/xfs/quota/xfs_quota_priv.h (h)->qh_nelems--; \
h 120 fs/xfs/quota/xfs_quota_priv.h if (((d) = (h)->qh_next)) \
h 123 fs/xfs/quota/xfs_quota_priv.h (dqp)->PVP = &((h)->qh_next); \
h 124 fs/xfs/quota/xfs_quota_priv.h (h)->qh_next = dqp; \
h 125 fs/xfs/quota/xfs_quota_priv.h (h)->qh_version++; \
h 126 fs/xfs/quota/xfs_quota_priv.h (h)->qh_nelems++; \
h 137 fs/xfs/quota/xfs_quota_priv.h _LIST_INSERT(h, dqp, HL_PREVP, HL_NEXT)
h 140 fs/xfs/quota/xfs_quota_priv.h xfs_qm_freelist_append(h, dqp)
h 143 fs/xfs/quota/xfs_quota_priv.h _LIST_INSERT(h, dqp, MPL_PREVP, MPL_NEXT)
h 146 fs/xfs/quota/xfs_quota_priv.h _LIST_REMOVE(h, dqp, HL_PREVP, HL_NEXT)
h 150 fs/xfs/quota/xfs_quota_priv.h { _LIST_REMOVE(h, dqp, MPL_PREVP, MPL_NEXT); \
h 317 include/asm-cris/arch-v32/hwregs/extmem_defs.h unsigned int h : 32;
h 13 include/asm-cris/dma-mapping.h #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
h 14 include/asm-cris/dma-mapping.h #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
h 283 include/asm-frv/bitops.h struct { u32 h, l; };
h 304 include/asm-frv/bitops.h : "0r"(_.h), "r"(_.l)
h 365 include/asm-frv/bitops.h struct { u32 h, l; };
h 380 include/asm-frv/bitops.h : "0r"(_.h), "r"(_.l)
h 10 include/asm-frv/dma-mapping.h #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
h 11 include/asm-frv/dma-mapping.h #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
h 20 include/asm-generic/bitops/fls64.h __u32 h = x >> 32;
h 21 include/asm-generic/bitops/fls64.h if (h)
h 22 include/asm-generic/bitops/fls64.h return fls(h) + 32;
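The asm-generic fls64 hit above finds the last set bit of a 64-bit value by testing the high 32 bits first. A portable sketch with a naive fls standing in for the architecture-optimized one:

    #include <stdio.h>

    static int fls32(unsigned int x)
    {
        int r = 0;

        while (x) {             /* 0 for x == 0, else 1-based bit index */
            r++;
            x >>= 1;
        }
        return r;
    }

    static int fls64(unsigned long long x)
    {
        unsigned int h = x >> 32;

        if (h)
            return fls32(h) + 32;   /* the high word decides */
        return fls32((unsigned int)x);
    }

    int main(void)
    {
        printf("%d %d %d\n", fls64(0), fls64(1), fls64(1ULL << 40)); /* 0 1 41 */
        return 0;
    }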
h 19 include/asm-generic/dma-mapping-broken.h #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
h 20 include/asm-generic/dma-mapping-broken.h #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
h 267 include/asm-generic/dma-mapping.h #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
h 268 include/asm-generic/dma-mapping.h #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
h 195 include/asm-generic/rtc.h struct rtc_time h;
h 197 include/asm-generic/rtc.h get_rtc_time(&h);
h 198 include/asm-generic/rtc.h return h.tm_sec;
h 53 include/asm-m68k/rtc.h struct rtc_time h;
h 55 include/asm-m68k/rtc.h get_rtc_time(&h);
h 56 include/asm-m68k/rtc.h return h.tm_sec;
h 26 include/asm-mn10300/dma-mapping.h #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent((d), (s), (h), (f))
h 27 include/asm-mn10300/dma-mapping.h #define dma_free_noncoherent(d, s, v, h) dma_free_coherent((d), (s), (v), (h))
h 77 include/asm-parisc/pci.h #define HBA_PORT_BASE(h) ((h) << HBA_PORT_SPACE_BITS)
h 115 include/asm-parisc/rtc.h struct rtc_time h;
h 117 include/asm-parisc/rtc.h get_rtc_time(&h);
h 118 include/asm-parisc/rtc.h return h.tm_sec;
h 16 include/asm-parisc/system.h unsigned int h:1;
h 95 include/asm-um/dma-mapping.h #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
h 96 include/asm-um/dma-mapping.h #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
h 85 include/asm-x86/dma-mapping.h #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
h 86 include/asm-x86/dma-mapping.h #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
h 20 include/asm-x86/hugetlb.h struct hstate *h = hstate_file(file);
h 21 include/asm-x86/hugetlb.h if (len & ~huge_page_mask(h))
h 23 include/asm-x86/hugetlb.h if (addr & ~huge_page_mask(h))
h 225 include/asm-x86/msr.h rdmsr(msr_no, *l, *h);
h 230 include/asm-x86/msr.h wrmsr(msr_no, l, h);
h 236 include/asm-x86/msr.h return rdmsr_safe(msr_no, l, h);
h 240 include/asm-x86/msr.h return wrmsr_safe(msr_no, l, h);
h 27 include/asm-xtensa/dma-mapping.h #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
h 28 include/asm-xtensa/dma-mapping.h #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
h 117 include/drm/i810_drm.h unsigned int h;
h 135 include/drm/i810_drm.h unsigned int h;
h 167 include/drm/i830_drm.h unsigned int h;
h 57 include/drm/i915_drm.h unsigned int h;
h 220 include/linux/aio.h return list_entry(h, struct kiocb, ki_list);
h 237 include/linux/amba/clcd.h #define CHECK(e,l,h) (var->e < l || var->e > h)
h 193 include/linux/byteorder/swab.h __u32 h = x >> 32;
h 195 include/linux/byteorder/swab.h return (((__u64)__swab32(l)) << 32) | ((__u64)(__swab32(h)));
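The swab.h hits (here and in include/linux/swab.h further down) build a 64-bit byte swap from two 32-bit swaps of the high and low words, with the words exchanged. A self-contained sketch:

    #include <stdio.h>

    static unsigned int swab32(unsigned int x)
    {
        return ((x & 0x000000ffU) << 24) |
               ((x & 0x0000ff00U) <<  8) |
               ((x & 0x00ff0000U) >>  8) |
               ((x & 0xff000000U) >> 24);
    }

    static unsigned long long swab64(unsigned long long x)
    {
        unsigned int h = x >> 32;          /* high word */
        unsigned int l = x & 0xffffffffU;  /* low word  */

        /* the swapped low word becomes the new high word, and vice versa */
        return ((unsigned long long)swab32(l) << 32) | swab32(h);
    }

    int main(void)
    {
        printf("%016llx\n", swab64(0x0102030405060708ULL)); /* 0807060504030201 */
        return 0;
    }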
h 65 include/linux/dvb/video.h int h;
h 231 include/linux/hugetlb.h return (unsigned long)PAGE_SIZE << h->order;
h 236 include/linux/hugetlb.h return h->mask;
h 241 include/linux/hugetlb.h return h->order;
h 246 include/linux/hugetlb.h return h->order + PAGE_SHIFT;
h 251 include/linux/hugetlb.h return 1 << h->order;
h 256 include/linux/hugetlb.h return huge_page_size(h) / 512;
h 553 include/linux/list.h h->next = NULL;
h 554 include/linux/list.h h->pprev = NULL;
h 559 include/linux/list.h return !h->pprev;
h 564 include/linux/list.h return !h->first;
h 593 include/linux/list.h struct hlist_node *first = h->first;
h 597 include/linux/list.h h->first = n;
h 598 include/linux/list.h n->pprev = &h->first;
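The include/linux/list.h hits show the hlist shape: the head holds a single first pointer, and each node keeps pprev, a pointer to the pointer that points at it, so a node can unlink itself without a head reference. A minimal standalone version of the head-insert seen above:

    #include <stdio.h>
    #include <stddef.h>

    struct hnode {
        struct hnode *next;
        struct hnode **pprev;   /* address of the pointer pointing at us */
        int val;
    };

    struct hhead {
        struct hnode *first;
    };

    static void hadd_head(struct hnode *n, struct hhead *h)
    {
        struct hnode *first = h->first;

        n->next = first;
        if (first)
            first->pprev = &n->next;
        h->first = n;
        n->pprev = &h->first;
    }

    int main(void)
    {
        struct hhead head = { NULL };
        struct hnode a = { .val = 1 }, b = { .val = 2 };
        struct hnode *p;

        hadd_head(&a, &head);
        hadd_head(&b, &head);       /* list is now b, a */
        for (p = head.first; p; p = p->next)
            printf("%d\n", p->val);
        return 0;
    }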
h 84 include/linux/nfsd/export.h struct cache_head h;
h 103 include/linux/nfsd/export.h struct cache_head h;
h 143 include/linux/nfsd/export.h cache_put(&exp->h, &svc_export_cache);
h 148 include/linux/nfsd/export.h cache_get(&exp->h);
h 176 include/linux/pkt_cls.h #define TC_U32_HTID(h) ((h)&0xFFF00000)
h 177 include/linux/pkt_cls.h #define TC_U32_USERHTID(h) (TC_U32_HTID(h)>>20)
h 178 include/linux/pkt_cls.h #define TC_U32_HASH(h) (((h)>>12)&0xFF)
h 179 include/linux/pkt_cls.h #define TC_U32_NODE(h) ((h)&0xFFF)
h 180 include/linux/pkt_cls.h #define TC_U32_KEY(h) ((h)&0xFFFFF)
h 68 include/linux/pkt_sched.h #define TC_H_MAJ(h) ((h)&TC_H_MAJ_MASK)
h 69 include/linux/pkt_sched.h #define TC_H_MIN(h) ((h)&TC_H_MIN_MASK)
h 357 include/linux/quota.h # /* nodep */ include <sys/cdefs.h>
h 305 include/linux/rculist.h struct hlist_node *first = h->first;
h 308 include/linux/rculist.h n->pprev = &h->first;
h 309 include/linux/rculist.h rcu_assign_pointer(h->first, n);
h 1212 include/linux/reiserfs_fs.h #define PATH_H_PBUFFER(p_s_path, h) PATH_OFFSET_PBUFFER (p_s_path, p_s_path->path_length - (h)) /* tb->S[h] */
h 1213 include/linux/reiserfs_fs.h #define PATH_H_PPARENT(path, h) PATH_H_PBUFFER (path, (h) + 1) /* tb->F[h] or tb->S[0]->b_parent */
h 1214 include/linux/reiserfs_fs.h #define PATH_H_POSITION(path, h) PATH_OFFSET_POSITION (path, path->path_length - (h))
h 1215 include/linux/reiserfs_fs.h #define PATH_H_B_ITEM_ORDER(path, h) PATH_H_POSITION(path, h + 1) /* tb->S[h]->b_item_order */
h 13 include/linux/romfs_fs.h #define __mkw(h,l) (((h)&0x00ff)<< 8|((l)&0x00ff))
h 14 include/linux/romfs_fs.h #define __mkl(h,l) (((h)&0xffff)<<16|((l)&0xffff))
h 74 include/linux/sunrpc/cache.h struct cache_head *h,
h 81 include/linux/sunrpc/cache.h struct cache_head *h);
h 142 include/linux/sunrpc/cache.h kref_get(&h->ref);
h 143 include/linux/sunrpc/cache.h return h;
h 149 include/linux/sunrpc/cache.h if (atomic_read(&h->ref.refcount) <= 2 &&
h 150 include/linux/sunrpc/cache.h h->expiry_time < cd->nextcheck)
h 151 include/linux/sunrpc/cache.h cd->nextcheck = h->expiry_time;
h 152 include/linux/sunrpc/cache.h kref_put(&h->ref, cd->cache_put);
h 163 include/linux/sunrpc/cache.h return (h->expiry_time != 0 && test_bit(CACHE_VALID, &h->flags));
h 75 include/linux/swab.h __u32 h = val >> 32;
h 77 include/linux/swab.h return (((__u64)___swab32(l)) << 32) | ((__u64)(___swab32(h)));
h 903 include/net/bluetooth/hci.h #define hci_handle_pack(h, f) (__u16) ((h & 0x0fff)|(f << 12))
h 904 include/net/bluetooth/hci.h #define hci_handle(h) (h & 0x0fff)
h 905 include/net/bluetooth/hci.h #define hci_flags(h) (h >> 12)
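The hci.h hits above pack a 12-bit connection handle into the low bits of a 16-bit value with 4 bits of flags on top. A sketch of the same pack/unpack, with macro names renamed to mark them as stand-ins:

    #include <stdio.h>

    #define HANDLE_PACK(h, f) ((unsigned short)(((h) & 0x0fff) | ((f) << 12)))
    #define HANDLE(v)         ((v) & 0x0fff)   /* low 12 bits */
    #define FLAGS(v)          ((v) >> 12)      /* high 4 bits */

    int main(void)
    {
        unsigned short v = HANDLE_PACK(0x234, 0x5);

        printf("handle 0x%03x flags 0x%x\n", HANDLE(v), FLAGS(v)); /* 0x234 0x5 */
        return 0;
    }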
h 246 include/net/bluetooth/hci_core.h struct hci_conn_hash *h = &hdev->conn_hash;
h 247 include/net/bluetooth/hci_core.h INIT_LIST_HEAD(&h->list);
h 248 include/net/bluetooth/hci_core.h spin_lock_init(&h->lock);
h 249 include/net/bluetooth/hci_core.h h->acl_num = 0;
h 250 include/net/bluetooth/hci_core.h h->sco_num = 0;
h 255 include/net/bluetooth/hci_core.h struct hci_conn_hash *h = &hdev->conn_hash;
h 256 include/net/bluetooth/hci_core.h list_add(&c->list, &h->list);
h 258 include/net/bluetooth/hci_core.h h->acl_num++;
h 260 include/net/bluetooth/hci_core.h h->sco_num++;
h 265 include/net/bluetooth/hci_core.h struct hci_conn_hash *h = &hdev->conn_hash;
h 268 include/net/bluetooth/hci_core.h h->acl_num--;
h 270 include/net/bluetooth/hci_core.h h->sco_num--;
h 276 include/net/bluetooth/hci_core.h struct hci_conn_hash *h = &hdev->conn_hash;
h 280 include/net/bluetooth/hci_core.h list_for_each(p, &h->list) {
h 291 include/net/bluetooth/hci_core.h struct hci_conn_hash *h = &hdev->conn_hash;
h 295 include/net/bluetooth/hci_core.h list_for_each(p, &h->list) {
h 306 include/net/bluetooth/hci_core.h struct hci_conn_hash *h = &hdev->conn_hash;
h 310 include/net/bluetooth/hci_core.h list_for_each(p, &h->list) {
h 146 include/net/netfilter/nf_conntrack_tuple.h ((enum ip_conntrack_dir)(h)->tuple.dst.dir)
h 44 include/net/raw.h struct raw_hashinfo *h;
h 276 include/net/sch_generic.h unsigned int h;
h 278 include/net/sch_generic.h h = qdisc_class_hash(id, hash->hashmask);
h 279 include/net/sch_generic.h hlist_for_each_entry(cl, n, &hash->hash[h], hnode) {
h 636 include/net/sctp/sctp.h int h = (lport << 16) + rport;
h 637 include/net/sctp/sctp.h h ^= h>>8;
h 638 include/net/sctp/sctp.h return (h & (sctp_assoc_hashsize - 1));
h 647 include/net/sctp/sctp.h int h = (lport << 16) + rport;
h 648 include/net/sctp/sctp.h h ^= vtag;
h 649 include/net/sctp/sctp.h return (h & (sctp_assoc_hashsize-1));
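Both sctp.h hash functions above fold the local and remote port into one int, mix it (with a shifted xor in one case, the verification tag in the other), and mask with a power-of-two table size. A standalone sketch of the first variant; HASHSIZE is an assumed placeholder for sctp_assoc_hashsize:

    #include <stdio.h>

    #define HASHSIZE 1024   /* must be a power of two for the mask to work */

    static int assoc_hash(int lport, int rport)
    {
        int h = (lport << 16) + rport;

        h ^= h >> 8;                /* mix the ports together */
        return h & (HASHSIZE - 1);
    }

    int main(void)
    {
        printf("%d\n", assoc_hash(80, 54321));
        return 0;
    }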
h 602 include/net/sock.h } h;
h 98 include/scsi/scsi_transport_spi.h #define spi_signalling(h) (((struct spi_host_attrs *)(h)->shost_data)->signalling)
h 228 include/video/pm3fb.h #define PM3VideoOverlayHeight_HEIGHT(h) (((h) & 0xfff) << 0)
h 655 include/video/pm3fb.h #define PM3FBWriteMode_StripeHeight(h) (((h) & 0x7) << 9)
h 685 include/video/pm3fb.h #define PM3LBDestReadMode_StripeHeight(h) (((h) & 0x7) << 5)
h 706 include/video/pm3fb.h #define PM3LBSourceReadMode_StripeHeight(h) (((h) & 0x7) << 5)
h 726 include/video/pm3fb.h #define PM3LBWriteMode_StripeHeight(h) (((h) & 0x7) << 6)
h 974 include/video/pm3fb.h #define PM3Render2D_Height(h) (((h) & 0x0fff) << 16)
h 981 include/video/pm3fb.h #define PM3Render2DGlyph_Height(h) (((h) & 0x7f) << 7)
h 123 include/xen/interface/vcpu.h GUEST_HANDLE(vcpu_runstate_info) h;
h 242 ipc/msg.c tmp = h->next;
h 243 ipc/msg.c while (tmp != h) {
h 565 ipc/shm.c struct hstate *h = hstate_file(shp->shm_file);
h 566 ipc/shm.c *rss += pages_per_huge_page(h) * mapping->nrpages;
h 1013 kernel/auditfilter.c int h = audit_hash_ino((u32)ino);
h 1015 kernel/auditfilter.c list_add_rcu(&nentry->list, &audit_inode_hash[h]);
h 1108 kernel/auditfilter.c int h;
h 1112 kernel/auditfilter.c for (h = 0; h < AUDIT_INODE_BUCKETS; h++) {
h 1113 kernel/auditfilter.c list = &audit_inode_hash[h];
h 1270 kernel/auditfilter.c int h, err;
h 1281 kernel/auditfilter.c h = audit_hash_ino(inode_f->val);
h 1282 kernel/auditfilter.c list = &audit_inode_hash[h];
h 1311 kernel/auditfilter.c h = audit_hash_ino((u32)watch->ino);
h 1312 kernel/auditfilter.c list = &audit_inode_hash[h];
h 1356 kernel/auditfilter.c int h, ret = 0;
h 1367 kernel/auditfilter.c h = audit_hash_ino(inode_f->val);
h 1368 kernel/auditfilter.c list = &audit_inode_hash[h];
h 703 kernel/auditsc.c int h = audit_hash_ino((u32)n->ino);
h 704 kernel/auditsc.c struct list_head *list = &audit_inode_hash[h];
h 186 kernel/softirq.c struct softirq_action *h;
h 204 kernel/softirq.c h = softirq_vec;
h 208 kernel/softirq.c h->action(h);
h 211 kernel/softirq.c h++;
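The kernel/softirq.c hits walk a table of handlers with a pointer h, invoking each pending entry and passing the entry to its own handler. A sketch of that dispatch driven by a pending bitmask; the table contents are made-up examples:

    #include <stdio.h>

    struct action {
        void (*fn)(struct action *);
        const char *name;
    };

    static void run(struct action *a)
    {
        printf("running %s\n", a->name);
    }

    static struct action vec[] = {
        { run, "timer" }, { run, "net_tx" }, { run, "net_rx" },
    };

    static void do_pending(unsigned int pending)
    {
        struct action *h = vec;

        do {
            if (pending & 1)
                h->fn(h);       /* handler receives its own table entry */
            h++;
            pending >>= 1;
        } while (pending);
    }

    int main(void)
    {
        do_pending(0x5);    /* runs "timer" and "net_rx" */
        return 0;
    }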
h 77 kernel/user.c struct hlist_node *h;
h 79 kernel/user.c hlist_for_each_entry(user, h, hashent, uidhash_node) {
h 336 lib/inflate.c int h; /* table level */
h 442 lib/inflate.c h = -1; /* no tables yet--level -1 */
h 462 lib/inflate.c h++;
h 487 lib/inflate.c if (h)
h 496 lib/inflate.c u[h] = ++q; /* table starts after link */
h 500 lib/inflate.c if (h)
h 502 lib/inflate.c x[h] = i; /* save pattern for backing up */
h 507 lib/inflate.c u[h-1][j] = r; /* connect to last table */
h 541 lib/inflate.c while ((i & ((1 << w) - 1)) != x[h])
h 543 lib/inflate.c h--; /* don't need to update q */
h 1086 lib/inflate.c unsigned h; /* maximum struct huft's malloc'ed */
h 1095 lib/inflate.c h = 0;
h 1104 lib/inflate.c if (hufts > h)
h 1105 lib/inflate.c h = hufts;
h 1122 lib/inflate.c fprintf(stderr, "<%u> ", h);
h 142 lib/zlib_deflate/deflate.c #define UPDATE_HASH(s,h,c) (h = (((h)<<s->hash_shift) ^ (c)) & s->hash_mask)
h 418 lib/zlib_deflate/deftree.c int h; /* heap index */
h 432 lib/zlib_deflate/deftree.c for (h = s->heap_max+1; h < HEAP_SIZE; h++) {
h 433 lib/zlib_deflate/deftree.c n = s->heap[h];
h 474 lib/zlib_deflate/deftree.c m = s->heap[--h];
h 44 mm/hugetlb.c for ((h) = hstates; (h) < &hstates[max_hstate]; (h)++)
h 217 mm/hugetlb.c return ((address - vma->vm_start) >> huge_page_shift(h)) +
h 218 mm/hugetlb.c (vma->vm_pgoff >> huge_page_order(h));
h 328 mm/hugetlb.c h->resv_huge_pages--;
h 334 mm/hugetlb.c h->resv_huge_pages--;
h 372 mm/hugetlb.c struct hstate *h = hstate_vma(vma);
h 375 mm/hugetlb.c for (i = 0; i < pages_per_huge_page(h); i++) {
h 384 mm/hugetlb.c list_add(&page->lru, &h->hugepage_freelists[nid]);
h 385 mm/hugetlb.c h->free_huge_pages++;
h 386 mm/hugetlb.c h->free_huge_pages_node[nid]++;
h 395 mm/hugetlb.c if (!list_empty(&h->hugepage_freelists[nid])) {
h 396 mm/hugetlb.c page = list_entry(h->hugepage_freelists[nid].next,
h 399 mm/hugetlb.c h->free_huge_pages--;
h 400 mm/hugetlb.c h->free_huge_pages_node[nid]--;
h 426 mm/hugetlb.c h->free_huge_pages - h->resv_huge_pages == 0)
h 430 mm/hugetlb.c if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
h 437 mm/hugetlb.c !list_empty(&h->hugepage_freelists[nid])) {
h 438 mm/hugetlb.c page = list_entry(h->hugepage_freelists[nid].next,
h 441 mm/hugetlb.c h->free_huge_pages--;
h 442 mm/hugetlb.c h->free_huge_pages_node[nid]--;
h 445 mm/hugetlb.c decrement_hugepage_resv_vma(h, vma);
h 458 mm/hugetlb.c h->nr_huge_pages--;
h 459 mm/hugetlb.c h->nr_huge_pages_node[page_to_nid(page)]--;
h 460 mm/hugetlb.c for (i = 0; i < pages_per_huge_page(h); i++) {
h 468 mm/hugetlb.c __free_pages(page, huge_page_order(h));
h 473 mm/hugetlb.c struct hstate *h;
h 475 mm/hugetlb.c for_each_hstate(h) {
h 476 mm/hugetlb.c if (huge_page_size(h) == size)
h 477 mm/hugetlb.c return h;
h 488 mm/hugetlb.c struct hstate *h = page_hstate(page);
h 498 mm/hugetlb.c if (h->surplus_huge_pages_node[nid] && huge_page_order(h) < MAX_ORDER) {
h 499 mm/hugetlb.c update_and_free_page(h, page);
h 500 mm/hugetlb.c h->surplus_huge_pages--;
h 501 mm/hugetlb.c h->surplus_huge_pages_node[nid]--;
h 503 mm/hugetlb.c enqueue_huge_page(h, page);
h 528 mm/hugetlb.c if (delta < 0 && !h->surplus_huge_pages_node[nid])
h 531 mm/hugetlb.c if (delta > 0 && h->surplus_huge_pages_node[nid] >=
h 532 mm/hugetlb.c h->nr_huge_pages_node[nid])
h 535 mm/hugetlb.c h->surplus_huge_pages += delta;
h 536 mm/hugetlb.c h->surplus_huge_pages_node[nid] += delta;
h 549 mm/hugetlb.c h->nr_huge_pages++;
h 550 mm/hugetlb.c h->nr_huge_pages_node[nid]++;
h 559 mm/hugetlb.c if (h->order >= MAX_ORDER)
h 565 mm/hugetlb.c huge_page_order(h));
h 568 mm/hugetlb.c __free_pages(page, huge_page_order(h));
h 571 mm/hugetlb.c prep_new_huge_page(h, page, nid);
h 591 mm/hugetlb.c next_nid = next_node(h->hugetlb_next_nid, node_online_map);
h 594 mm/hugetlb.c h->hugetlb_next_nid = next_nid;
h 605 mm/hugetlb.c start_nid = h->hugetlb_next_nid;
h 608 mm/hugetlb.c page = alloc_fresh_huge_page_node(h, h->hugetlb_next_nid);
h 611 mm/hugetlb.c next_nid = hstate_next_node(h);
h 612 mm/hugetlb.c } while (!page && h->hugetlb_next_nid != start_nid);
h 628 mm/hugetlb.c if (h->order >= MAX_ORDER)
h 655 mm/hugetlb.c if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
h 659 mm/hugetlb.c h->nr_huge_pages++;
h 660 mm/hugetlb.c h->surplus_huge_pages++;
h 666 mm/hugetlb.c huge_page_order(h));
h 669 mm/hugetlb.c __free_pages(page, huge_page_order(h));
h 686 mm/hugetlb.c h->nr_huge_pages_node[nid]++;
h 687 mm/hugetlb.c h->surplus_huge_pages_node[nid]++;
h 690 mm/hugetlb.c h->nr_huge_pages--;
h 691 mm/hugetlb.c h->surplus_huge_pages--;
h 710 mm/hugetlb.c needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
h 712 mm/hugetlb.c h->resv_huge_pages += delta;
h 723 mm/hugetlb.c page = alloc_buddy_huge_page(h, NULL, 0);
h 744 mm/hugetlb.c needed = (h->resv_huge_pages + delta) -
h 745 mm/hugetlb.c (h->free_huge_pages + allocated);
h 758 mm/hugetlb.c h->resv_huge_pages += delta;
h 766 mm/hugetlb.c enqueue_huge_page(h, page);
h 810 mm/hugetlb.c h->resv_huge_pages -= unused_resv_pages;
h 813 mm/hugetlb.c if (h->order >= MAX_ORDER)
h 816 mm/hugetlb.c nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
h 823 mm/hugetlb.c if (!h->surplus_huge_pages_node[nid])
h 826 mm/hugetlb.c if (!list_empty(&h->hugepage_freelists[nid])) {
h 827 mm/hugetlb.c page = list_entry(h->hugepage_freelists[nid].next,
h 830 mm/hugetlb.c update_and_free_page(h, page);
h 831 mm/hugetlb.c h->free_huge_pages--;
h 832 mm/hugetlb.c h->free_huge_pages_node[nid]--;
h 833 mm/hugetlb.c h->surplus_huge_pages--;
h 834 mm/hugetlb.c h->surplus_huge_pages_node[nid]--;
h 857 mm/hugetlb.c pgoff_t idx = vma_hugecache_offset(h, vma, addr);
h 866 mm/hugetlb.c pgoff_t idx = vma_hugecache_offset(h, vma, addr);
h 882 mm/hugetlb.c pgoff_t idx = vma_hugecache_offset(h, vma, addr);
h 886 mm/hugetlb.c pgoff_t idx = vma_hugecache_offset(h, vma, addr);
h 897 mm/hugetlb.c struct hstate *h = hstate_vma(vma);
h 910 mm/hugetlb.c chg = vma_needs_reservation(h, vma, addr);
h 918 mm/hugetlb.c page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve);
h 922 mm/hugetlb.c page = alloc_buddy_huge_page(h, vma, addr);
h 932 mm/hugetlb.c vma_commit_reservation(h, vma, addr);
h 946 mm/hugetlb.c NODE_DATA(h->hugetlb_next_nid),
h 947 mm/hugetlb.c huge_page_size(h), huge_page_size(h), 0);
h 959 mm/hugetlb.c hstate_next_node(h);
h 965 mm/hugetlb.c BUG_ON((unsigned long)virt_to_phys(m) & (huge_page_size(h) - 1));
h 968 mm/hugetlb.c m->hstate = h;
h 979 mm/hugetlb.c struct hstate *h = m->hstate;
h 982 mm/hugetlb.c prep_compound_page(page, h->order);
h 983 mm/hugetlb.c prep_new_huge_page(h, page, page_to_nid(page));
h 991 mm/hugetlb.c for (i = 0; i < h->max_huge_pages; ++i) {
h 992 mm/hugetlb.c if (h->order >= MAX_ORDER) {
h 993 mm/hugetlb.c if (!alloc_bootmem_huge_page(h))
h 995 mm/hugetlb.c } else if (!alloc_fresh_huge_page(h))
h 998 mm/hugetlb.c h->max_huge_pages = i;
h 1003 mm/hugetlb.c struct hstate *h;
h 1005 mm/hugetlb.c for_each_hstate(h) {
h 1007 mm/hugetlb.c if (h->order < MAX_ORDER)
h 1008 mm/hugetlb.c hugetlb_hstate_alloc_pages(h);
h 1025 mm/hugetlb.c struct hstate *h;
h 1027 mm/hugetlb.c for_each_hstate(h) {
h 1031 mm/hugetlb.c memfmt(buf, huge_page_size(h)),
h 1032 mm/hugetlb.c h->free_huge_pages);
h 1041 mm/hugetlb.c if (h->order >= MAX_ORDER)
h 1046 mm/hugetlb.c struct list_head *freel = &h->hugepage_freelists[i];
h 1048 mm/hugetlb.c if (count >= h->nr_huge_pages)
h 1053 mm/hugetlb.c update_and_free_page(h, page);
h 1054 mm/hugetlb.c h->free_huge_pages--;
h 1055 mm/hugetlb.c h->free_huge_pages_node[page_to_nid(page)]--;
h 1065 mm/hugetlb.c #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
h 1070 mm/hugetlb.c if (h->order >= MAX_ORDER)
h 1071 mm/hugetlb.c return h->max_huge_pages;
h 1085 mm/hugetlb.c while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
h 1086 mm/hugetlb.c if (!adjust_pool_surplus(h, -1))
h 1090 mm/hugetlb.c while (count > persistent_huge_pages(h)) {
h 1097 mm/hugetlb.c ret = alloc_fresh_huge_page(h);
h 1119 mm/hugetlb.c min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
h 1121 mm/hugetlb.c try_to_free_low(h, min_count);
h 1122 mm/hugetlb.c while (min_count < persistent_huge_pages(h)) {
h 1123 mm/hugetlb.c struct page *page = dequeue_huge_page(h);
h 1126 mm/hugetlb.c update_and_free_page(h, page);
h 1128 mm/hugetlb.c while (count < persistent_huge_pages(h)) {
h 1129 mm/hugetlb.c if (!adjust_pool_surplus(h, 1))
h 1133 mm/hugetlb.c ret = persistent_huge_pages(h);
h 1161 mm/hugetlb.c struct hstate *h = kobj_to_hstate(kobj);
h 1162 mm/hugetlb.c return sprintf(buf, "%lu\n", h->nr_huge_pages);
h 1169 mm/hugetlb.c struct hstate *h = kobj_to_hstate(kobj);
h 1175 mm/hugetlb.c h->max_huge_pages = set_max_huge_pages(h, input);
h 1184 mm/hugetlb.c struct hstate *h = kobj_to_hstate(kobj);
h 1185 mm/hugetlb.c return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
h 1192 mm/hugetlb.c struct hstate *h = kobj_to_hstate(kobj);
h 1199 mm/hugetlb.c h->nr_overcommit_huge_pages = input;
h 1209 mm/hugetlb.c struct hstate *h = kobj_to_hstate(kobj);
h 1210 mm/hugetlb.c return sprintf(buf, "%lu\n", h->free_huge_pages);
h 1217 mm/hugetlb.c struct hstate *h = kobj_to_hstate(kobj);
h 1218 mm/hugetlb.c return sprintf(buf, "%lu\n", h->resv_huge_pages);
h 1225 mm/hugetlb.c struct hstate *h = kobj_to_hstate(kobj);
h 1226 mm/hugetlb.c return sprintf(buf, "%lu\n", h->surplus_huge_pages);
h 1247 mm/hugetlb.c hstate_kobjs[h - hstates] = kobject_create_and_add(h->name,
h 1249 mm/hugetlb.c if (!hstate_kobjs[h - hstates])
h 1252 mm/hugetlb.c retval = sysfs_create_group(hstate_kobjs[h - hstates],
h 1255 mm/hugetlb.c kobject_put(hstate_kobjs[h - hstates]);
h 1262 mm/hugetlb.c struct hstate *h;
h 1269 mm/hugetlb.c for_each_hstate(h) {
h 1270 mm/hugetlb.c err = hugetlb_sysfs_add_hstate(h);
h 1273 mm/hugetlb.c h->name);
h 1279 mm/hugetlb.c struct hstate *h;
h 1281 mm/hugetlb.c for_each_hstate(h) {
h 1282 mm/hugetlb.c kobject_put(hstate_kobjs[h - hstates]);
h 1322 mm/hugetlb.c struct hstate *h;
h 1331 mm/hugetlb.c h = &hstates[max_hstate++];
h 1332 mm/hugetlb.c h->order = order;
h 1333 mm/hugetlb.c h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
h 1334 mm/hugetlb.c h->nr_huge_pages = 0;
h 1335 mm/hugetlb.c h->free_huge_pages = 0;
h 1337 mm/hugetlb.c INIT_LIST_HEAD(&h->hugepage_freelists[i]);
h 1338 mm/hugetlb.c h->hugetlb_next_nid = first_node(node_online_map);
h 1339 mm/hugetlb.c snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
h 1340 mm/hugetlb.c huge_page_size(h)/1024);
h 1342 mm/hugetlb.c parsed_hstate = h;
h 1405 mm/hugetlb.c struct hstate *h = &default_hstate;
h 1409 mm/hugetlb.c tmp = h->max_huge_pages;
h 1416 mm/hugetlb.c h->max_huge_pages = set_max_huge_pages(h, tmp);
h 1437 mm/hugetlb.c struct hstate *h = &default_hstate;
h 1441 mm/hugetlb.c tmp = h->nr_overcommit_huge_pages;
h 1449 mm/hugetlb.c h->nr_overcommit_huge_pages = tmp;
h 1460 mm/hugetlb.c struct hstate *h = &default_hstate;
h 1467 mm/hugetlb.c h->nr_huge_pages,
h 1468 mm/hugetlb.c h->free_huge_pages,
h 1469 mm/hugetlb.c h->resv_huge_pages,
h 1470 mm/hugetlb.c h->surplus_huge_pages,
h 1471 mm/hugetlb.c 1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
h 1476 mm/hugetlb.c struct hstate *h = &default_hstate;
h 1481 mm/hugetlb.c nid, h->nr_huge_pages_node[nid],
h 1482 mm/hugetlb.c nid, h->free_huge_pages_node[nid],
h 1483 mm/hugetlb.c nid, h->surplus_huge_pages_node[nid]);
h 1489 mm/hugetlb.c struct hstate *h = &default_hstate;
h 1490 mm/hugetlb.c return h->nr_huge_pages * pages_per_huge_page(h);
h 1516 mm/hugetlb.c if (gather_surplus_pages(h, delta) < 0)
h 1519 mm/hugetlb.c if (delta > cpuset_mems_nr(h->free_huge_pages_node)) {
h 1520 mm/hugetlb.c return_unused_surplus_pages(h, delta);
h 1527 mm/hugetlb.c return_unused_surplus_pages(h, (unsigned long) -delta);
h 1552 mm/hugetlb.c struct hstate *h = hstate_vma(vma);
h 1559 mm/hugetlb.c start = vma_hugecache_offset(h, vma, vma->vm_start);
h 1560 mm/hugetlb.c end = vma_hugecache_offset(h, vma, vma->vm_end);
h 1568 mm/hugetlb.c hugetlb_acct_memory(h, -reserve);
h 1628 mm/hugetlb.c struct hstate *h = hstate_vma(vma);
h 1629 mm/hugetlb.c unsigned long sz = huge_page_size(h);
h 1673 mm/hugetlb.c struct hstate *h = hstate_vma(vma);
h 1674 mm/hugetlb.c unsigned long sz = huge_page_size(h);
h 1684 mm/hugetlb.c BUG_ON(start & ~huge_page_mask(h));
h 1685 mm/hugetlb.c BUG_ON(end & ~huge_page_mask(h));
h 1794 mm/hugetlb.c struct hstate *h = hstate_vma(vma);
h 1855 mm/hugetlb.c ptep = huge_pte_offset(mm, address & huge_page_mask(h));
h 1877 mm/hugetlb.c idx = vma_hugecache_offset(h, vma, address);
h 1885 mm/hugetlb.c struct hstate *h = hstate_vma(vma);
h 1906 mm/hugetlb.c idx = vma_hugecache_offset(h, vma, address);
h 1915 mm/hugetlb.c size = i_size_read(mapping->host) >> huge_page_shift(h);
h 1923 mm/hugetlb.c clear_huge_page(page, address, huge_page_size(h));
h 1939 mm/hugetlb.c inode->i_blocks += blocks_per_huge_page(h);
h 1952 mm/hugetlb.c if (vma_needs_reservation(h, vma, address) < 0) {
h 1958 mm/hugetlb.c size = i_size_read(mapping->host) >> huge_page_shift(h);
h 1996 mm/hugetlb.c struct hstate *h = hstate_vma(vma);
h 1998 mm/hugetlb.c ptep = huge_pte_alloc(mm, address, huge_page_size(h));
h 2025 mm/hugetlb.c if (vma_needs_reservation(h, vma, address) < 0) {
h 2031 mm/hugetlb.c pagecache_page = hugetlbfs_pagecache_page(h,
h 2071 mm/hugetlb.c struct hstate *h = hstate_vma(vma);
h 2083 mm/hugetlb.c pte = huge_pte_offset(mm, vaddr & huge_page_mask(h));
h 2101 mm/hugetlb.c pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
h 2117 mm/hugetlb.c pfn_offset < pages_per_huge_page(h)) {
h 2139 mm/hugetlb.c struct hstate *h = hstate_vma(vma);
h 2146 mm/hugetlb.c for (; address < end; address += huge_page_size(h)) {
h 2169 mm/hugetlb.c struct hstate *h = hstate_inode(inode);
h 2198 mm/hugetlb.c ret = hugetlb_acct_memory(h, chg);
h 2210 mm/hugetlb.c struct hstate *h = hstate_inode(inode);
h 2214 mm/hugetlb.c inode->i_blocks -= blocks_per_huge_page(h);
h 2218 mm/hugetlb.c hugetlb_acct_memory(h, -(chg - freed));
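The long run of mm/hugetlb.c hits above revolves around one bookkeeping pattern: a free list per NUMA node plus paired counters, one global and one per node, that must move together on every enqueue and dequeue. A minimal sketch with a singly linked free list standing in for the kernel's list_head machinery:

    #include <stdio.h>
    #include <stddef.h>

    #define MAX_NODES 4

    struct page {
        struct page *next;
    };

    static struct page *freelist[MAX_NODES];
    static long free_pages;                  /* global counter  */
    static long free_pages_node[MAX_NODES];  /* per-node mirror */

    static void enqueue(struct page *p, int nid)
    {
        p->next = freelist[nid];
        freelist[nid] = p;
        free_pages++;
        free_pages_node[nid]++;
    }

    static struct page *dequeue(int nid)
    {
        struct page *p = freelist[nid];

        if (!p)
            return NULL;
        freelist[nid] = p->next;
        free_pages--;
        free_pages_node[nid]--;
        return p;
    }

    int main(void)
    {
        struct page a, b;

        enqueue(&a, 0);
        enqueue(&b, 1);
        dequeue(0);
        printf("%ld free (%ld on node 1)\n", free_pages, free_pages_node[1]);
        return 0;
    }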
h 2222 mm/mempolicy.c struct hstate *h = hstate_vma(vma);
h 2223 mm/mempolicy.c unsigned long sz = huge_page_size(h);
h 2227 mm/mempolicy.c addr & huge_page_mask(h));
h 1179 mm/slub.c page = container_of((struct list_head *)h, struct page, lru);
h 2406 mm/slub.c struct page *page, *h;
h 2409 mm/slub.c list_for_each_entry_safe(page, h, &n->partial, lru) {
h 268 net/bluetooth/bnep/core.c struct bnep_ext_hdr *h;
h 272 net/bluetooth/bnep/core.c h = (void *) skb->data;
h 273 net/bluetooth/bnep/core.c if (!skb_pull(skb, sizeof(*h))) {
h 278 net/bluetooth/bnep/core.c BT_DBG("type 0x%x len %d", h->type, h->len);
h 280 net/bluetooth/bnep/core.c switch (h->type & BNEP_TYPE_MASK) {
h 290 net/bluetooth/bnep/core.c if (!skb_pull(skb, h->len)) {
h 294 net/bluetooth/bnep/core.c } while (!err && (h->type & BNEP_EXT_HEADER));
h 536 net/bluetooth/hci_conn.c struct hci_conn_hash *h = &hdev->conn_hash;
h 541 net/bluetooth/hci_conn.c p = h->list.next;
h 542 net/bluetooth/hci_conn.c while (p != &h->list) {
h 975 net/bluetooth/hci_core.c struct hci_event_hdr *h = data;
h 976 net/bluetooth/hci_core.c len = HCI_EVENT_HDR_SIZE + h->plen;
h 983 net/bluetooth/hci_core.c struct hci_acl_hdr *h = data;
h 984 net/bluetooth/hci_core.c len = HCI_ACL_HDR_SIZE + __le16_to_cpu(h->dlen);
h 991 net/bluetooth/hci_core.c struct hci_sco_hdr *h = data;
h 992 net/bluetooth/hci_core.c len = HCI_SCO_HDR_SIZE + h->dlen;
h 1278 net/bluetooth/hci_core.c struct hci_conn_hash *h = &hdev->conn_hash;
h 1285 net/bluetooth/hci_core.c list_for_each(p, &h->list) {
h 1316 net/bluetooth/hci_core.c struct hci_conn_hash *h = &hdev->conn_hash;
h 1323 net/bluetooth/hci_core.c list_for_each(p, &h->list) {
h 89 net/bridge/br_fdb.c struct hlist_node *h;
h 90 net/bridge/br_fdb.c hlist_for_each(h, &br->hash[i]) {
h 93 net/bridge/br_fdb.c f = hlist_entry(h, struct net_bridge_fdb_entry, hlist);
h 129 net/bridge/br_fdb.c struct hlist_node *h, *n;
h 131 net/bridge/br_fdb.c hlist_for_each_entry_safe(f, h, n, &br->hash[i], hlist) {
h 157 net/bridge/br_fdb.c struct hlist_node *h, *n;
h 158 net/bridge/br_fdb.c hlist_for_each_entry_safe(f, h, n, &br->hash[i], hlist) {
h 177 net/bridge/br_fdb.c struct hlist_node *h, *g;
h 179 net/bridge/br_fdb.c hlist_for_each_safe(h, g, &br->hash[i]) {
h 181 net/bridge/br_fdb.c = hlist_entry(h, struct net_bridge_fdb_entry, hlist);
h 215 net/bridge/br_fdb.c struct hlist_node *h;
h 218 net/bridge/br_fdb.c hlist_for_each_entry_rcu(fdb, h, &br->hash[br_mac_hash(addr)], hlist) {
h 266 net/bridge/br_fdb.c struct hlist_node *h;
h 273 net/bridge/br_fdb.c hlist_for_each_entry_rcu(f, h, &br->hash[i], hlist) {
h 309 net/bridge/br_fdb.c struct hlist_node *h;
h 312 net/bridge/br_fdb.c hlist_for_each_entry_rcu(fdb, h, head, hlist) {
h 109 net/bridge/netfilter/ebtables.c if (FWINV2(ntohs(h->h_proto) >= 1536, EBT_IPROTO))
h 112 net/bridge/netfilter/ebtables.c FWINV2(e->ethproto != h->h_proto, EBT_IPROTO))
h 129 net/bridge/netfilter/ebtables.c verdict |= (h->h_source[i] ^ e->sourcemac[i]) &
h 137 net/bridge/netfilter/ebtables.c verdict |= (h->h_dest[i] ^ e->destmac[i]) &
h 309 net/bridge/netfilter/ebtables.c #define find_inlist_lock(h,n,p,e,m) find_inlist_lock_noload((h),(n),(e),(m))
h 591 net/core/neighbour.c u32 h;
h 593 net/core/neighbour.c for (h = 0; h <= PNEIGH_HASHMASK; h++) {
h 594 net/core/neighbour.c np = &tbl->phash_buckets[h];
h 2084 net/core/neighbour.c int rc, h, s_h = cb->args[1];
h 2088 net/core/neighbour.c for (h = 0; h <= tbl->hash_mask; h++) {
h 2089 net/core/neighbour.c if (h < s_h)
h 2091 net/core/neighbour.c if (h > s_h)
h 2093 net/core/neighbour.c for (n = tbl->hash_buckets[h], idx = 0; n; n = n->next) {
h 2113 net/core/neighbour.c cb->args[1] = h;
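The net/core/neighbour.c hits show the resumable-dump idiom: the bucket and index already emitted are saved in cb->args-style state (s_h, s_idx) and skipped on the next call, so a dump can be split across several invocations. A sketch over a plain 2-D table, with made-up sizes:

    #include <stdio.h>

    #define BUCKETS 4
    #define PER_BUCKET 3

    struct state { int s_h, s_idx; };

    /* Emit up to 'budget' entries, remembering where we stopped. */
    static int dump(struct state *st, int budget)
    {
        int h, idx, done = 0;

        for (h = st->s_h; h < BUCKETS; h++) {
            for (idx = (h == st->s_h ? st->s_idx : 0);
                 idx < PER_BUCKET; idx++) {
                if (done == budget) {
                    st->s_h = h;        /* resume point */
                    st->s_idx = idx;
                    return done;
                }
                printf("bucket %d entry %d\n", h, idx);
                done++;
            }
        }
        st->s_h = BUCKETS;  /* nothing left */
        st->s_idx = 0;
        return done;
    }

    int main(void)
    {
        struct state st = { 0, 0 };

        while (dump(&st, 5) == 5)   /* two full batches, then a short one */
            printf("-- batch boundary --\n");
        return 0;
    }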
h 171 net/dccp/ccids/lib/packet_history.c struct tfrc_rx_hist_entry *entry = tfrc_rx_hist_last_rcv(h);
h 183 net/dccp/ccids/lib/packet_history.c if (dccp_delta_seqno(tfrc_rx_hist_loss_prev(h)->tfrchrx_seqno, seq) <= 0)
h 186 net/dccp/ccids/lib/packet_history.c for (i = 1; i <= h->loss_count; i++)
h 187 net/dccp/ccids/lib/packet_history.c if (tfrc_rx_hist_entry(h, i)->tfrchrx_seqno == seq)
h 196 net/dccp/ccids/lib/packet_history.c const u8 idx_a = tfrc_rx_hist_index(h, a),
h 197 net/dccp/ccids/lib/packet_history.c idx_b = tfrc_rx_hist_index(h, b);
h 198 net/dccp/ccids/lib/packet_history.c struct tfrc_rx_hist_entry *tmp = h->ring[idx_a];
h 200 net/dccp/ccids/lib/packet_history.c h->ring[idx_a] = h->ring[idx_b];
h 201 net/dccp/ccids/lib/packet_history.c h->ring[idx_b] = tmp;
h 215 net/dccp/ccids/lib/packet_history.c u64 s0 = tfrc_rx_hist_loss_prev(h)->tfrchrx_seqno,
h 219 net/dccp/ccids/lib/packet_history.c h->loss_count = 1;
h 220 net/dccp/ccids/lib/packet_history.c tfrc_rx_hist_entry_from_skb(tfrc_rx_hist_entry(h, 1), skb, n1);
h 226 net/dccp/ccids/lib/packet_history.c u64 s0 = tfrc_rx_hist_loss_prev(h)->tfrchrx_seqno,
h 227 net/dccp/ccids/lib/packet_history.c s1 = tfrc_rx_hist_entry(h, 1)->tfrchrx_seqno,
h 231 net/dccp/ccids/lib/packet_history.c h->loss_count = 2;
h 232 net/dccp/ccids/lib/packet_history.c tfrc_rx_hist_entry_from_skb(tfrc_rx_hist_entry(h, 2), skb, n2);
h 239 net/dccp/ccids/lib/packet_history.c u64 n1 = tfrc_rx_hist_entry(h, 1)->tfrchrx_ndp;
h 243 net/dccp/ccids/lib/packet_history.c h->loss_count = 0;
h 244 net/dccp/ccids/lib/packet_history.c h->loss_start = tfrc_rx_hist_index(h, 1);
h 247 net/dccp/ccids/lib/packet_history.c tfrc_rx_hist_entry_from_skb(tfrc_rx_hist_loss_prev(h), skb, n2);
h 253 net/dccp/ccids/lib/packet_history.c tfrc_rx_hist_swap(h, 0, 3);
h 254 net/dccp/ccids/lib/packet_history.c h->loss_start = tfrc_rx_hist_index(h, 3);
h 255 net/dccp/ccids/lib/packet_history.c tfrc_rx_hist_entry_from_skb(tfrc_rx_hist_entry(h, 1), skb, n2);
h 256 net/dccp/ccids/lib/packet_history.c h->loss_count = 2;
h 263 net/dccp/ccids/lib/packet_history.c u64 s0 = tfrc_rx_hist_loss_prev(h)->tfrchrx_seqno,
h 264 net/dccp/ccids/lib/packet_history.c s1 = tfrc_rx_hist_entry(h, 1)->tfrchrx_seqno,
h 265 net/dccp/ccids/lib/packet_history.c s2 = tfrc_rx_hist_entry(h, 2)->tfrchrx_seqno,
h 269 net/dccp/ccids/lib/packet_history.c h->loss_count = 3;
h 270 net/dccp/ccids/lib/packet_history.c tfrc_rx_hist_entry_from_skb(tfrc_rx_hist_entry(h, 3), skb, n3);
h 280 net/dccp/ccids/lib/packet_history.c tfrc_rx_hist_swap(h, 2, 3);
h 281 net/dccp/ccids/lib/packet_history.c tfrc_rx_hist_entry_from_skb(tfrc_rx_hist_entry(h, 2), skb, n3);
h 282 net/dccp/ccids/lib/packet_history.c h->loss_count = 3;
h 289 net/dccp/ccids/lib/packet_history.c u64 n1 = tfrc_rx_hist_entry(h, 1)->tfrchrx_ndp;
h 293 net/dccp/ccids/lib/packet_history.c u64 n2 = tfrc_rx_hist_entry(h, 2)->tfrchrx_ndp;
h 297 net/dccp/ccids/lib/packet_history.c h->loss_start = tfrc_rx_hist_index(h, 2);
h 298 net/dccp/ccids/lib/packet_history.c h->loss_count = 0;
h 301 net/dccp/ccids/lib/packet_history.c h->loss_start = tfrc_rx_hist_index(h, 1);
h 302 net/dccp/ccids/lib/packet_history.c h->loss_count = 1;
h 306 net/dccp/ccids/lib/packet_history.c tfrc_rx_hist_entry_from_skb(tfrc_rx_hist_loss_prev(h), skb, n3);
h 315 net/dccp/ccids/lib/packet_history.c tfrc_rx_hist_swap(h, 0, 3);
h 316 net/dccp/ccids/lib/packet_history.c h->loss_start = tfrc_rx_hist_index(h, 3);
h 317 net/dccp/ccids/lib/packet_history.c tfrc_rx_hist_entry_from_skb(tfrc_rx_hist_entry(h, 1), skb, n3);
h 318 net/dccp/ccids/lib/packet_history.c h->loss_count = 3;
h 332 net/dccp/ccids/lib/packet_history.c u64 s1 = tfrc_rx_hist_entry(h, 1)->tfrchrx_seqno,
h 333 net/dccp/ccids/lib/packet_history.c s2 = tfrc_rx_hist_entry(h, 2)->tfrchrx_seqno,
h 334 net/dccp/ccids/lib/packet_history.c s3 = tfrc_rx_hist_entry(h, 3)->tfrchrx_seqno;
h 335 net/dccp/ccids/lib/packet_history.c u64 n2 = tfrc_rx_hist_entry(h, 2)->tfrchrx_ndp,
h 336 net/dccp/ccids/lib/packet_history.c n3 = tfrc_rx_hist_entry(h, 3)->tfrchrx_ndp;
h 342 net/dccp/ccids/lib/packet_history.c h->loss_start = tfrc_rx_hist_index(h, 3);
h 343 net/dccp/ccids/lib/packet_history.c h->loss_count = 0;
h 346 net/dccp/ccids/lib/packet_history.c h->loss_start = tfrc_rx_hist_index(h, 2);
h 347 net/dccp/ccids/lib/packet_history.c h->loss_count = 1;
h 351 net/dccp/ccids/lib/packet_history.c h->loss_start = tfrc_rx_hist_index(h, 1);
h 352 net/dccp/ccids/lib/packet_history.c h->loss_count = 2;
h 378 net/dccp/ccids/lib/packet_history.c if (h->loss_count == 0) {
h 379 net/dccp/ccids/lib/packet_history.c __do_track_loss(h, skb, ndp);
h 380 net/dccp/ccids/lib/packet_history.c } else if (h->loss_count == 1) {
h 381 net/dccp/ccids/lib/packet_history.c __one_after_loss(h, skb, ndp);
h 382 net/dccp/ccids/lib/packet_history.c } else if (h->loss_count != 2) {
h 383 net/dccp/ccids/lib/packet_history.c DCCP_BUG("invalid loss_count %d", h->loss_count);
h 384 net/dccp/ccids/lib/packet_history.c } else if (__two_after_loss(h, skb, ndp)) {
h 388 net/dccp/ccids/lib/packet_history.c is_new_loss = tfrc_lh_interval_add(lh, h, calc_first_li, sk);
h 389 net/dccp/ccids/lib/packet_history.c __three_after_loss(h);
h 400 net/dccp/ccids/lib/packet_history.c h->ring[i] = kmem_cache_alloc(tfrc_rx_hist_slab, GFP_ATOMIC);
h 401 net/dccp/ccids/lib/packet_history.c if (h->ring[i] == NULL)
h 405 net/dccp/ccids/lib/packet_history.c h->loss_count = h->loss_start = 0;
h 410 net/dccp/ccids/lib/packet_history.c kmem_cache_free(tfrc_rx_hist_slab, h->ring[i]);
h 411 net/dccp/ccids/lib/packet_history.c h->ring[i] = NULL;
h 422 net/dccp/ccids/lib/packet_history.c if (h->ring[i] != NULL) {
h 423 net/dccp/ccids/lib/packet_history.c kmem_cache_free(tfrc_rx_hist_slab, h->ring[i]);
h 424 net/dccp/ccids/lib/packet_history.c h->ring[i] = NULL;
h 435 net/dccp/ccids/lib/packet_history.c return h->ring[0];
h 444 net/dccp/ccids/lib/packet_history.c return h->ring[h->rtt_sample_prev];
h 456 net/dccp/ccids/lib/packet_history.c tfrc_rx_hist_rtt_last_s(h)->tfrchrx_ccval);
h 459 net/dccp/ccids/lib/packet_history.c if (h->rtt_sample_prev == 2) { /* previous candidate stored */
h 460 net/dccp/ccids/lib/packet_history.c sample = SUB16(tfrc_rx_hist_rtt_prev_s(h)->tfrchrx_ccval,
h 461 net/dccp/ccids/lib/packet_history.c tfrc_rx_hist_rtt_last_s(h)->tfrchrx_ccval);
h 464 net/dccp/ccids/lib/packet_history.c ktime_us_delta(tfrc_rx_hist_rtt_prev_s(h)->tfrchrx_tstamp,
h 465 net/dccp/ccids/lib/packet_history.c tfrc_rx_hist_rtt_last_s(h)->tfrchrx_tstamp);
h 474 net/dccp/ccids/lib/packet_history.c tfrc_rx_hist_rtt_prev_s(h)->tfrchrx_ccval,
h 475 net/dccp/ccids/lib/packet_history.c tfrc_rx_hist_rtt_last_s(h)->tfrchrx_ccval);
h 477 net/dccp/ccids/lib/packet_history.c h->rtt_sample_prev = 1;
h 482 net/dccp/ccids/lib/packet_history.c sample = ktime_to_us(net_timedelta(tfrc_rx_hist_rtt_last_s(h)->tfrchrx_tstamp));
h 484 net/dccp/ccids/lib/packet_history.c h->rtt_sample_prev = 2;
h 493 net/dccp/ccids/lib/packet_history.c h->rtt_sample_prev = 0; /* use current entry as next reference */
h 91 net/dccp/ccids/lib/packet_history.h return (h->loss_start + n) & TFRC_NDUPACK;
h 100 net/dccp/ccids/lib/packet_history.h return h->ring[tfrc_rx_hist_index(h, h->loss_count)];
h 109 net/dccp/ccids/lib/packet_history.h return h->ring[tfrc_rx_hist_index(h, n)];
h 118 net/dccp/ccids/lib/packet_history.h return h->ring[h->loss_start];
h 124 net/dccp/ccids/lib/packet_history.h return h->loss_count > 0;
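The packet_history.h hit at line 91 forms ring indices by adding an offset to loss_start and masking, which only works because the ring size is a power of two (TFRC_NDUPACK serves as the mask, one less than the ring size). A sketch of the same wrap-by-mask indexing:

    #include <stdio.h>

    #define RING_SIZE 4                 /* power of two */
    #define RING_MASK (RING_SIZE - 1)   /* plays the TFRC_NDUPACK role */

    static int ring_index(int start, int n)
    {
        return (start + n) & RING_MASK; /* wraps without a division */
    }

    int main(void)
    {
        printf("%d %d\n", ring_index(3, 1), ring_index(3, 2)); /* 0 1 */
        return 0;
    }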
h 943 net/dccp/ipv4.c .h.hashinfo = &dccp_hashinfo,
h 1143 net/dccp/ipv6.c .h.hashinfo = &dccp_hashinfo,
h 1606 net/decnet/dn_route.c int h, s_h;
h 1619 net/decnet/dn_route.c for(h = 0; h <= dn_rt_hash_mask; h++) {
h 1620 net/decnet/dn_route.c if (h < s_h)
h 1622 net/decnet/dn_route.c if (h > s_h)
h 1625 net/decnet/dn_route.c for(rt = rcu_dereference(dn_rt_hash_table[h].chain), idx = 0;
h 1644 net/decnet/dn_route.c cb->args[0] = h;
h 88 net/decnet/dn_table.c u16 h = dn_ntohs(key.datum)>>(16 - dz->dz_order);
h 89 net/decnet/dn_table.c h ^= (h >> 10);
h 90 net/decnet/dn_table.c h ^= (h >> 6);
h 91 net/decnet/dn_table.c h &= DZ_HASHMASK(dz);
h 92 net/decnet/dn_table.c return *(dn_fib_idx_t *)&h;
h 418 net/decnet/dn_table.c int h, s_h;
h 421 net/decnet/dn_table.c for(h = 0; h < dz->dz_divisor; h++) {
h 422 net/decnet/dn_table.c if (h < s_h)
h 424 net/decnet/dn_table.c if (h > s_h)
h 426 net/decnet/dn_table.c if (dz->dz_hash == NULL || dz->dz_hash[h] == NULL)
h 428 net/decnet/dn_table.c if (dn_hash_dump_bucket(skb, cb, tb, dz, dz->dz_hash[h]) < 0) {
h 429 net/decnet/dn_table.c cb->args[3] = h;
h 433 net/decnet/dn_table.c cb->args[3] = h;
h 467 net/decnet/dn_table.c unsigned int h, s_h;
h 483 net/decnet/dn_table.c for (h = s_h; h < DN_FIB_TABLE_HASHSZ; h++, s_h = 0) {
h 485 net/decnet/dn_table.c hlist_for_each_entry(tb, node, &dn_fib_table_hash[h], hlist) {
h 500 net/decnet/dn_table.c cb->args[0] = h;
h 815 net/decnet/dn_table.c unsigned int h;
h 823 net/decnet/dn_table.c h = n & (DN_FIB_TABLE_HASHSZ - 1);
h 825 net/decnet/dn_table.c hlist_for_each_entry_rcu(t, node, &dn_fib_table_hash[h], hlist) {
h 852 net/decnet/dn_table.c hlist_add_head_rcu(&t->hlist, &dn_fib_table_hash[h]);
h 872 net/decnet/dn_table.c unsigned int h;
h 874 net/decnet/dn_table.c for (h = 0; h < DN_FIB_TABLE_HASHSZ; h++) {
h 875 net/decnet/dn_table.c hlist_for_each_entry(tb, node, &dn_fib_table_hash[h], hlist)
h 895 net/decnet/dn_table.c unsigned int h;
h 898 net/decnet/dn_table.c for (h = 0; h < DN_FIB_TABLE_HASHSZ; h++) {
h 899 net/decnet/dn_table.c hlist_for_each_entry_safe(t, node, next, &dn_fib_table_hash[h],
h 77 net/ipv4/fib_frontend.c unsigned int h;
h 88 net/ipv4/fib_frontend.c h = id & (FIB_TABLE_HASHSZ - 1);
h 89 net/ipv4/fib_frontend.c hlist_add_head_rcu(&tb->tb_hlist, &net->ipv4.fib_table_hash[h]);
h 98 net/ipv4/fib_frontend.c unsigned int h;
h 102 net/ipv4/fib_frontend.c h = id & (FIB_TABLE_HASHSZ - 1);
h 105 net/ipv4/fib_frontend.c head = &net->ipv4.fib_table_hash[h];
h 138 net/ipv4/fib_frontend.c unsigned int h;
h 140 net/ipv4/fib_frontend.c for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
h 141 net/ipv4/fib_frontend.c head = &net->ipv4.fib_table_hash[h];
h 628 net/ipv4/fib_frontend.c unsigned int h, s_h;
h 642 net/ipv4/fib_frontend.c for (h = s_h; h < FIB_TABLE_HASHSZ; h++, s_e = 0) {
h 644 net/ipv4/fib_frontend.c head = &net->ipv4.fib_table_hash[h];
h 660 net/ipv4/fib_frontend.c cb->args[0] = h;
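The fib_frontend.c dump above shows the resumable two-level walk used by netlink dumpers: the outer loop restarts at the bucket saved in cb->args[0] (s_h), the per-bucket cursor is cleared once the loop moves past the first resumed bucket (s_e = 0 in the increment clause), and the cursor is re-saved before returning. A minimal userspace sketch of that cursor idiom, assuming a fixed array of singly-linked chains and a caller-supplied budget (all names here are illustrative, not kernel API):

    #include <stdio.h>

    #define NBUCKETS 4

    struct node { int val; struct node *next; };

    /* Emit up to 'budget' entries, resuming from *ph/*pe; returns the
     * number emitted and leaves the cursor ready for the next call. */
    static int dump(struct node *tbl[NBUCKETS], int *ph, int *pe, int budget)
    {
        int h, e = 0, done = 0;
        int s_h = *ph, s_e = *pe;

        for (h = s_h; h < NBUCKETS; h++, s_e = 0) {
            struct node *n = tbl[h];
            for (e = 0; n; n = n->next, e++) {
                if (e < s_e)            /* skip entries already dumped */
                    continue;
                if (done == budget)
                    goto out;
                printf("bucket %d entry %d val %d\n", h, e, n->val);
                done++;
            }
        }
    out:
        *ph = h;                        /* save cursor, like cb->args[] */
        *pe = e;
        return done;
    }

    int main(void)
    {
        struct node a = {1, 0}, c = {3, 0}, b = {2, 0};
        struct node *tbl[NBUCKETS] = { &a, 0, &b, 0 };
        int h = 0, e = 0;

        a.next = &c;
        while (dump(tbl, &h, &e, 2) > 0)
            printf("-- resumed --\n");
        return 0;
    }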
h 81 net/ipv4/fib_hash.c u32 h = ntohl(key)>>(32 - fz->fz_order);
h 82 net/ipv4/fib_hash.c h ^= (h>>20);
h 83 net/ipv4/fib_hash.c h ^= (h>>10);
h 84 net/ipv4/fib_hash.c h ^= (h>>5);
h 85 net/ipv4/fib_hash.c h &= FZ_HASHMASK(fz);
h 86 net/ipv4/fib_hash.c return h;
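The fn_hash routine above (net/ipv4/fib_hash.c) and the DECnet variant in dn_table.c both fold a network-order key down to a small bucket index by XORing the value with right-shifted copies of itself, then masking to the zone's power-of-two table size. A standalone sketch of that shift-XOR folding idiom; the function name and the fixed 256-bucket table are illustrative, not kernel API:

    #include <stdint.h>
    #include <stdio.h>

    #define TBL_SIZE 256                /* must be a power of two */
    #define TBL_MASK (TBL_SIZE - 1)

    /* Fold a 32-bit key into [0, TBL_SIZE): the cascading XOR shifts
     * let every input bit influence the low bits kept by the mask. */
    static uint32_t fold_hash(uint32_t key)
    {
        uint32_t h = key;

        h ^= h >> 20;
        h ^= h >> 10;
        h ^= h >> 5;
        return h & TBL_MASK;
    }

    int main(void)
    {
        printf("%u\n", fold_hash(0xc0a80001u));   /* 192.168.0.1 */
        return 0;
    }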
h 728 net/ipv4/fib_hash.c int h, s_h;
h 733 net/ipv4/fib_hash.c for (h = s_h; h < fz->fz_divisor; h++) {
h 734 net/ipv4/fib_hash.c if (hlist_empty(&fz->fz_hash[h]))
h 736 net/ipv4/fib_hash.c if (fn_hash_dump_bucket(skb, cb, tb, fz, &fz->fz_hash[h]) < 0) {
h 737 net/ipv4/fib_hash.c cb->args[3] = h;
h 743 net/ipv4/fib_hash.c cb->args[3] = h;
h 2220 net/ipv4/fib_trie.c unsigned int h;
h 2227 net/ipv4/fib_trie.c for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
h 2228 net/ipv4/fib_trie.c struct hlist_head *head = &net->ipv4.fib_table_hash[h];
h 2270 net/ipv4/fib_trie.c unsigned int h;
h 2272 net/ipv4/fib_trie.c for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
h 2273 net/ipv4/fib_trie.c struct hlist_head *head = &net->ipv4.fib_table_hash[h];
h 2306 net/ipv4/fib_trie.c unsigned int h;
h 2316 net/ipv4/fib_trie.c h = tb->tb_id & (FIB_TABLE_HASHSZ - 1);
h 2325 net/ipv4/fib_trie.c while (++h < FIB_TABLE_HASHSZ) {
h 2326 net/ipv4/fib_trie.c struct hlist_head *head = &net->ipv4.fib_table_hash[h];
h 92 net/ipv4/inet_connection_sock.c struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
h 408 net/ipv4/inet_connection_sock.c const u32 h = inet_synq_hash(inet_rsk(req)->rmt_addr, inet_rsk(req)->rmt_port,
h 411 net/ipv4/inet_connection_sock.c reqsk_queue_hash_req(&icsk->icsk_accept_queue, h, req, timeout);
h 886 net/ipv4/inet_diag.c const __u16 type = h->idiag_type;
h 895 net/ipv4/inet_diag.c inet_diag_table[type] = h;
h 906 net/ipv4/inet_diag.c const __u16 type = h->idiag_type;
h 72 net/ipv4/inet_hashtables.c struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
h 98 net/ipv4/inet_hashtables.c struct inet_hashinfo *table = sk->sk_prot->h.hashinfo;
h 340 net/ipv4/inet_hashtables.c struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
h 361 net/ipv4/inet_hashtables.c struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
h 394 net/ipv4/inet_hashtables.c struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
h 420 net/ipv4/inet_timewait_sock.c int h;
h 423 net/ipv4/inet_timewait_sock.c for (h = 0; h < (hashinfo->ehash_size); h++) {
h 425 net/ipv4/inet_timewait_sock.c inet_ehash_bucket(hashinfo, h);
h 426 net/ipv4/inet_timewait_sock.c rwlock_t *lock = inet_ehash_lockp(hashinfo, h);
h 55 net/ipv4/ip_fragment.c struct inet_skb_parm h;
h 239 net/ipv4/ip_gre.c unsigned h = HASH(key);
h 246 net/ipv4/ip_gre.c h ^= HASH(remote);
h 249 net/ipv4/ip_gre.c return &ign->tunnels[prio][h];
h 472 net/ipv4/ip_gre.c u8 *h;
h 486 net/ipv4/ip_gre.c h = skb->data;
h 487 net/ipv4/ip_gre.c flags = *(__be16*)h;
h 511 net/ipv4/ip_gre.c key = *(__be32*)(h + offset);
h 515 net/ipv4/ip_gre.c seqno = ntohl(*(__be32*)(h + offset));
h 520 net/ipv4/ip_gre.c gre_proto = *(__be16 *)(h + 2);
h 537 net/ipv4/ip_gre.c if ((*(h + offset) & 0xF0) != 0x40)
h 1238 net/ipv4/ip_gre.c int h;
h 1239 net/ipv4/ip_gre.c for (h = 0; h < HASH_SIZE; h++) {
h 1241 net/ipv4/ip_gre.c while ((t = ign->tunnels[prio][h]) != NULL)
h 711 net/ipv4/ipconfig.c struct iphdr *h;
h 724 net/ipv4/ipconfig.c h = ip_hdr(skb);
h 725 net/ipv4/ipconfig.c h->version = 4;
h 726 net/ipv4/ipconfig.c h->ihl = 5;
h 727 net/ipv4/ipconfig.c h->tot_len = htons(sizeof(struct bootp_pkt));
h 728 net/ipv4/ipconfig.c h->frag_off = htons(IP_DF);
h 729 net/ipv4/ipconfig.c h->ttl = 64;
h 730 net/ipv4/ipconfig.c h->protocol = IPPROTO_UDP;
h 731 net/ipv4/ipconfig.c h->daddr = htonl(INADDR_BROADCAST);
h 732 net/ipv4/ipconfig.c h->check = ip_fast_csum((unsigned char *) h, h->ihl);
h 851 net/ipv4/ipconfig.c struct iphdr *h;
h 871 net/ipv4/ipconfig.c h = &b->iph;
h 873 net/ipv4/ipconfig.c if (h->ihl != 5 || h->version != 4 || h->protocol != IPPROTO_UDP)
h 877 net/ipv4/ipconfig.c if (h->frag_off & htons(IP_OFFSET | IP_MF)) {
h 884 net/ipv4/ipconfig.c if (skb->len < ntohs(h->tot_len))
h 887 net/ipv4/ipconfig.c if (ip_fast_csum((char *) h, h->ihl))
h 893 net/ipv4/ipconfig.c if (ntohs(h->tot_len) < ntohs(b->udph.len) + sizeof(struct iphdr))
h 909 net/ipv4/ipconfig.c h = &b->iph;
h 170 net/ipv4/ipip.c unsigned h = 0;
h 175 net/ipv4/ipip.c h ^= HASH(remote);
h 179 net/ipv4/ipip.c h ^= HASH(local);
h 181 net/ipv4/ipip.c return &ipn->tunnels[prio][h];
h 760 net/ipv4/ipip.c int h;
h 761 net/ipv4/ipip.c for (h = 0; h < HASH_SIZE; h++) {
h 763 net/ipv4/ipip.c while ((t = ipn->tunnels[prio][h]) != NULL)
h 525 net/ipv4/netfilter/arp_tables.c unsigned int h;
h 541 net/ipv4/netfilter/arp_tables.c for (h = 0; h < NF_ARP_NUMHOOKS; h++) {
h 542 net/ipv4/netfilter/arp_tables.c if ((unsigned char *)e - base == hook_entries[h])
h 543 net/ipv4/netfilter/arp_tables.c newinfo->hook_entry[h] = hook_entries[h];
h 544 net/ipv4/netfilter/arp_tables.c if ((unsigned char *)e - base == underflows[h])
h 545 net/ipv4/netfilter/arp_tables.c newinfo->underflow[h] = underflows[h];
h 1204 net/ipv4/netfilter/arp_tables.c int ret, off, h;
h 1248 net/ipv4/netfilter/arp_tables.c for (h = 0; h < NF_ARP_NUMHOOKS; h++) {
h 1249 net/ipv4/netfilter/arp_tables.c if ((unsigned char *)e - base == hook_entries[h])
h 1250 net/ipv4/netfilter/arp_tables.c newinfo->hook_entry[h] = hook_entries[h];
h 1251 net/ipv4/netfilter/arp_tables.c if ((unsigned char *)e - base == underflows[h])
h 1252 net/ipv4/netfilter/arp_tables.c newinfo->underflow[h] = underflows[h];
h 1277 net/ipv4/netfilter/arp_tables.c int ret, h;
h 1294 net/ipv4/netfilter/arp_tables.c for (h = 0; h < NF_ARP_NUMHOOKS; h++) {
h 1295 net/ipv4/netfilter/arp_tables.c if ((unsigned char *)de - base < newinfo->hook_entry[h])
h 1296 net/ipv4/netfilter/arp_tables.c newinfo->hook_entry[h] -= origsize - *size;
h 1297 net/ipv4/netfilter/arp_tables.c if ((unsigned char *)de - base < newinfo->underflow[h])
h 1298 net/ipv4/netfilter/arp_tables.c newinfo->underflow[h] -= origsize - *size;
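In the compat translation above, arp_tables records per-hook byte offsets into the rule blob (hook_entry[], underflow[]) and, when an entry shrinks by origsize - *size bytes during conversion, pulls back every recorded offset that lies past the shrunken entry. A toy sketch of that offset bookkeeping, assuming a flat byte blob and a small offsets array (illustrative names, not the xtables API):

    #include <stdio.h>

    #define NHOOKS 3

    /* After the entry starting at 'entry_off' shrank by 'delta' bytes,
     * pull back every recorded offset that pointed past it. */
    static void fixup_offsets(unsigned int offs[NHOOKS],
                              unsigned int entry_off, unsigned int delta)
    {
        for (int h = 0; h < NHOOKS; h++)
            if (offs[h] > entry_off)
                offs[h] -= delta;
    }

    int main(void)
    {
        unsigned int hooks[NHOOKS] = { 0, 64, 160 };

        fixup_offsets(hooks, 32, 16);   /* entry at offset 32 lost 16 bytes */
        for (int h = 0; h < NHOOKS; h++)
            printf("hook %d at offset %u\n", h, hooks[h]);
        return 0;
    }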
h 738 net/ipv4/netfilter/ip_tables.c unsigned int h;
h 754 net/ipv4/netfilter/ip_tables.c for (h = 0; h < NF_INET_NUMHOOKS; h++) {
h 755 net/ipv4/netfilter/ip_tables.c if ((unsigned char *)e - base == hook_entries[h])
h 756 net/ipv4/netfilter/ip_tables.c newinfo->hook_entry[h] = hook_entries[h];
h 757 net/ipv4/netfilter/ip_tables.c if ((unsigned char *)e - base == underflows[h])
h 758 net/ipv4/netfilter/ip_tables.c newinfo->underflow[h] = underflows[h];
h 1546 net/ipv4/netfilter/ip_tables.c int ret, off, h;
h 1595 net/ipv4/netfilter/ip_tables.c for (h = 0; h < NF_INET_NUMHOOKS; h++) {
h 1596 net/ipv4/netfilter/ip_tables.c if ((unsigned char *)e - base == hook_entries[h])
h 1597 net/ipv4/netfilter/ip_tables.c newinfo->hook_entry[h] = hook_entries[h];
h 1598 net/ipv4/netfilter/ip_tables.c if ((unsigned char *)e - base == underflows[h])
h 1599 net/ipv4/netfilter/ip_tables.c newinfo->underflow[h] = underflows[h];
h 1625 net/ipv4/netfilter/ip_tables.c int ret, h;
h 1646 net/ipv4/netfilter/ip_tables.c for (h = 0; h < NF_INET_NUMHOOKS; h++) {
h 1647 net/ipv4/netfilter/ip_tables.c if ((unsigned char *)de - base < newinfo->hook_entry[h])
h 1648 net/ipv4/netfilter/ip_tables.c newinfo->hook_entry[h] -= origsize - *size;
h 1649 net/ipv4/netfilter/ip_tables.c if ((unsigned char *)de - base < newinfo->underflow[h])
h 1650 net/ipv4/netfilter/ip_tables.c newinfo->underflow[h] -= origsize - *size;
h 252 net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c const struct nf_conntrack_tuple_hash *h;
h 275 net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c h = nf_conntrack_find_get(sock_net(sk), &tuple);
h 276 net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c if (h) {
h 278 net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
h 132 net/ipv4/netfilter/nf_conntrack_proto_icmp.c const struct nf_conntrack_tuple_hash *h;
h 158 net/ipv4/netfilter/nf_conntrack_proto_icmp.c h = nf_conntrack_find_get(net, &innertuple);
h 159 net/ipv4/netfilter/nf_conntrack_proto_icmp.c if (!h) {
h 164 net/ipv4/netfilter/nf_conntrack_proto_icmp.c if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY)
h 168 net/ipv4/netfilter/nf_conntrack_proto_icmp.c skb->nfct = &nf_ct_tuplehash_to_ctrack(h)->ct_general;
h 150 net/ipv4/netfilter/nf_nat_core.c unsigned int h = hash_by_src(tuple);
h 156 net/ipv4/netfilter/nf_nat_core.c hlist_for_each_entry_rcu(nat, n, &net->ipv4.nat_bysource[h], bysource) {
h 87 net/ipv4/raw.c struct raw_hashinfo *h = sk->sk_prot->h.raw_hash;
h 90 net/ipv4/raw.c head = &h->ht[inet_sk(sk)->num & (RAW_HTABLE_SIZE - 1)];
h 92 net/ipv4/raw.c write_lock_bh(&h->lock);
h 95 net/ipv4/raw.c write_unlock_bh(&h->lock);
h 101 net/ipv4/raw.c struct raw_hashinfo *h = sk->sk_prot->h.raw_hash;
h 103 net/ipv4/raw.c write_lock_bh(&h->lock);
h 106 net/ipv4/raw.c write_unlock_bh(&h->lock);
h 843 net/ipv4/raw.c .h.raw_hash = &raw_v4_hashinfo,
h 860 net/ipv4/raw.c sk_for_each(sk, node, &state->h->ht[state->bucket])
h 880 net/ipv4/raw.c sk = sk_head(&state->h->ht[state->bucket]);
h 900 net/ipv4/raw.c read_lock(&state->h->lock);
h 922 net/ipv4/raw.c read_unlock(&state->h->lock);
h 972 net/ipv4/raw.c i->h = h;
h 2844 net/ipv4/route.c int h, s_h;
h 2854 net/ipv4/route.c for (h = s_h; h <= rt_hash_mask; h++, s_idx = 0) {
h 2855 net/ipv4/route.c if (!rt_hash_table[h].chain)
h 2858 net/ipv4/route.c for (rt = rcu_dereference(rt_hash_table[h].chain), idx = 0; rt;
h 2878 net/ipv4/route.c cb->args[0] = h;
h 2383 net/ipv4/tcp_ipv4.c .h.hashinfo = &tcp_hashinfo,
h 157 net/ipv4/udp.c struct hlist_head *udptable = sk->sk_prot->h.udp_hash;
h 1491 net/ipv4/udp.c .h.udp_hash = udp_hash,
h 53 net/ipv4/udplite.c .h.udp_hash = udplite_hash,
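The .h.hashinfo, .h.udp_hash and .h.raw_hash initializers above all fill the same union inside struct proto: each transport picks the lookup-table flavour it needs, and generic socket code reaches it through sk->sk_prot->h. A compact sketch of that ops-struct union pattern; the struct names below are invented for illustration:

    #include <stdio.h>

    struct conn_table { const char *kind; };

    /* One ops struct per protocol; the union holds whichever table
     * layout that protocol uses for socket lookup. */
    struct proto_ops {
        const char *name;
        union {
            struct conn_table *hashinfo;    /* TCP-style ehash */
            struct conn_table *udp_hash;    /* plain UDP chains */
        } h;
    };

    static struct conn_table tcp_tbl = { "established hash" };
    static struct conn_table udp_tbl = { "udp chains" };

    static struct proto_ops tcp_proto = { "tcp", { .hashinfo = &tcp_tbl } };
    static struct proto_ops udp_proto = { "udp", { .udp_hash = &udp_tbl } };

    int main(void)
    {
        /* Generic code dereferences the member matching the protocol. */
        printf("%s -> %s\n", tcp_proto.name, tcp_proto.h.hashinfo->kind);
        printf("%s -> %s\n", udp_proto.name, udp_proto.h.udp_hash->kind);
        return 0;
    }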
h 132 net/ipv6/addrlabel.c ip6addrlbl_free(container_of(h, struct ip6addrlbl_entry, rcu));
h 700 net/ipv6/exthdrs.c struct ipv6_opt_hdr *h = (struct ipv6_opt_hdr *)skb_push(skb, ipv6_optlen(opt));
h 702 net/ipv6/exthdrs.c memcpy(h, opt, ipv6_optlen(opt));
h 703 net/ipv6/exthdrs.c h->nexthdr = *proto;
h 118 net/ipv6/inet6_connection_sock.c const u32 h = inet6_synq_hash(&inet6_rsk(req)->rmt_addr,
h 122 net/ipv6/inet6_connection_sock.c reqsk_queue_hash_req(&icsk->icsk_accept_queue, h, req, timeout);
h 27 net/ipv6/inet6_hashtables.c struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
h 175 net/ipv6/ip6_fib.c unsigned int h;
h 183 net/ipv6/ip6_fib.c h = tb->tb6_id & (FIB_TABLE_HASHSZ - 1);
h 189 net/ipv6/ip6_fib.c hlist_add_head_rcu(&tb->tb6_hlist, &net->ipv6.fib_table_hash[h]);
h 230 net/ipv6/ip6_fib.c unsigned int h;
h 234 net/ipv6/ip6_fib.c h = id & (FIB_TABLE_HASHSZ - 1);
h 236 net/ipv6/ip6_fib.c head = &net->ipv6.fib_table_hash[h];
h 348 net/ipv6/ip6_fib.c unsigned int h, s_h;
h 384 net/ipv6/ip6_fib.c for (h = s_h; h < FIB_TABLE_HASHSZ; h++, s_e = 0) {
h 386 net/ipv6/ip6_fib.c head = &net->ipv6.fib_table_hash[h];
h 399 net/ipv6/ip6_fib.c cb->args[0] = h;
h 1367 net/ipv6/ip6_fib.c unsigned int h;
h 1370 net/ipv6/ip6_fib.c for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
h 1371 net/ipv6/ip6_fib.c head = &net->ipv6.fib_table_hash[h];
h 169 net/ipv6/ip6_tunnel.c unsigned h = 0;
h 174 net/ipv6/ip6_tunnel.c h = HASH(remote) ^ HASH(local);
h 176 net/ipv6/ip6_tunnel.c return &ip6n->tnls[prio][h];
h 1396 net/ipv6/ip6_tunnel.c int h;
h 1399 net/ipv6/ip6_tunnel.c for (h = 0; h < HASH_SIZE; h++) {
h 1400 net/ipv6/ip6_tunnel.c while ((t = ip6n->tnls_r_l[h]) != NULL)
h 763 net/ipv6/netfilter/ip6_tables.c unsigned int h;
h 779 net/ipv6/netfilter/ip6_tables.c for (h = 0; h < NF_INET_NUMHOOKS; h++) {
h 780 net/ipv6/netfilter/ip6_tables.c if ((unsigned char *)e - base == hook_entries[h])
h 781 net/ipv6/netfilter/ip6_tables.c newinfo->hook_entry[h] = hook_entries[h];
h 782 net/ipv6/netfilter/ip6_tables.c if ((unsigned char *)e - base == underflows[h])
h 783 net/ipv6/netfilter/ip6_tables.c newinfo->underflow[h] = underflows[h];
h 1573 net/ipv6/netfilter/ip6_tables.c int ret, off, h;
h 1622 net/ipv6/netfilter/ip6_tables.c for (h = 0; h < NF_INET_NUMHOOKS; h++) {
h 1623 net/ipv6/netfilter/ip6_tables.c if ((unsigned char *)e - base == hook_entries[h])
h 1624 net/ipv6/netfilter/ip6_tables.c newinfo->hook_entry[h] = hook_entries[h];
h 1625 net/ipv6/netfilter/ip6_tables.c if ((unsigned char *)e - base == underflows[h])
h 1626 net/ipv6/netfilter/ip6_tables.c newinfo->underflow[h] = underflows[h];
h 1652 net/ipv6/netfilter/ip6_tables.c int ret, h;
h 1673 net/ipv6/netfilter/ip6_tables.c for (h = 0; h < NF_INET_NUMHOOKS; h++) {
h 1674 net/ipv6/netfilter/ip6_tables.c if ((unsigned char *)de - base < newinfo->hook_entry[h])
h 1675 net/ipv6/netfilter/ip6_tables.c newinfo->hook_entry[h] -= origsize - *size;
h 1676 net/ipv6/netfilter/ip6_tables.c if ((unsigned char *)de - base < newinfo->underflow[h])
h 1677 net/ipv6/netfilter/ip6_tables.c newinfo->underflow[h] -= origsize - *size;
h 132 net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c const struct nf_conntrack_tuple_hash *h;
h 160 net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c h = nf_conntrack_find_get(net, &intuple);
h 161 net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c if (!h) {
h 165 net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY)
h 170 net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c skb->nfct = &nf_ct_tuplehash_to_ctrack(h)->ct_general;
h 54 net/ipv6/netfilter/nf_conntrack_reasm.c struct inet6_skb_parm h;
h 1211 net/ipv6/raw.c .h.raw_hash = &raw_v6_hashinfo,
h 59 net/ipv6/reassembly.c struct inet6_skb_parm h;
h 113 net/ipv6/sit.c unsigned h = 0;
h 118 net/ipv6/sit.c h ^= HASH(remote);
h 122 net/ipv6/sit.c h ^= HASH(local);
h 124 net/ipv6/sit.c return &sitn->tunnels[prio][h];
h 994 net/ipv6/sit.c int h;
h 995 net/ipv6/sit.c for (h = 0; h < HASH_SIZE; h++) {
h 997 net/ipv6/sit.c while ((t = sitn->tunnels[prio][h]) != NULL)
h 2050 net/ipv6/tcp_ipv6.c .h.hashinfo = &tcp_hashinfo,
h 1053 net/ipv6/udp.c .h.udp_hash = udp_hash,
h 52 net/ipv6/udplite.c .h.udp_hash = udplite_hash,
h 63 net/ipv6/xfrm6_tunnel.c unsigned h;
h 65 net/ipv6/xfrm6_tunnel.c h = (__force u32)(addr->a6[0] ^ addr->a6[1] ^ addr->a6[2] ^ addr->a6[3]);
h 66 net/ipv6/xfrm6_tunnel.c h ^= h >> 16;
h 67 net/ipv6/xfrm6_tunnel.c h ^= h >> 8;
h 68 net/ipv6/xfrm6_tunnel.c h &= XFRM6_TUNNEL_SPI_BYADDR_HSIZE - 1;
h 70 net/ipv6/xfrm6_tunnel.c return h;
h 214 net/irda/irqueue.c __u32 h = 0;
h 218 net/irda/irqueue.c h = (h<<4) + *name++;
h 219 net/irda/irqueue.c if ((g = (h & 0xf0000000)))
h 220 net/irda/irqueue.c h ^=g>>24;
h 221 net/irda/irqueue.c h &=~g;
h 223 net/irda/irqueue.c return h;
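The IrDA hash above is the classic PJW/ELF string hash: shift the accumulator left four bits, add the next character, and fold any bits that overflowed into the top nibble back down before clearing them. A standalone rendition (the function name is illustrative):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t pjw_hash(const char *name)
    {
        uint32_t h = 0, g;

        while (*name) {
            h = (h << 4) + (unsigned char)*name++;
            g = h & 0xf0000000u;        /* bits about to overflow */
            if (g)
                h ^= g >> 24;           /* fold them into the low bits */
            h &= ~g;                    /* and clear the top nibble */
        }
        return h;
    }

    int main(void)
    {
        printf("%08x\n", pjw_hash("irda"));
        return 0;
    }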
h 465 net/mac80211/rx.c #define msh_h_get(h, l) ((struct ieee80211s_hdr *) ((u8 *)h + l))
h 258 net/netfilter/core.c int i, h;
h 260 net/netfilter/core.c for (h = 0; h < NF_MAX_HOOKS; h++)
h 261 net/netfilter/core.c INIT_LIST_HEAD(&nf_hooks[i][h]);
h 65 net/netfilter/nf_conntrack_core.c u_int32_t h;
h 72 net/netfilter/nf_conntrack_core.c h = jhash2((u32 *)tuple, n,
h 76 net/netfilter/nf_conntrack_core.c return ((u64)h * size) >> 32;
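__hash_conntrack above avoids a modulo for non-power-of-two table sizes: after jhash2 mixes the tuple, ((u64)h * size) >> 32 maps the full 32-bit hash uniformly onto [0, size). A sketch of that multiply-shift range reduction, with an ad-hoc multiplier standing in for jhash2 (the stand-in mixer is illustrative, not the kernel's):

    #include <stdint.h>
    #include <stdio.h>

    /* Map a 32-bit hash onto [0, size) without a division: the high
     * 32 bits of hash * size are uniform whenever hash is. */
    static uint32_t reduce(uint32_t hash, uint32_t size)
    {
        return (uint32_t)(((uint64_t)hash * size) >> 32);
    }

    int main(void)
    {
        uint32_t size = 16384;              /* need not be a power of two */

        for (uint32_t h = 0; h < 4; h++) {
            uint32_t mixed = h * 2654435761u;   /* toy stand-in mixer */
            printf("%u -> bucket %u\n", h, reduce(mixed, size));
        }
        return 0;
    }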
h 240 net/netfilter/nf_conntrack_core.c struct nf_conntrack_tuple_hash *h;
h 248 net/netfilter/nf_conntrack_core.c hlist_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnode) {
h 249 net/netfilter/nf_conntrack_core.c if (nf_ct_tuple_equal(tuple, &h->tuple)) {
h 252 net/netfilter/nf_conntrack_core.c return h;
h 266 net/netfilter/nf_conntrack_core.c struct nf_conntrack_tuple_hash *h;
h 270 net/netfilter/nf_conntrack_core.c h = __nf_conntrack_find(net, tuple);
h 271 net/netfilter/nf_conntrack_core.c if (h) {
h 272 net/netfilter/nf_conntrack_core.c ct = nf_ct_tuplehash_to_ctrack(h);
h 274 net/netfilter/nf_conntrack_core.c h = NULL;
h 278 net/netfilter/nf_conntrack_core.c return h;
h 312 net/netfilter/nf_conntrack_core.c struct nf_conntrack_tuple_hash *h;
h 347 net/netfilter/nf_conntrack_core.c hlist_for_each_entry(h, n, &net->ct.hash[hash], hnode)
h 349 net/netfilter/nf_conntrack_core.c &h->tuple))
h 351 net/netfilter/nf_conntrack_core.c hlist_for_each_entry(h, n, &net->ct.hash[repl_hash], hnode)
h 353 net/netfilter/nf_conntrack_core.c &h->tuple))
h 395 net/netfilter/nf_conntrack_core.c struct nf_conntrack_tuple_hash *h;
h 403 net/netfilter/nf_conntrack_core.c hlist_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnode) {
h 404 net/netfilter/nf_conntrack_core.c if (nf_ct_tuplehash_to_ctrack(h) != ignored_conntrack &&
h 405 net/netfilter/nf_conntrack_core.c nf_ct_tuple_equal(tuple, &h->tuple)) {
h 425 net/netfilter/nf_conntrack_core.c struct nf_conntrack_tuple_hash *h;
h 433 net/netfilter/nf_conntrack_core.c hlist_for_each_entry_rcu(h, n, &net->ct.hash[hash],
h 435 net/netfilter/nf_conntrack_core.c tmp = nf_ct_tuplehash_to_ctrack(h);
h 623 net/netfilter/nf_conntrack_core.c struct nf_conntrack_tuple_hash *h;
h 634 net/netfilter/nf_conntrack_core.c h = nf_conntrack_find_get(net, &tuple);
h 635 net/netfilter/nf_conntrack_core.c if (!h) {
h 636 net/netfilter/nf_conntrack_core.c h = init_conntrack(net, &tuple, l3proto, l4proto, skb, dataoff);
h 637 net/netfilter/nf_conntrack_core.c if (!h)
h 639 net/netfilter/nf_conntrack_core.c if (IS_ERR(h))
h 640 net/netfilter/nf_conntrack_core.c return (void *)h;
h 642 net/netfilter/nf_conntrack_core.c ct = nf_ct_tuplehash_to_ctrack(h);
h 645 net/netfilter/nf_conntrack_core.c if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) {
h 949 net/netfilter/nf_conntrack_core.c struct nf_conntrack_tuple_hash *h;
h 955 net/netfilter/nf_conntrack_core.c hlist_for_each_entry(h, n, &net->ct.hash[*bucket], hnode) {
h 956 net/netfilter/nf_conntrack_core.c ct = nf_ct_tuplehash_to_ctrack(h);
h 961 net/netfilter/nf_conntrack_core.c hlist_for_each_entry(h, n, &net->ct.unconfirmed, hnode) {
h 962 net/netfilter/nf_conntrack_core.c ct = nf_ct_tuplehash_to_ctrack(h);
h 1092 net/netfilter/nf_conntrack_core.c struct nf_conntrack_tuple_hash *h;
h 1118 net/netfilter/nf_conntrack_core.c h = hlist_entry(init_net.ct.hash[i].first,
h 1120 net/netfilter/nf_conntrack_core.c hlist_del_rcu(&h->hnode);
h 1121 net/netfilter/nf_conntrack_core.c bucket = __hash_conntrack(&h->tuple, hashsize, rnd);
h 1122 net/netfilter/nf_conntrack_core.c hlist_add_head(&h->hnode, &hash[bucket]);
h 90 net/netfilter/nf_conntrack_expect.c unsigned int h;
h 95 net/netfilter/nf_conntrack_expect.c h = nf_ct_expect_dst_hash(tuple);
h 96 net/netfilter/nf_conntrack_expect.c hlist_for_each_entry_rcu(i, n, &net->ct.expect_hash[h], hnode) {
h 127 net/netfilter/nf_conntrack_expect.c unsigned int h;
h 132 net/netfilter/nf_conntrack_expect.c h = nf_ct_expect_dst_hash(tuple);
h 133 net/netfilter/nf_conntrack_expect.c hlist_for_each_entry(i, n, &net->ct.expect_hash[h], hnode) {
h 312 net/netfilter/nf_conntrack_expect.c unsigned int h = nf_ct_expect_dst_hash(&exp->tuple);
h 319 net/netfilter/nf_conntrack_expect.c hlist_add_head_rcu(&exp->hnode, &net->ct.expect_hash[h]);
h 373 net/netfilter/nf_conntrack_expect.c unsigned int h;
h 383 net/netfilter/nf_conntrack_expect.c h = nf_ct_expect_dst_hash(&expect->tuple);
h 384 net/netfilter/nf_conntrack_expect.c hlist_for_each_entry(i, n, &net->ct.expect_hash[h], hnode) {
h 53 net/netfilter/nf_conntrack_helper.c unsigned int h;
h 58 net/netfilter/nf_conntrack_helper.c h = helper_hash(tuple);
h 59 net/netfilter/nf_conntrack_helper.c hlist_for_each_entry_rcu(helper, n, &nf_ct_helper_hash[h], hnode) {
h 70 net/netfilter/nf_conntrack_helper.c struct nf_conntrack_helper *h;
h 75 net/netfilter/nf_conntrack_helper.c hlist_for_each_entry_rcu(h, n, &nf_ct_helper_hash[i], hnode) {
h 76 net/netfilter/nf_conntrack_helper.c if (!strcmp(h->name, name))
h 77 net/netfilter/nf_conntrack_helper.c return h;
h 112 net/netfilter/nf_conntrack_helper.c unsigned int h = helper_hash(&me->tuple);
h 118 net/netfilter/nf_conntrack_helper.c hlist_add_head_rcu(&me->hnode, &nf_ct_helper_hash[h]);
h 129 net/netfilter/nf_conntrack_helper.c struct nf_conntrack_tuple_hash *h;
h 148 net/netfilter/nf_conntrack_helper.c hlist_for_each_entry(h, n, &net->ct.unconfirmed, hnode)
h 149 net/netfilter/nf_conntrack_helper.c unhelp(h, me);
h 151 net/netfilter/nf_conntrack_helper.c hlist_for_each_entry(h, n, &net->ct.hash[i], hnode)
h 152 net/netfilter/nf_conntrack_helper.c unhelp(h, me);
h 543 net/netfilter/nf_conntrack_netlink.c struct nf_conntrack_tuple_hash *h;
h 552 net/netfilter/nf_conntrack_netlink.c hlist_for_each_entry_rcu(h, n, &init_net.ct.hash[cb->args[0]],
h 554 net/netfilter/nf_conntrack_netlink.c if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
h 556 net/netfilter/nf_conntrack_netlink.c ct = nf_ct_tuplehash_to_ctrack(h);
h 784 net/netfilter/nf_conntrack_netlink.c struct nf_conntrack_tuple_hash *h;
h 804 net/netfilter/nf_conntrack_netlink.c h = nf_conntrack_find_get(&init_net, &tuple);
h 805 net/netfilter/nf_conntrack_netlink.c if (!h)
h 808 net/netfilter/nf_conntrack_netlink.c ct = nf_ct_tuplehash_to_ctrack(h);
h 828 net/netfilter/nf_conntrack_netlink.c struct nf_conntrack_tuple_hash *h;
h 850 net/netfilter/nf_conntrack_netlink.c h = nf_conntrack_find_get(&init_net, &tuple);
h 851 net/netfilter/nf_conntrack_netlink.c if (!h)
h 854 net/netfilter/nf_conntrack_netlink.c ct = nf_ct_tuplehash_to_ctrack(h);
h 1197 net/netfilter/nf_conntrack_netlink.c struct nf_conntrack_tuple_hash *h = NULL;
h 1216 net/netfilter/nf_conntrack_netlink.c h = __nf_conntrack_find(&init_net, &otuple);
h 1218 net/netfilter/nf_conntrack_netlink.c h = __nf_conntrack_find(&init_net, &rtuple);
h 1220 net/netfilter/nf_conntrack_netlink.c if (h == NULL) {
h 1270 net/netfilter/nf_conntrack_netlink.c err = ctnetlink_change_conntrack(nf_ct_tuplehash_to_ctrack(h),
h 1573 net/netfilter/nf_conntrack_netlink.c struct nf_conntrack_helper *h;
h 1610 net/netfilter/nf_conntrack_netlink.c h = __nf_conntrack_helper_find_byname(name);
h 1611 net/netfilter/nf_conntrack_netlink.c if (!h) {
h 1620 net/netfilter/nf_conntrack_netlink.c if (m_help->helper == h
h 1656 net/netfilter/nf_conntrack_netlink.c struct nf_conntrack_tuple_hash *h = NULL;
h 1674 net/netfilter/nf_conntrack_netlink.c h = nf_conntrack_find_get(&init_net, &master_tuple);
h 1675 net/netfilter/nf_conntrack_netlink.c if (!h)
h 1677 net/netfilter/nf_conntrack_netlink.c ct = nf_ct_tuplehash_to_ctrack(h);
h 1704 net/netfilter/nf_conntrack_netlink.c nf_ct_put(nf_ct_tuplehash_to_ctrack(h));
h 141 net/netfilter/nf_conntrack_pptp.c const struct nf_conntrack_tuple_hash *h;
h 148 net/netfilter/nf_conntrack_pptp.c h = nf_conntrack_find_get(net, t);
h 149 net/netfilter/nf_conntrack_pptp.c if (h) {
h 150 net/netfilter/nf_conntrack_pptp.c sibling = nf_ct_tuplehash_to_ctrack(h);
h 857 net/netfilter/nfnetlink_log.c h = h->next;
h 858 net/netfilter/nfnetlink_log.c while (!h) {
h 862 net/netfilter/nfnetlink_log.c h = instance_table[st->bucket].first;
h 864 net/netfilter/nfnetlink_log.c return h;
h 93 net/netfilter/nfnetlink_queue.c unsigned int h;
h 122 net/netfilter/nfnetlink_queue.c h = instance_hashfn(queue_num);
h 123 net/netfilter/nfnetlink_queue.c hlist_add_head_rcu(&inst->hlist, &instance_table[h]);
h 819 net/netfilter/nfnetlink_queue.c h = h->next;
h 820 net/netfilter/nfnetlink_queue.c while (!h) {
h 824 net/netfilter/nfnetlink_queue.c h = instance_table[st->bucket].first;
h 826 net/netfilter/nfnetlink_queue.c return h;
h 35 net/netfilter/xt_RATEEST.c unsigned int h;
h 37 net/netfilter/xt_RATEEST.c h = xt_rateest_hash(est->name);
h 38 net/netfilter/xt_RATEEST.c hlist_add_head(&est->list, &rateest_hash[h]);
h 45 net/netfilter/xt_RATEEST.c unsigned int h;
h 47 net/netfilter/xt_RATEEST.c h = xt_rateest_hash(name);
h 49 net/netfilter/xt_RATEEST.c hlist_for_each_entry(est, n, &rateest_hash[h], list) {
h 125 net/netfilter/xt_recent.c unsigned int h;
h 128 net/netfilter/xt_recent.c h = recent_entry_hash4(addrp);
h 130 net/netfilter/xt_recent.c h = recent_entry_hash6(addrp);
h 132 net/netfilter/xt_recent.c list_for_each_entry(e, &table->iphash[h], list)
h 78 net/netlabel/netlabel_addrlist.h while (i != h && !n->valid) {
h 90 net/netlabel/netlabel_addrlist.h while (i != h && !n->valid) {
h 136 net/netlabel/netlabel_addrlist.h while (i != h && !n->valid) {
h 148 net/netlabel/netlabel_addrlist.h while (i != h && !n->valid) {
h 215 net/packet/af_packet.c } h;
h 220 net/packet/af_packet.c h.raw = po->pg_vec[pg_vec_pos] + (frame_offset * po->frame_size);
h 223 net/packet/af_packet.c if (status != h.h1->tp_status ? TP_STATUS_USER :
h 228 net/packet/af_packet.c if (status != h.h2->tp_status ? TP_STATUS_USER :
h 233 net/packet/af_packet.c return h.raw;
h 242 net/packet/af_packet.c } h;
h 244 net/packet/af_packet.c h.raw = frame;
h 247 net/packet/af_packet.c h.h1->tp_status = status;
h 250 net/packet/af_packet.c h.h2->tp_status = status;
h 599 net/packet/af_packet.c } h;
h 668 net/packet/af_packet.c h.raw = packet_lookup_frame(po, po->head, TP_STATUS_KERNEL);
h 669 net/packet/af_packet.c if (!h.raw)
h 681 net/packet/af_packet.c skb_copy_bits(skb, 0, h.raw + macoff, snaplen);
h 685 net/packet/af_packet.c h.h1->tp_len = skb->len;
h 686 net/packet/af_packet.c h.h1->tp_snaplen = snaplen;
h 687 net/packet/af_packet.c h.h1->tp_mac = macoff;
h 688 net/packet/af_packet.c h.h1->tp_net = netoff;
h 693 net/packet/af_packet.c h.h1->tp_sec = tv.tv_sec;
h 694 net/packet/af_packet.c h.h1->tp_usec = tv.tv_usec;
h 695 net/packet/af_packet.c hdrlen = sizeof(*h.h1);
h 698 net/packet/af_packet.c h.h2->tp_len = skb->len;
h 699 net/packet/af_packet.c h.h2->tp_snaplen = snaplen;
h 700 net/packet/af_packet.c h.h2->tp_mac = macoff;
h 701 net/packet/af_packet.c h.h2->tp_net = netoff;
h 706 net/packet/af_packet.c h.h2->tp_sec = ts.tv_sec;
h 707 net/packet/af_packet.c h.h2->tp_nsec = ts.tv_nsec;
h 708 net/packet/af_packet.c h.h2->tp_vlan_tci = skb->vlan_tci;
h 709 net/packet/af_packet.c hdrlen = sizeof(*h.h2);
h 715 net/packet/af_packet.c sll = h.raw + TPACKET_ALIGN(hdrlen);
h 726 net/packet/af_packet.c __packet_set_status(po, h.raw, status);
h 731 net/packet/af_packet.c u8 *h_end = h.raw + macoff + snaplen - 1;
h 733 net/packet/af_packet.c p_start = virt_to_page(h.raw);
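af_packet above overlays the two mmap ring-header layouts in one union: h.raw serves the address arithmetic, while h.h1/h.h2 expose whichever header version the socket negotiated, so a single code path fills either layout. A minimal sketch of the versioned-header union; the struct layouts below are invented for illustration:

    #include <stdint.h>
    #include <stdio.h>

    struct hdr_v1 { uint32_t status; uint32_t len; };
    struct hdr_v2 { uint32_t status; uint32_t len; uint32_t vlan; };

    enum { VER_1, VER_2 };

    /* One cursor, three views of the same frame slot. */
    union frame {
        struct hdr_v1 *h1;
        struct hdr_v2 *h2;
        void          *raw;
    };

    static void fill(union frame h, int version, uint32_t len)
    {
        switch (version) {
        case VER_1:
            h.h1->len = len;
            h.h1->status = 1;
            break;
        case VER_2:
            h.h2->len = len;
            h.h2->vlan = 0;
            h.h2->status = 1;
            break;
        }
    }

    int main(void)
    {
        uint32_t slot[16] = { 0 };          /* stand-in for a ring frame */
        union frame h = { .raw = slot };

        fill(h, VER_2, 1500);
        printf("status %u len %u\n", h.h2->status, h.h2->len);
        return 0;
    }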
h 67 net/phonet/pep.c } *ph, h;
h 70 net/phonet/pep.c ph = skb_header_pointer(skb, 0, 2, &h);
h 30 net/sched/act_api.c unsigned int h = tcf_hash(p->tcfc_index, hinfo->hmask);
h 33 net/sched/act_api.c for (p1p = &hinfo->htab[h]; *p1p; p1p = &(*p1p)->tcfc_next) {
h 241 net/sched/act_api.c unsigned int h = tcf_hash(p->tcfc_index, hinfo->hmask);
h 244 net/sched/act_api.c p->tcfc_next = hinfo->htab[h];
h 245 net/sched/act_api.c hinfo->htab[h] = p;
h 587 net/sched/act_api.c struct tcf_act_hdr *h = a->priv;
h 589 net/sched/act_api.c if (h == NULL)
h 598 net/sched/act_api.c TCA_STATS, TCA_XSTATS, &h->tcf_lock, &d);
h 603 net/sched/act_api.c &h->tcf_lock, &d);
h 612 net/sched/act_api.c if (gnet_stats_copy_basic(&d, &h->tcf_bstats) < 0 ||
h 613 net/sched/act_api.c gnet_stats_copy_rate_est(&d, &h->tcf_rate_est) < 0 ||
h 614 net/sched/act_api.c gnet_stats_copy_queue(&d, &h->tcf_qstats) < 0)
h 101 net/sched/act_police.c unsigned int h = tcf_hash(p->tcf_index, POL_TAB_MASK);
h 104 net/sched/act_police.c for (p1p = &tcf_police_ht[h]; *p1p; p1p = &(*p1p)->tcfc_next) {
h 132 net/sched/act_police.c unsigned h;
h 232 net/sched/act_police.c h = tcf_hash(police->tcf_index, POL_TAB_MASK);
h 234 net/sched/act_police.c police->tcf_next = tcf_police_ht[h];
h 235 net/sched/act_police.c tcf_police_ht[h] = &police->common;
h 153 net/sched/cls_fw.c int h;
h 158 net/sched/cls_fw.c for (h=0; h<HTSIZE; h++) {
h 159 net/sched/cls_fw.c while ((f=head->ht[h]) != NULL) {
h 160 net/sched/cls_fw.c head->ht[h] = f->next;
h 305 net/sched/cls_fw.c int h;
h 313 net/sched/cls_fw.c for (h = 0; h < HTSIZE; h++) {
h 316 net/sched/cls_fw.c for (f = head->ht[h]; f; f = f->next) {
h 89 net/sched/cls_route.c int h = route4_fastmap_hash(id, iif);
h 90 net/sched/cls_route.c head->fastmap[h].id = id;
h 91 net/sched/cls_route.c head->fastmap[h].iif = iif;
h 92 net/sched/cls_route.c head->fastmap[h].filter = f;
h 137 net/sched/cls_route.c u32 id, h;
h 149 net/sched/cls_route.c h = route4_fastmap_hash(id, iif);
h 150 net/sched/cls_route.c if (id == head->fastmap[h].id &&
h 151 net/sched/cls_route.c iif == head->fastmap[h].iif &&
h 152 net/sched/cls_route.c (f = head->fastmap[h].filter) != NULL) {
h 160 net/sched/cls_route.c h = route4_hash_to(id);
h 163 net/sched/cls_route.c if ((b = head->table[h]) != NULL) {
h 176 net/sched/cls_route.c if (h < 256) {
h 177 net/sched/cls_route.c h = 256;
h 199 net/sched/cls_route.c u32 h = id&0xFF;
h 201 net/sched/cls_route.c h += 256;
h 202 net/sched/cls_route.c return h;
h 291 net/sched/cls_route.c unsigned h = 0;
h 298 net/sched/cls_route.c h = f->handle;
h 301 net/sched/cls_route.c for (fp = &b->ht[from_hash(h>>16)]; *fp; fp = &(*fp)->next) {
h 318 net/sched/cls_route.c head->table[to_hash(h)] = NULL;
h 435 net/sched/cls_route.c unsigned int h, th;
h 482 net/sched/cls_route.c h = from_hash(f->handle >> 16);
h 483 net/sched/cls_route.c for (fp = &f->bkt->ht[h]; (f1=*fp) != NULL; fp = &f1->next)
h 493 net/sched/cls_route.c h = from_hash(old_handle >> 16);
h 495 net/sched/cls_route.c for (fp = &b->ht[h]; *fp; fp = &(*fp)->next) {
h 517 net/sched/cls_route.c unsigned h, h1;
h 525 net/sched/cls_route.c for (h = 0; h <= 256; h++) {
h 526 net/sched/cls_route.c struct route4_bucket *b = head->table[h];
h 105 net/sched/cls_rsvp.h unsigned h = (__force __u32)dst[RSVP_DST_LEN-1];
h 106 net/sched/cls_rsvp.h h ^= h>>16;
h 107 net/sched/cls_rsvp.h h ^= h>>8;
h 108 net/sched/cls_rsvp.h return (h ^ protocol ^ tunnelid) & 0xFF;
h 113 net/sched/cls_rsvp.h unsigned h = (__force __u32)src[RSVP_DST_LEN-1];
h 114 net/sched/cls_rsvp.h h ^= h>>16;
h 115 net/sched/cls_rsvp.h h ^= h>>8;
h 116 net/sched/cls_rsvp.h h ^= h>>4;
h 117 net/sched/cls_rsvp.h return h & 0xF;
h 293 net/sched/cls_rsvp.h unsigned h = f->handle;
h 298 net/sched/cls_rsvp.h for (fp = &s->ht[(h>>8)&0xFF]; *fp; fp = &(*fp)->next) {
h 312 net/sched/cls_rsvp.h for (sp = &((struct rsvp_head*)tp->root)->ht[h&0xFF];
h 336 net/sched/cls_rsvp.h u32 h;
h 339 net/sched/cls_rsvp.h h = data->hgenerator|salt;
h 340 net/sched/cls_rsvp.h if (rsvp_get(tp, h) == 0)
h 341 net/sched/cls_rsvp.h return h;
h 560 net/sched/cls_rsvp.h unsigned h, h1;
h 565 net/sched/cls_rsvp.h for (h = 0; h < 256; h++) {
h 568 net/sched/cls_rsvp.h for (s = head->ht[h]; s; s = s->next) {
h 49 net/sched/cls_tcindex.c struct tcindex_filter **h; /* imperfect hash; only used if !perfect;
h 77 net/sched/cls_tcindex.c else if (p->h) {
h 78 net/sched/cls_tcindex.c for (f = p->h[key % p->hash]; f; f = f->next)
h 166 net/sched/cls_tcindex.c for (walk = p->h+i; *walk; walk = &(*walk)->next)
h 245 net/sched/cls_tcindex.c } else if (cp.h && cp.hash != cp.alloc_hash)
h 262 net/sched/cls_tcindex.c if (!cp.perfect && !cp.h)
h 276 net/sched/cls_tcindex.c if (!cp.perfect && !cp.h) {
h 283 net/sched/cls_tcindex.c cp.h = kcalloc(cp.hash, sizeof(f), GFP_KERNEL);
h 284 net/sched/cls_tcindex.c if (!cp.h)
h 321 net/sched/cls_tcindex.c for (fp = p->h+(handle % p->hash); *fp; fp = &(*fp)->next)
h 333 net/sched/cls_tcindex.c kfree(cp.h);
h 386 net/sched/cls_tcindex.c if (!p->h)
h 389 net/sched/cls_tcindex.c for (f = p->h[i]; f; f = next) {
h 422 net/sched/cls_tcindex.c kfree(p->h);
h 438 net/sched/cls_tcindex.c pr_debug("p->perfect %p p->h %p\n", p->perfect, p->h);
h 460 net/sched/cls_tcindex.c for (f = p->h[i]; !t->tcm_handle && f;
h 91 net/sched/cls_u32.c unsigned h = ntohl(key & sel->hmask)>>fshift;
h 93 net/sched/cls_u32.c return h;
h 355 net/sched/cls_u32.c unsigned h;
h 357 net/sched/cls_u32.c for (h=0; h<=ht->divisor; h++) {
h 358 net/sched/cls_u32.c while ((n = ht->ht[h]) != NULL) {
h 359 net/sched/cls_u32.c ht->ht[h] = n->next;
h 658 net/sched/cls_u32.c unsigned h;
h 673 net/sched/cls_u32.c for (h = 0; h <= ht->divisor; h++) {
h 674 net/sched/cls_u32.c for (n = ht->ht[h]; n; n = n->next) {
h 502 net/sched/sch_api.c struct hlist_head *h;
h 505 net/sched/sch_api.c h = kmalloc(size, GFP_KERNEL);
h 507 net/sched/sch_api.c h = (struct hlist_head *)
h 510 net/sched/sch_api.c if (h != NULL) {
h 512 net/sched/sch_api.c INIT_HLIST_HEAD(&h[i]);
h 514 net/sched/sch_api.c return h;
h 522 net/sched/sch_api.c kfree(h);
h 524 net/sched/sch_api.c free_pages((unsigned long)h, get_order(size));
h 533 net/sched/sch_api.c unsigned int i, h;
h 550 net/sched/sch_api.c h = qdisc_class_hash(cl->classid, nmask);
h 551 net/sched/sch_api.c hlist_add_head(&cl->hnode, &nhash[h]);
h 586 net/sched/sch_api.c unsigned int h;
h 589 net/sched/sch_api.c h = qdisc_class_hash(cl->classid, clhash->hashmask);
h 590 net/sched/sch_api.c hlist_add_head(&cl->hnode, &clhash->hash[h]);
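The sch_api.c class-hash code above allocates small tables with kmalloc, falls back to whole pages for large ones, and grows by allocating a new power-of-two table and re-inserting every class at its position under the new mask. A userspace sketch of the grow-and-rehash step, using plain singly-linked chains in place of hlists (names are illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    struct cls { unsigned int id; struct cls *next; };

    /* Move every entry of the old table into one twice the size;
     * the new mask decides each entry's new bucket. */
    static struct cls **grow(struct cls **old, unsigned int osize,
                             unsigned int *nsize)
    {
        unsigned int n = osize * 2, nmask = n - 1, i;
        struct cls **nhash = calloc(n, sizeof(*nhash));

        if (!nhash)
            return NULL;
        for (i = 0; i < osize; i++) {
            struct cls *c, *next;
            for (c = old[i]; c; c = next) {
                next = c->next;
                unsigned int b = c->id & nmask;
                c->next = nhash[b];
                nhash[b] = c;
            }
        }
        free(old);
        *nsize = n;
        return nhash;
    }

    int main(void)
    {
        unsigned int size = 2;
        struct cls **tbl = calloc(size, sizeof(*tbl));
        struct cls a = { 5, NULL }, b = { 7, NULL };

        if (!tbl)
            return 1;
        tbl[5 & (size - 1)] = &a;           /* bucket 1 */
        b.next = tbl[7 & (size - 1)];       /* chains with a */
        tbl[7 & (size - 1)] = &b;

        tbl = grow(tbl, size, &size);
        for (unsigned int i = 0; tbl && i < size; i++)
            for (struct cls *c = tbl[i]; c; c = c->next)
                printf("bucket %u: id %u\n", i, c->id);
        free(tbl);
        return 0;
    }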
h 1076 net/sched/sch_cbq.c unsigned int h;
h 1081 net/sched/sch_cbq.c for (h = 0; h < q->clhash.hashsize; h++) {
h 1082 net/sched/sch_cbq.c hlist_for_each_entry(cl, n, &q->clhash.hash[h], common.hnode) {
h 1102 net/sched/sch_cbq.c unsigned h;
h 1119 net/sched/sch_cbq.c for (h = 0; h < q->clhash.hashsize; h++) {
h 1123 net/sched/sch_cbq.c hlist_for_each_entry(c, n, &q->clhash.hash[h],
h 1243 net/sched/sch_cbq.c unsigned h;
h 1258 net/sched/sch_cbq.c for (h = 0; h < q->clhash.hashsize; h++) {
h 1259 net/sched/sch_cbq.c hlist_for_each_entry(cl, n, &q->clhash.hash[h], common.hnode) {
h 1729 net/sched/sch_cbq.c unsigned h;
h 1739 net/sched/sch_cbq.c for (h = 0; h < q->clhash.hashsize; h++) {
h 1740 net/sched/sch_cbq.c hlist_for_each_entry(cl, n, &q->clhash.hash[h], common.hnode)
h 1743 net/sched/sch_cbq.c for (h = 0; h < q->clhash.hashsize; h++) {
h 1744 net/sched/sch_cbq.c hlist_for_each_entry_safe(cl, n, next, &q->clhash.hash[h],
h 2025 net/sched/sch_cbq.c unsigned h;
h 2030 net/sched/sch_cbq.c for (h = 0; h < q->clhash.hashsize; h++) {
h 2031 net/sched/sch_cbq.c hlist_for_each_entry(cl, n, &q->clhash.hash[h], common.hnode) {
h 114 net/sched/sch_sfq.c return jhash_2words(h, h1, q->perturbation) & (SFQ_HASH_DIVISOR - 1);
h 119 net/sched/sch_sfq.c u32 h, h2;
h 125 net/sched/sch_sfq.c h = iph->daddr;
h 140 net/sched/sch_sfq.c h = iph->daddr.s6_addr32[3];
h 152 net/sched/sch_sfq.c h = (unsigned long)skb->dst ^ skb->protocol;
h 156 net/sched/sch_sfq.c return sfq_fold_hash(q, h, h2);
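sfq_hash above reduces each packet to two flow keys (addresses on one side, ports and protocol on the other) and mixes them with jhash_2words keyed by a per-qdisc perturbation value, so a sender who cannot guess the key cannot aim traffic at one bucket. A sketch of keyed two-word hashing with a reseedable key; the mixer below is a toy stand-in for jhash_2words, and in the kernel the perturbation is refreshed from a timer:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <time.h>

    #define DIVISOR 1024                    /* power-of-two bucket count */

    static uint32_t perturbation;           /* reseeded periodically */

    /* Toy stand-in for jhash_2words(a, b, key). */
    static uint32_t mix2(uint32_t a, uint32_t b, uint32_t key)
    {
        uint32_t h = a ^ key;

        h = h * 2654435761u ^ b;
        h ^= h >> 16;
        return h * 2246822519u;
    }

    static unsigned int flow_bucket(uint32_t daddr, uint32_t ports)
    {
        return mix2(daddr, ports, perturbation) & (DIVISOR - 1);
    }

    int main(void)
    {
        srand((unsigned)time(NULL));
        perturbation = (uint32_t)rand();    /* illustrative reseed */

        printf("bucket %u\n", flow_bucket(0x0a000001u, (1234u << 16) | 80u));
        return 0;
    }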
h 72 net/sunrpc/auth_gss/svcauth_gss.c struct cache_head h;
h 93 net/sunrpc/auth_gss/svcauth_gss.c struct rsi *rsii = container_of(ref, struct rsi, h.ref);
h 106 net/sunrpc/auth_gss/svcauth_gss.c struct rsi *item = container_of(a, struct rsi, h);
h 107 net/sunrpc/auth_gss/svcauth_gss.c struct rsi *tmp = container_of(b, struct rsi, h);
h 128 net/sunrpc/auth_gss/svcauth_gss.c struct rsi *new = container_of(cnew, struct rsi, h);
h 129 net/sunrpc/auth_gss/svcauth_gss.c struct rsi *item = container_of(citem, struct rsi, h);
h 147 net/sunrpc/auth_gss/svcauth_gss.c struct rsi *new = container_of(cnew, struct rsi, h);
h 148 net/sunrpc/auth_gss/svcauth_gss.c struct rsi *item = container_of(citem, struct rsi, h);
h 168 net/sunrpc/auth_gss/svcauth_gss.c return &rsii->h;
h 177 net/sunrpc/auth_gss/svcauth_gss.c struct rsi *rsii = container_of(h, struct rsi, h);
h 218 net/sunrpc/auth_gss/svcauth_gss.c rsii.h.flags = 0;
h 255 net/sunrpc/auth_gss/svcauth_gss.c rsii.h.expiry_time = expiry;
h 261 net/sunrpc/auth_gss/svcauth_gss.c cache_put(&rsip->h, &rsi_cache);
h 286 net/sunrpc/auth_gss/svcauth_gss.c ch = sunrpc_cache_lookup(&rsi_cache, &item->h, hash);
h 288 net/sunrpc/auth_gss/svcauth_gss.c return container_of(ch, struct rsi, h);
h 298 net/sunrpc/auth_gss/svcauth_gss.c ch = sunrpc_cache_update(&rsi_cache, &new->h,
h 299 net/sunrpc/auth_gss/svcauth_gss.c &old->h, hash);
h 301 net/sunrpc/auth_gss/svcauth_gss.c return container_of(ch, struct rsi, h);
h 330 net/sunrpc/auth_gss/svcauth_gss.c struct cache_head h;
h 353 net/sunrpc/auth_gss/svcauth_gss.c struct rsc *rsci = container_of(ref, struct rsc, h.ref);
h 368 net/sunrpc/auth_gss/svcauth_gss.c struct rsc *new = container_of(a, struct rsc, h);
h 369 net/sunrpc/auth_gss/svcauth_gss.c struct rsc *tmp = container_of(b, struct rsc, h);
h 377 net/sunrpc/auth_gss/svcauth_gss.c struct rsc *new = container_of(cnew, struct rsc, h);
h 378 net/sunrpc/auth_gss/svcauth_gss.c struct rsc *tmp = container_of(ctmp, struct rsc, h);
h 391 net/sunrpc/auth_gss/svcauth_gss.c struct rsc *new = container_of(cnew, struct rsc, h);
h 392 net/sunrpc/auth_gss/svcauth_gss.c struct rsc *tmp = container_of(ctmp, struct rsc, h);
h 407 net/sunrpc/auth_gss/svcauth_gss.c return &rsci->h;
h 431 net/sunrpc/auth_gss/svcauth_gss.c rsci.h.flags = 0;
h 447 net/sunrpc/auth_gss/svcauth_gss.c set_bit(CACHE_NEGATIVE, &rsci.h.flags);
h 490 net/sunrpc/auth_gss/svcauth_gss.c rsci.h.expiry_time = expiry;
h 497 net/sunrpc/auth_gss/svcauth_gss.c cache_put(&rscp->h, &rsc_cache);
h 521 net/sunrpc/auth_gss/svcauth_gss.c ch = sunrpc_cache_lookup(&rsc_cache, &item->h, hash);
h 523 net/sunrpc/auth_gss/svcauth_gss.c return container_of(ch, struct rsc, h);
h 533 net/sunrpc/auth_gss/svcauth_gss.c ch = sunrpc_cache_update(&rsc_cache, &new->h,
h 534 net/sunrpc/auth_gss/svcauth_gss.c &old->h, hash);
h 536 net/sunrpc/auth_gss/svcauth_gss.c return container_of(ch, struct rsc, h);
h 555 net/sunrpc/auth_gss/svcauth_gss.c if (cache_check(&rsc_cache, &found->h, NULL))
h 725 net/sunrpc/auth_gss/svcauth_gss.c struct auth_domain h;
h 744 net/sunrpc/auth_gss/svcauth_gss.c struct gss_domain *gd = container_of(dom, struct gss_domain, h);
h 761 net/sunrpc/auth_gss/svcauth_gss.c kref_init(&new->h.ref);
h 762 net/sunrpc/auth_gss/svcauth_gss.c new->h.name = kstrdup(name, GFP_KERNEL);
h 763 net/sunrpc/auth_gss/svcauth_gss.c if (!new->h.name)
h 765 net/sunrpc/auth_gss/svcauth_gss.c new->h.flavour = &svcauthops_gss;
h 769 net/sunrpc/auth_gss/svcauth_gss.c test = auth_domain_lookup(name, &new->h);
h 770 net/sunrpc/auth_gss/svcauth_gss.c if (test != &new->h) { /* Duplicate registration */
h 772 net/sunrpc/auth_gss/svcauth_gss.c kfree(new->h.name);
h 956 net/sunrpc/auth_gss/svcauth_gss.c cache_put(&rsci->h, &rsc_cache);
h 1007 net/sunrpc/auth_gss/svcauth_gss.c switch (cache_check(&rsi_cache, &rsip->h, &rqstp->rq_chandle)) {
h 1033 net/sunrpc/auth_gss/svcauth_gss.c cache_put(&rsip->h, &rsi_cache);
h 1129 net/sunrpc/auth_gss/svcauth_gss.c rsci->h.expiry_time = get_seconds();
h 1130 net/sunrpc/auth_gss/svcauth_gss.c set_bit(CACHE_NEGATIVE, &rsci->h.flags);
h 1166 net/sunrpc/auth_gss/svcauth_gss.c cache_get(&rsci->h);
h 1187 net/sunrpc/auth_gss/svcauth_gss.c cache_put(&rsci->h, &rsc_cache);
h 1365 net/sunrpc/auth_gss/svcauth_gss.c cache_put(&gsd->rsci->h, &rsc_cache);
h 1374 net/sunrpc/auth_gss/svcauth_gss.c struct gss_domain *gd = container_of(dom, struct gss_domain, h);
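The rsi/rsc code above embeds a generic struct cache_head as member h inside each protocol-specific entry; the shared cache layer sees only cache_head pointers, and svcauth_gss recovers its own type with container_of. A freestanding sketch of that embedding, with container_of written out and the entry struct invented for illustration:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct cache_head { int flags; long expiry_time; };

    /* Protocol-specific entry embedding the generic header. */
    struct rsi_entry {
        struct cache_head h;
        const char *principal;
    };

    /* The generic layer works only in terms of cache_head... */
    static void cache_touch(struct cache_head *ch, long now)
    {
        ch->expiry_time = now + 120;
    }

    int main(void)
    {
        struct rsi_entry e = { { 0, 0 }, "client@REALM" };
        struct cache_head *ch = &e.h;

        cache_touch(ch, 1000);
        /* ...and the owner recovers its type from the header. */
        struct rsi_entry *back = container_of(ch, struct rsi_entry, h);
        printf("%s expires at %ld\n", back->principal, back->h.expiry_time);
        return 0;
    }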
h 43 net/sunrpc/cache.c h->next = NULL;
h 44 net/sunrpc/cache.c h->flags = 0;
h 45 net/sunrpc/cache.c kref_init(&h->ref);
h 46 net/sunrpc/cache.c h->expiry_time = now + CACHE_NEW_EXPIRY;
h 47 net/sunrpc/cache.c h->last_refresh = now;
h 198 net/sunrpc/cache.c if (!test_bit(CACHE_VALID, &h->flags) ||
h 199 net/sunrpc/cache.c h->expiry_time < get_seconds())
h 201 net/sunrpc/cache.c else if (detail->flush_time > h->last_refresh)
h 205 net/sunrpc/cache.c if (test_bit(CACHE_NEGATIVE, &h->flags))
h 211 net/sunrpc/cache.c refresh_age = (h->expiry_time - h->last_refresh);
h 212 net/sunrpc/cache.c age = get_seconds() - h->last_refresh;
h 220 net/sunrpc/cache.c if (!test_and_set_bit(CACHE_PENDING, &h->flags)) {
h 221 net/sunrpc/cache.c switch (cache_make_upcall(detail, h)) {
h 223 net/sunrpc/cache.c clear_bit(CACHE_PENDING, &h->flags);
h 225 net/sunrpc/cache.c set_bit(CACHE_NEGATIVE, &h->flags);
h 226 net/sunrpc/cache.c cache_fresh_unlocked(h, detail,
h 227 net/sunrpc/cache.c cache_fresh_locked(h, get_seconds()+CACHE_NEW_EXPIRY));
h 233 net/sunrpc/cache.c clear_bit(CACHE_PENDING, &h->flags);
h 234 net/sunrpc/cache.c cache_revisit_request(h);
h 241 net/sunrpc/cache.c if (cache_defer_req(rqstp, h) != 0)
h 245 net/sunrpc/cache.c cache_put(h, detail);
h 1064 net/sunrpc/cache.c detail->cache_request(detail, h, &bp, &len);
h 1072 net/sunrpc/cache.c crq->item = cache_get(h);
h 28 net/sunrpc/svcauth_unix.c struct auth_domain h;
h 43 net/sunrpc/svcauth_unix.c if (new && rv != &new->h)
h 44 net/sunrpc/svcauth_unix.c auth_domain_put(&new->h);
h 56 net/sunrpc/svcauth_unix.c kref_init(&new->h.ref);
h 57 net/sunrpc/svcauth_unix.c new->h.name = kstrdup(name, GFP_KERNEL);
h 58 net/sunrpc/svcauth_unix.c if (new->h.name == NULL) {
h 62 net/sunrpc/svcauth_unix.c new->h.flavour = &svcauth_unix;
h 64 net/sunrpc/svcauth_unix.c rv = auth_domain_lookup(name, &new->h);
h 71 net/sunrpc/svcauth_unix.c struct unix_domain *ud = container_of(dom, struct unix_domain, h);
h 87 net/sunrpc/svcauth_unix.c struct cache_head h;
h 98 net/sunrpc/svcauth_unix.c struct ip_map *im = container_of(item, struct ip_map,h);
h 102 net/sunrpc/svcauth_unix.c auth_domain_put(&im->m_client->h);
h 126 net/sunrpc/svcauth_unix.c struct ip_map *orig = container_of(corig, struct ip_map, h);
h 127 net/sunrpc/svcauth_unix.c struct ip_map *new = container_of(cnew, struct ip_map, h);
h 133 net/sunrpc/svcauth_unix.c struct ip_map *new = container_of(cnew, struct ip_map, h);
h 134 net/sunrpc/svcauth_unix.c struct ip_map *item = container_of(citem, struct ip_map, h);
h 141 net/sunrpc/svcauth_unix.c struct ip_map *new = container_of(cnew, struct ip_map, h);
h 142 net/sunrpc/svcauth_unix.c struct ip_map *item = container_of(citem, struct ip_map, h);
h 144 net/sunrpc/svcauth_unix.c kref_get(&item->m_client->h.ref);
h 152 net/sunrpc/svcauth_unix.c return &i->h;
h 162 net/sunrpc/svcauth_unix.c struct ip_map *im = container_of(h, struct ip_map, h);
h 248 net/sunrpc/svcauth_unix.c container_of(dom, struct unix_domain, h),
h 268 net/sunrpc/svcauth_unix.c if (h == NULL) {
h 272 net/sunrpc/svcauth_unix.c im = container_of(h, struct ip_map, h);
h 276 net/sunrpc/svcauth_unix.c if (test_bit(CACHE_VALID, &h->flags) &&
h 277 net/sunrpc/svcauth_unix.c !test_bit(CACHE_NEGATIVE, &h->flags))
h 278 net/sunrpc/svcauth_unix.c dom = im->m_client->h.name;
h 318 net/sunrpc/svcauth_unix.c ch = sunrpc_cache_lookup(&ip_map_cache, &ip.h,
h 323 net/sunrpc/svcauth_unix.c return container_of(ch, struct ip_map, h);
h 334 net/sunrpc/svcauth_unix.c ip.h.flags = 0;
h 336 net/sunrpc/svcauth_unix.c set_bit(CACHE_NEGATIVE, &ip.h.flags);
h 345 net/sunrpc/svcauth_unix.c ip.h.expiry_time = expiry;
h 347 net/sunrpc/svcauth_unix.c &ip.h, &ipm->h,
h 363 net/sunrpc/svcauth_unix.c udom = container_of(dom, struct unix_domain, h);
h 379 net/sunrpc/svcauth_unix.c udom = container_of(dom, struct unix_domain, h);
h 394 net/sunrpc/svcauth_unix.c if (cache_check(&ip_map_cache, &ipm->h, NULL))
h 398 net/sunrpc/svcauth_unix.c if (test_and_set_bit(CACHE_NEGATIVE, &ipm->h.flags) == 0)
h 399 net/sunrpc/svcauth_unix.c auth_domain_put(&ipm->m_client->h);
h 402 net/sunrpc/svcauth_unix.c rv = &ipm->m_client->h;
h 405 net/sunrpc/svcauth_unix.c cache_put(&ipm->h, &ip_map_cache);
h 426 net/sunrpc/svcauth_unix.c if (!cache_valid(&ipm->h)) {
h 434 net/sunrpc/svcauth_unix.c cache_put(&ipm->h, &ip_map_cache);
h 437 net/sunrpc/svcauth_unix.c cache_get(&ipm->h);
h 459 net/sunrpc/svcauth_unix.c cache_put(&ipm->h, &ip_map_cache);
h 466 net/sunrpc/svcauth_unix.c cache_put(&ipm->h, &ip_map_cache);
h 479 net/sunrpc/svcauth_unix.c struct cache_head h;
h 488 net/sunrpc/svcauth_unix.c struct unix_gid *ug = container_of(item, struct unix_gid, h);
h 497 net/sunrpc/svcauth_unix.c struct unix_gid *orig = container_of(corig, struct unix_gid, h);
h 498 net/sunrpc/svcauth_unix.c struct unix_gid *new = container_of(cnew, struct unix_gid, h);
h 503 net/sunrpc/svcauth_unix.c struct unix_gid *new = container_of(cnew, struct unix_gid, h);
h 504 net/sunrpc/svcauth_unix.c struct unix_gid *item = container_of(citem, struct unix_gid, h);
h 509 net/sunrpc/svcauth_unix.c struct unix_gid *new = container_of(cnew, struct unix_gid, h);
h 510 net/sunrpc/svcauth_unix.c struct unix_gid *item = container_of(citem, struct unix_gid, h);
h 519 net/sunrpc/svcauth_unix.c return &g->h;
h 529 net/sunrpc/svcauth_unix.c struct unix_gid *ug = container_of(h, struct unix_gid, h);
h 584 net/sunrpc/svcauth_unix.c ug.h.flags = 0;
h 585 net/sunrpc/svcauth_unix.c ug.h.expiry_time = expiry;
h 587 net/sunrpc/svcauth_unix.c &ug.h, &ugp->h,
h 611 net/sunrpc/svcauth_unix.c if (h == NULL) {
h 615 net/sunrpc/svcauth_unix.c ug = container_of(h, struct unix_gid, h);
h 616 net/sunrpc/svcauth_unix.c if (test_bit(CACHE_VALID, &h->flags) &&
h 617 net/sunrpc/svcauth_unix.c !test_bit(CACHE_NEGATIVE, &h->flags))
h 650 net/sunrpc/svcauth_unix.c ch = sunrpc_cache_lookup(&unix_gid_cache, &ug.h,
h 653 net/sunrpc/svcauth_unix.c return container_of(ch, struct unix_gid, h);
h 664 net/sunrpc/svcauth_unix.c switch (cache_check(&unix_gid_cache, &ug->h, &rqstp->rq_chandle)) {
h 710 net/sunrpc/svcauth_unix.c switch (cache_check(&ip_map_cache, &ipm->h, &rqstp->rq_chandle)) {
h 719 net/sunrpc/svcauth_unix.c rqstp->rq_client = &ipm->m_client->h;
h 32 net/xfrm/xfrm_hash.h unsigned int h = family ^ reqid;
h 35 net/xfrm/xfrm_hash.h h ^= __xfrm4_daddr_saddr_hash(daddr, saddr);
h 38 net/xfrm/xfrm_hash.h h ^= __xfrm6_daddr_saddr_hash(daddr, saddr);
h 41 net/xfrm/xfrm_hash.h return (h ^ (h >> 16)) & hmask;
h 49 net/xfrm/xfrm_hash.h unsigned int h = family;
h 52 net/xfrm/xfrm_hash.h h ^= __xfrm4_daddr_saddr_hash(daddr, saddr);
h 55 net/xfrm/xfrm_hash.h h ^= __xfrm6_daddr_saddr_hash(daddr, saddr);
h 58 net/xfrm/xfrm_hash.h return (h ^ (h >> 16)) & hmask;
h 65 net/xfrm/xfrm_hash.h unsigned int h = (__force u32)spi ^ proto;
h 68 net/xfrm/xfrm_hash.h h ^= __xfrm4_addr_hash(daddr);
h 71 net/xfrm/xfrm_hash.h h ^= __xfrm6_addr_hash(daddr);
h 74 net/xfrm/xfrm_hash.h return (h ^ (h >> 10) ^ (h >> 20)) & hmask;
h 86 net/xfrm/xfrm_hash.h unsigned int h = 0;
h 94 net/xfrm/xfrm_hash.h h = __xfrm4_daddr_saddr_hash(daddr, saddr);
h 102 net/xfrm/xfrm_hash.h h = __xfrm6_daddr_saddr_hash(daddr, saddr);
h 105 net/xfrm/xfrm_hash.h h ^= (h >> 16);
h 106 net/xfrm/xfrm_hash.h return h & hmask;
h 111 net/xfrm/xfrm_hash.h unsigned int h = 0;
h 115 net/xfrm/xfrm_hash.h h = __xfrm4_daddr_saddr_hash(daddr, saddr);
h 119 net/xfrm/xfrm_hash.h h = __xfrm6_daddr_saddr_hash(daddr, saddr);
h 122 net/xfrm/xfrm_hash.h h ^= (h >> 16);
h 123 net/xfrm/xfrm_hash.h return h & hmask;
h 369 net/xfrm/xfrm_policy.c unsigned int h;
h 371 net/xfrm/xfrm_policy.c h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr,
h 375 net/xfrm/xfrm_policy.c hlist_add_head(&pol->bydst, ndsttable+h);
h 376 net/xfrm/xfrm_policy.c h0 = h;
h 378 net/xfrm/xfrm_policy.c if (h != h0)
h 399 net/xfrm/xfrm_policy.c unsigned int h;
h 401 net/xfrm/xfrm_policy.c h = __idx_hash(pol->index, nhashmask);
h 402 net/xfrm/xfrm_policy.c hlist_add_head(&pol->byidx, nidxtable+h);
h 103 net/xfrm/xfrm_state.c unsigned int h;
h 105 net/xfrm/xfrm_state.c h = __xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
h 108 net/xfrm/xfrm_state.c hlist_add_head(&x->bydst, ndsttable+h);
h 110 net/xfrm/xfrm_state.c h = __xfrm_src_hash(&x->id.daddr, &x->props.saddr,
h 113 net/xfrm/xfrm_state.c hlist_add_head(&x->bysrc, nsrctable+h);
h 116 net/xfrm/xfrm_state.c h = __xfrm_spi_hash(&x->id.daddr, x->id.spi,
h 119 net/xfrm/xfrm_state.c hlist_add_head(&x->byspi, nspitable+h);
h 686 net/xfrm/xfrm_state.c unsigned int h = xfrm_spi_hash(daddr, spi, proto, family);
h 690 net/xfrm/xfrm_state.c hlist_for_each_entry(x, entry, xfrm_state_byspi+h, byspi) {
h 718 net/xfrm/xfrm_state.c unsigned int h = xfrm_src_hash(daddr, saddr, family);
h 722 net/xfrm/xfrm_state.c hlist_for_each_entry(x, entry, xfrm_state_bysrc+h, bysrc) {
h 777 net/xfrm/xfrm_state.c unsigned int h;
h 787 net/xfrm/xfrm_state.c h = xfrm_dst_hash(daddr, saddr, tmpl->reqid, family);
h 788 net/xfrm/xfrm_state.c hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
h 858 net/xfrm/xfrm_state.c hlist_add_head(&x->bydst, xfrm_state_bydst+h);
h 859 net/xfrm/xfrm_state.c h = xfrm_src_hash(daddr, saddr, family);
h 860 net/xfrm/xfrm_state.c hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);
h 862 net/xfrm/xfrm_state.c h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, family);
h 863 net/xfrm/xfrm_state.c hlist_add_head(&x->byspi, xfrm_state_byspi+h);
h 892 net/xfrm/xfrm_state.c unsigned int h;
h 897 net/xfrm/xfrm_state.c h = xfrm_dst_hash(daddr, saddr, reqid, family);
h 898 net/xfrm/xfrm_state.c hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
h 922 net/xfrm/xfrm_state.c unsigned int h;
h 928 net/xfrm/xfrm_state.c h = xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
h 930 net/xfrm/xfrm_state.c hlist_add_head(&x->bydst, xfrm_state_bydst+h);
h 932 net/xfrm/xfrm_state.c h = xfrm_src_hash(&x->id.daddr, &x->props.saddr, x->props.family);
h 933 net/xfrm/xfrm_state.c hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);
h 936 net/xfrm/xfrm_state.c h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto,
h 939 net/xfrm/xfrm_state.c hlist_add_head(&x->byspi, xfrm_state_byspi+h);
h 960 net/xfrm/xfrm_state.c unsigned int h;
h 962 net/xfrm/xfrm_state.c h = xfrm_dst_hash(&xnew->id.daddr, &xnew->props.saddr, reqid, family);
h 963 net/xfrm/xfrm_state.c hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
h 984 net/xfrm/xfrm_state.c unsigned int h = xfrm_dst_hash(daddr, saddr, reqid, family);
h 988 net/xfrm/xfrm_state.c hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
h 1056 net/xfrm/xfrm_state.c hlist_add_head(&x->bydst, xfrm_state_bydst+h);
h 1057 net/xfrm/xfrm_state.c h = xfrm_src_hash(daddr, saddr, family);
h 1058 net/xfrm/xfrm_state.c hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);
h 1204 net/xfrm/xfrm_state.c unsigned int h;
h 1209 net/xfrm/xfrm_state.c h = xfrm_dst_hash(&m->old_daddr, &m->old_saddr,
h 1211 net/xfrm/xfrm_state.c hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
h 1226 net/xfrm/xfrm_state.c h = xfrm_src_hash(&m->old_daddr, &m->old_saddr,
h 1228 net/xfrm/xfrm_state.c hlist_for_each_entry(x, entry, xfrm_state_bysrc+h, bysrc) {
h 1497 net/xfrm/xfrm_state.c unsigned int h;
h 1522 net/xfrm/xfrm_state.c for (h=0; h<high-low+1; h++) {
h 1534 net/xfrm/xfrm_state.c h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, x->props.family);
h 1535 net/xfrm/xfrm_state.c hlist_add_head(&x->byspi, xfrm_state_byspi+h);
h 149 scripts/genksyms/genksyms.c unsigned long h = crc32(name) % HASH_BUCKETS;
h 152 scripts/genksyms/genksyms.c for (sym = symtab[h]; sym; sym = sym->hash_next)
h 163 scripts/genksyms/genksyms.c unsigned long h = crc32(name) % HASH_BUCKETS;
h 166 scripts/genksyms/genksyms.c for (sym = symtab[h]; sym; sym = sym->hash_next) {
h 183 scripts/genksyms/genksyms.c sym->hash_next = symtab[h];
h 184 scripts/genksyms/genksyms.c symtab[h] = sym;
h 572 scripts/kconfig/gconf.c gint w, h;
h 575 scripts/kconfig/gconf.c gtk_window_get_default_size(GTK_WINDOW(main_wnd), &w, &h);
h 577 scripts/kconfig/gconf.c gdk_window_get_size(widget->window, &w, &h);
h 579 scripts/kconfig/gconf.c if (h == old_h)
h 581 scripts/kconfig/gconf.c old_h = h;
h 583 scripts/kconfig/gconf.c gtk_paned_set_position(GTK_PANED(vpaned), 2 * h / 3);
h 832 scripts/kconfig/gconf.c gint w, h;
h 835 scripts/kconfig/gconf.c gtk_window_get_default_size(GTK_WINDOW(main_wnd), &w, &h);
h 63 scripts/kconfig/lxdialog/util.c dlg.dialog.hl = (h); \
h 49 security/selinux/ss/avtab.c newnode->next = h->htable[hvalue];
h 50 security/selinux/ss/avtab.c h->htable[hvalue] = newnode;
h 53 security/selinux/ss/avtab.c h->nel++;
h 63 security/selinux/ss/avtab.c if (!h || !h->htable)
h 66 security/selinux/ss/avtab.c hvalue = avtab_hash(key, h->mask);
h 67 security/selinux/ss/avtab.c for (prev = NULL, cur = h->htable[hvalue];
h 86 security/selinux/ss/avtab.c newnode = avtab_insert_node(h, hvalue, prev, cur, key, datum);
h 104 security/selinux/ss/avtab.c if (!h || !h->htable)
h 106 security/selinux/ss/avtab.c hvalue = avtab_hash(key, h->mask);
h 107 security/selinux/ss/avtab.c for (prev = NULL, cur = h->htable[hvalue];
h 125 security/selinux/ss/avtab.c return avtab_insert_node(h, hvalue, prev, cur, key, datum);
h 134 security/selinux/ss/avtab.c if (!h || !h->htable)
h 137 security/selinux/ss/avtab.c hvalue = avtab_hash(key, h->mask);
h 138 security/selinux/ss/avtab.c for (cur = h->htable[hvalue]; cur; cur = cur->next) {
h 169 security/selinux/ss/avtab.c if (!h || !h->htable)
h 172 security/selinux/ss/avtab.c hvalue = avtab_hash(key, h->mask);
h 173 security/selinux/ss/avtab.c for (cur = h->htable[hvalue]; cur; cur = cur->next) {
h 227 security/selinux/ss/avtab.c if (!h || !h->htable)
h 230 security/selinux/ss/avtab.c for (i = 0; i < h->nslot; i++) {
h 231 security/selinux/ss/avtab.c cur = h->htable[i];
h 237 security/selinux/ss/avtab.c h->htable[i] = NULL;
h 239 security/selinux/ss/avtab.c kfree(h->htable);
h 240 security/selinux/ss/avtab.c h->htable = NULL;
h 241 security/selinux/ss/avtab.c h->nslot = 0;
h 242 security/selinux/ss/avtab.c h->mask = 0;
h 247 security/selinux/ss/avtab.c h->htable = NULL;
h 248 security/selinux/ss/avtab.c h->nel = 0;
h 273 security/selinux/ss/avtab.c h->htable = kcalloc(nslot, sizeof(*(h->htable)), GFP_KERNEL);
h 274 security/selinux/ss/avtab.c if (!h->htable)
h 278 security/selinux/ss/avtab.c h->nel = 0;
h 279 security/selinux/ss/avtab.c h->nslot = nslot;
h 280 security/selinux/ss/avtab.c h->mask = mask;
h 282 security/selinux/ss/avtab.c h->nslot, nrules);
h 295 security/selinux/ss/avtab.c for (i = 0; i < h->nslot; i++) {
h 296 security/selinux/ss/avtab.c cur = h->htable[i];
h 313 security/selinux/ss/avtab.c tag, h->nel, slots_used, h->nslot, max_chain_len,
h 243 security/selinux/ss/conditional.c if (hashtab_insert(h, key, booldatum))
h 43 security/selinux/ss/hashtab.c if (!h || h->nel == HASHTAB_MAX_NODES)
h 46 security/selinux/ss/hashtab.c hvalue = h->hash_value(h, key);
h 48 security/selinux/ss/hashtab.c cur = h->htable[hvalue];
h 49 security/selinux/ss/hashtab.c while (cur && h->keycmp(h, key, cur->key) > 0) {
h 54 security/selinux/ss/hashtab.c if (cur && (h->keycmp(h, key, cur->key) == 0))
h 66 security/selinux/ss/hashtab.c newnode->next = h->htable[hvalue];
h 67 security/selinux/ss/hashtab.c h->htable[hvalue] = newnode;
h 70 security/selinux/ss/hashtab.c h->nel++;
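hashtab_insert above keeps each collision chain sorted by the table's keycmp callback, so a single pass both detects duplicates and finds the splice point: walk while keycmp > 0, reject on equality, insert before the first larger key. A sketch with integer keys standing in for the hash_value/keycmp callback pair (names are illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    struct node { int key; struct node *next; };

    /* Insert into a chain kept in ascending key order.
     * Returns -1 on duplicate or allocation failure, 0 on success. */
    static int chain_insert(struct node **head, int key)
    {
        struct node *prev = NULL, *cur = *head, *n;

        while (cur && key > cur->key) {     /* keycmp(...) > 0 */
            prev = cur;
            cur = cur->next;
        }
        if (cur && key == cur->key)         /* keycmp(...) == 0 */
            return -1;                      /* -EEXIST in the kernel */

        n = malloc(sizeof(*n));
        if (!n)
            return -1;
        n->key = key;
        if (prev) {
            n->next = prev->next;
            prev->next = n;
        } else {
            n->next = *head;
            *head = n;
        }
        return 0;
    }

    int main(void)
    {
        struct node *head = NULL;

        chain_insert(&head, 3);
        chain_insert(&head, 1);
        chain_insert(&head, 2);
        for (struct node *c = head; c; c = c->next)
            printf("%d ", c->key);
        printf("\n");
        return 0;
    }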
h 79 security/selinux/ss/hashtab.c if (!h)
h 82 security/selinux/ss/hashtab.c hvalue = h->hash_value(h, key);
h 83 security/selinux/ss/hashtab.c cur = h->htable[hvalue];
h 84 security/selinux/ss/hashtab.c while (cur && h->keycmp(h, key, cur->key) > 0)
h 87 security/selinux/ss/hashtab.c if (cur == NULL || (h->keycmp(h, key, cur->key) != 0))
h 98 security/selinux/ss/hashtab.c if (!h)
h 101 security/selinux/ss/hashtab.c for (i = 0; i < h->size; i++) {
h 102 security/selinux/ss/hashtab.c cur = h->htable[i];
h 108 security/selinux/ss/hashtab.c h->htable[i] = NULL;
h 111 security/selinux/ss/hashtab.c kfree(h->htable);
h 112 security/selinux/ss/hashtab.c h->htable = NULL;
h 114 security/selinux/ss/hashtab.c kfree(h);
h 125 security/selinux/ss/hashtab.c if (!h)
h 128 security/selinux/ss/hashtab.c for (i = 0; i < h->size; i++) {
h 129 security/selinux/ss/hashtab.c cur = h->htable[i];
h 148 security/selinux/ss/hashtab.c for (slots_used = max_chain_len = i = 0; i < h->size; i++) {
h 149 security/selinux/ss/hashtab.c cur = h->htable[i];
h 25 security/selinux/ss/hashtab.h u32 (*hash_value)(struct hashtab *h, const void *key);
h 27 security/selinux/ss/hashtab.h int (*keycmp)(struct hashtab *h, const void *key1, const void *key2);
h 402 security/selinux/ss/policydb.c struct hashtab *h = s[i].table;
h 405 security/selinux/ss/policydb.c hashtab_stat(h, &info);
h 407 security/selinux/ss/policydb.c "longest chain length %d\n", symtab_name[i], h->nel,
h 408 security/selinux/ss/policydb.c info.slots_used, h->size, info.max_chain_len);
h 959 security/selinux/ss/policydb.c rc = hashtab_insert(h, key, perdatum);
h 1012 security/selinux/ss/policydb.c rc = hashtab_insert(h, key, comdatum);
h 1185 security/selinux/ss/policydb.c rc = hashtab_insert(h, key, cladatum);
h 1252 security/selinux/ss/policydb.c rc = hashtab_insert(h, key, role);
h 1308 security/selinux/ss/policydb.c rc = hashtab_insert(h, key, typdatum);
h 1398 security/selinux/ss/policydb.c rc = hashtab_insert(h, key, usrdatum);
h 1449 security/selinux/ss/policydb.c rc = hashtab_insert(h, key, levdatum);
h 1491 security/selinux/ss/policydb.c rc = hashtab_insert(h, key, catdatum);
h 1502 security/selinux/ss/policydb.c static int (*read_f[SYM_NUM]) (struct policydb *p, struct hashtab *h, void *fp) =
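
The policydb.c matches converge on one shape: each symbol table (permissions, commons, classes, roles, types, users, levels, categories) is filled through hashtab_insert(), and the per-symbol readers are dispatched through the read_f[] array of function pointers indexed by symbol type. A compilable sketch of that dispatch, shrunk to two hypothetical readers (the real SYM_NUM covers eight tables):

    #define SYM_NUM 2   /* illustrative; the real policydb uses eight */

    struct policydb { int unused; };
    struct hashtab  { int unused; };

    static int perm_read(struct policydb *p, struct hashtab *h, void *fp)
    {
            (void)p; (void)h; (void)fp;
            return 0;   /* would parse one perdatum and hashtab_insert() it */
    }

    static int common_read(struct policydb *p, struct hashtab *h, void *fp)
    {
            (void)p; (void)h; (void)fp;
            return 0;
    }

    static int (*read_f[SYM_NUM])(struct policydb *, struct hashtab *, void *) = {
            perm_read,
            common_read,
    };

    static int read_all_symtabs(struct policydb *p, struct hashtab *tabs[], void *fp)
    {
            for (int i = 0; i < SYM_NUM; i++) {
                    int rc = read_f[i](p, tabs[i], fp);
                    if (rc)
                            return rc;   /* first failing reader aborts */
            }
            return 0;
    }
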
h 216 security/selinux/ss/sidtab.c cur = h->htable[i];
h 231 security/selinux/ss/sidtab.c "chain length %d\n", tag, h->nel, slots_used, SIDTAB_SIZE,
h 23 security/selinux/ss/symtab.c return val & (h->size - 1);
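
symtab.c's hash finishes with the standard power-of-two trick: when size is a power of two, val & (size - 1) equals val % size for unsigned val, so computing the slot index needs no division. For example, with size = 16 and val = 47, both expressions yield 15. An illustrative helper that makes the precondition explicit:

    #include <assert.h>

    static unsigned int slot_of(unsigned int val, unsigned int size)
    {
            /* size must be a power of two: exactly one bit set */
            assert(size && (size & (size - 1)) == 0);
            return val & (size - 1);   /* same as val % size, no division */
    }
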
h 55 sound/drivers/vx/vx_mixer.c u16 h;
h 67 sound/drivers/vx/vx_mixer.c u16 h;
h 110 sound/parisc/harmony.c return __raw_readl(h->iobase + r);
h 116 sound/parisc/harmony.c __raw_writel(v, h->iobase + r);
h 122 sound/parisc/harmony.c while (harmony_read(h, HARMONY_CNTL) & HARMONY_CNTL_C) ;
h 128 sound/parisc/harmony.c harmony_write(h, HARMONY_RESET, 1);
h 130 sound/parisc/harmony.c harmony_write(h, HARMONY_RESET, 0);
h 137 sound/parisc/harmony.c harmony_wait_for_control(h);
h 138 sound/parisc/harmony.c dstatus = harmony_read(h, HARMONY_DSTATUS);
h 140 sound/parisc/harmony.c harmony_write(h, HARMONY_DSTATUS, dstatus);
h 147 sound/parisc/harmony.c harmony_wait_for_control(h);
h 148 sound/parisc/harmony.c dstatus = harmony_read(h, HARMONY_DSTATUS);
h 150 sound/parisc/harmony.c harmony_write(h, HARMONY_DSTATUS, dstatus);
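
These first harmony.c matches establish the register-access vocabulary the rest of the driver uses: thin __raw_readl()/__raw_writel() accessors over the ioremapped base, a busy-wait on the control-in-progress bit that must precede every control write, and interrupt enable/disable done as read-modify-write of HARMONY_DSTATUS. Reassembled from the lines above (kernel context assumed; not standalone):

    static inline unsigned long
    harmony_read(struct snd_harmony *h, unsigned r)
    {
            return __raw_readl(h->iobase + r);
    }

    static inline void
    harmony_write(struct snd_harmony *h, unsigned r, unsigned long v)
    {
            __raw_writel(v, h->iobase + r);
    }

    /* spin until the codec clears its control-in-progress bit */
    static void harmony_wait_for_control(struct snd_harmony *h)
    {
            while (harmony_read(h, HARMONY_CNTL) & HARMONY_CNTL_C)
                    ;
    }

Note the busy-wait has no timeout: the driver trusts the hardware to clear HARMONY_CNTL_C promptly.
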
h 158 sound/parisc/harmony.c spin_lock_irqsave(&h->mixer_lock, flags);
h 159 sound/parisc/harmony.c harmony_wait_for_control(h);
h 160 sound/parisc/harmony.c harmony_write(h, HARMONY_GAINCTL, HARMONY_GAIN_SILENCE);
h 161 sound/parisc/harmony.c spin_unlock_irqrestore(&h->mixer_lock, flags);
h 169 sound/parisc/harmony.c spin_lock_irqsave(&h->mixer_lock, flags);
h 170 sound/parisc/harmony.c harmony_wait_for_control(h);
h 171 sound/parisc/harmony.c harmony_write(h, HARMONY_GAINCTL, h->st.gain);
h 172 sound/parisc/harmony.c spin_unlock_irqrestore(&h->mixer_lock, flags);
h 181 sound/parisc/harmony.c spin_lock_irqsave(&h->lock, flags);
h 184 sound/parisc/harmony.c (h->st.format << 6) |
h 185 sound/parisc/harmony.c (h->st.stereo << 5) |
h 186 sound/parisc/harmony.c (h->st.rate));
h 188 sound/parisc/harmony.c harmony_wait_for_control(h);
h 189 sound/parisc/harmony.c harmony_write(h, HARMONY_CNTL, ctrl);
h 191 sound/parisc/harmony.c spin_unlock_irqrestore(&h->lock, flags);
h 198 sound/parisc/harmony.c struct snd_harmony *h = dev;
h 200 sound/parisc/harmony.c spin_lock(&h->lock);
h 201 sound/parisc/harmony.c harmony_disable_interrupts(h);
h 202 sound/parisc/harmony.c harmony_wait_for_control(h);
h 203 sound/parisc/harmony.c dstatus = harmony_read(h, HARMONY_DSTATUS);
h 204 sound/parisc/harmony.c spin_unlock(&h->lock);
h 207 sound/parisc/harmony.c if (h->psubs && h->st.playing) {
h 208 sound/parisc/harmony.c spin_lock(&h->lock);
h 209 sound/parisc/harmony.c h->pbuf.buf += h->pbuf.count; /* PAGE_SIZE */
h 210 sound/parisc/harmony.c h->pbuf.buf %= h->pbuf.size; /* MAX_BUFS*PAGE_SIZE */
h 212 sound/parisc/harmony.c harmony_write(h, HARMONY_PNXTADD,
h 213 sound/parisc/harmony.c h->pbuf.addr + h->pbuf.buf);
h 214 sound/parisc/harmony.c h->stats.play_intr++;
h 215 sound/parisc/harmony.c spin_unlock(&h->lock);
h 216 sound/parisc/harmony.c snd_pcm_period_elapsed(h->psubs);
h 218 sound/parisc/harmony.c spin_lock(&h->lock);
h 219 sound/parisc/harmony.c harmony_write(h, HARMONY_PNXTADD, h->sdma.addr);
h 220 sound/parisc/harmony.c h->stats.silence_intr++;
h 221 sound/parisc/harmony.c spin_unlock(&h->lock);
h 226 sound/parisc/harmony.c if (h->csubs && h->st.capturing) {
h 227 sound/parisc/harmony.c spin_lock(&h->lock);
h 228 sound/parisc/harmony.c h->cbuf.buf += h->cbuf.count;
h 229 sound/parisc/harmony.c h->cbuf.buf %= h->cbuf.size;
h 231 sound/parisc/harmony.c harmony_write(h, HARMONY_RNXTADD,
h 232 sound/parisc/harmony.c h->cbuf.addr + h->cbuf.buf);
h 233 sound/parisc/harmony.c h->stats.rec_intr++;
h 234 sound/parisc/harmony.c spin_unlock(&h->lock);
h 235 sound/parisc/harmony.c snd_pcm_period_elapsed(h->csubs);
h 237 sound/parisc/harmony.c spin_lock(&h->lock);
h 238 sound/parisc/harmony.c harmony_write(h, HARMONY_RNXTADD, h->gdma.addr);
h 239 sound/parisc/harmony.c h->stats.graveyard_intr++;
h 240 sound/parisc/harmony.c spin_unlock(&h->lock);
h 244 sound/parisc/harmony.c spin_lock(&h->lock);
h 245 sound/parisc/harmony.c harmony_enable_interrupts(h);
h 246 sound/parisc/harmony.c spin_unlock(&h->lock);
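
Inside the interrupt handler the playback and capture branches do the same thing: advance the ring offset by one period, wrap it at the buffer size, program the engine's next-address register (PNXTADD or RNXTADD), bump a stat counter, and only then, outside the lock, call snd_pcm_period_elapsed(). A stream that is not running gets parked on its dummy buffer instead: the silence buffer for playback, the capture "graveyard". The offset arithmetic as a standalone sketch, with illustrative types:

    struct ring {
            unsigned long addr;   /* DMA base address */
            unsigned int  buf;    /* current offset into the buffer */
            unsigned int  count;  /* bytes per period */
            unsigned int  size;   /* total size, a multiple of count */
    };

    static unsigned long ring_next_dma(struct ring *r)
    {
            r->buf += r->count;       /* one period consumed */
            r->buf %= r->size;        /* wrap at the end of the buffer */
            return r->addr + r->buf;  /* value for PNXTADD / RNXTADD */
    }
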
h 308 sound/parisc/harmony.c struct snd_harmony *h = snd_pcm_substream_chip(ss);
h 310 sound/parisc/harmony.c if (h->st.capturing)
h 313 sound/parisc/harmony.c spin_lock(&h->lock);
h 316 sound/parisc/harmony.c h->st.playing = 1;
h 317 sound/parisc/harmony.c harmony_write(h, HARMONY_PNXTADD, h->pbuf.addr);
h 318 sound/parisc/harmony.c harmony_write(h, HARMONY_RNXTADD, h->gdma.addr);
h 319 sound/parisc/harmony.c harmony_unmute(h);
h 320 sound/parisc/harmony.c harmony_enable_interrupts(h);
h 323 sound/parisc/harmony.c h->st.playing = 0;
h 324 sound/parisc/harmony.c harmony_mute(h);
h 325 sound/parisc/harmony.c harmony_write(h, HARMONY_PNXTADD, h->sdma.addr);
h 326 sound/parisc/harmony.c harmony_disable_interrupts(h);
h 332 sound/parisc/harmony.c spin_unlock(&h->lock);
h 336 sound/parisc/harmony.c spin_unlock(&h->lock);
h 344 sound/parisc/harmony.c struct snd_harmony *h = snd_pcm_substream_chip(ss);
h 346 sound/parisc/harmony.c if (h->st.playing)
h 349 sound/parisc/harmony.c spin_lock(&h->lock);
h 352 sound/parisc/harmony.c h->st.capturing = 1;
h 353 sound/parisc/harmony.c harmony_write(h, HARMONY_PNXTADD, h->sdma.addr);
h 354 sound/parisc/harmony.c harmony_write(h, HARMONY_RNXTADD, h->cbuf.addr);
h 355 sound/parisc/harmony.c harmony_unmute(h);
h 356 sound/parisc/harmony.c harmony_enable_interrupts(h);
h 359 sound/parisc/harmony.c h->st.capturing = 0;
h 360 sound/parisc/harmony.c harmony_mute(h);
h 361 sound/parisc/harmony.c harmony_write(h, HARMONY_RNXTADD, h->gdma.addr);
h 362 sound/parisc/harmony.c harmony_disable_interrupts(h);
h 368 sound/parisc/harmony.c spin_unlock(&h->lock);
h 372 sound/parisc/harmony.c spin_unlock(&h->lock);
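
The two trigger callbacks are mirror images, and each refuses to run while the opposite direction is active, since the hardware has a single rate/format engine. On START the active engine is pointed at the real buffer and the idle engine at its dummy buffer; on STOP the stream is muted and re-parked on the dummy buffer so the converters keep receiving valid addresses. Reassembled from the playback matches above, with the usual ALSA switch shape assumed around them (kernel context, not standalone):

    /* called with h->lock held; capture active => -EBUSY earlier */
    switch (cmd) {
    case SNDRV_PCM_TRIGGER_START:
            h->st.playing = 1;
            harmony_write(h, HARMONY_PNXTADD, h->pbuf.addr);  /* real data */
            harmony_write(h, HARMONY_RNXTADD, h->gdma.addr);  /* park capture */
            harmony_unmute(h);
            harmony_enable_interrupts(h);
            break;
    case SNDRV_PCM_TRIGGER_STOP:
            h->st.playing = 0;
            harmony_mute(h);
            harmony_write(h, HARMONY_PNXTADD, h->sdma.addr);  /* silence */
            harmony_disable_interrupts(h);
            break;
    default:
            return -EINVAL;
    }
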
h 380 sound/parisc/harmony.c int o = h->st.format;
h 399 sound/parisc/harmony.c snd_pcm_format_set_silence(fmt, h->sdma.area, SILENCE_BUFSZ /
h 410 sound/parisc/harmony.c struct snd_harmony *h = snd_pcm_substream_chip(ss);
h 413 sound/parisc/harmony.c if (h->st.capturing)
h 416 sound/parisc/harmony.c h->pbuf.size = snd_pcm_lib_buffer_bytes(ss);
h 417 sound/parisc/harmony.c h->pbuf.count = snd_pcm_lib_period_bytes(ss);
h 418 sound/parisc/harmony.c if (h->pbuf.buf >= h->pbuf.size)
h 419 sound/parisc/harmony.c h->pbuf.buf = 0;
h 420 sound/parisc/harmony.c h->st.playing = 0;
h 422 sound/parisc/harmony.c h->st.rate = snd_harmony_rate_bits(rt->rate);
h 423 sound/parisc/harmony.c h->st.format = snd_harmony_set_data_format(h, rt->format, 0);
h 426 sound/parisc/harmony.c h->st.stereo = HARMONY_SS_STEREO;
h 428 sound/parisc/harmony.c h->st.stereo = HARMONY_SS_MONO;
h 430 sound/parisc/harmony.c harmony_set_control(h);
h 432 sound/parisc/harmony.c h->pbuf.addr = rt->dma_addr;
h 440 sound/parisc/harmony.c struct snd_harmony *h = snd_pcm_substream_chip(ss);
h 443 sound/parisc/harmony.c if (h->st.playing)
h 446 sound/parisc/harmony.c h->cbuf.size = snd_pcm_lib_buffer_bytes(ss);
h 447 sound/parisc/harmony.c h->cbuf.count = snd_pcm_lib_period_bytes(ss);
h 448 sound/parisc/harmony.c if (h->cbuf.buf >= h->cbuf.size)
h 449 sound/parisc/harmony.c h->cbuf.buf = 0;
h 450 sound/parisc/harmony.c h->st.capturing = 0;
h 452 sound/parisc/harmony.c h->st.rate = snd_harmony_rate_bits(rt->rate);
h 453 sound/parisc/harmony.c h->st.format = snd_harmony_set_data_format(h, rt->format, 0);
h 456 sound/parisc/harmony.c h->st.stereo = HARMONY_SS_STEREO;
h 458 sound/parisc/harmony.c h->st.stereo = HARMONY_SS_MONO;
h 460 sound/parisc/harmony.c harmony_set_control(h);
h 462 sound/parisc/harmony.c h->cbuf.addr = rt->dma_addr;
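
Both prepare callbacks follow the same recipe: reject the call if the opposite direction is running, derive buffer and period sizes from the ALSA runtime, reset a stale ring offset, translate rate/format/channels into the device's control-register fields, push them with harmony_set_control(), and record the runtime's DMA address for the triggers. Condensed from the playback matches (error paths elided; the channels == 2 test is the obvious one and is assumed here):

    h->pbuf.size  = snd_pcm_lib_buffer_bytes(ss);
    h->pbuf.count = snd_pcm_lib_period_bytes(ss);
    if (h->pbuf.buf >= h->pbuf.size)
            h->pbuf.buf = 0;          /* stale offset from a prior run */
    h->st.playing = 0;

    h->st.rate   = snd_harmony_rate_bits(rt->rate);
    h->st.format = snd_harmony_set_data_format(h, rt->format, 0);
    h->st.stereo = (rt->channels == 2) ? HARMONY_SS_STEREO
                                       : HARMONY_SS_MONO;

    harmony_set_control(h);           /* latch rate/format/stereo */
    h->pbuf.addr = rt->dma_addr;      /* base the triggers will program */
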
h 471 sound/parisc/harmony.c struct snd_harmony *h = snd_pcm_substream_chip(ss);
h 475 sound/parisc/harmony.c if (!(h->st.playing) || (h->psubs == NULL))
h 478 sound/parisc/harmony.c if ((h->pbuf.addr == 0) || (h->pbuf.size == 0))
h 481 sound/parisc/harmony.c pcuradd = harmony_read(h, HARMONY_PCURADD);
h 482 sound/parisc/harmony.c played = pcuradd - h->pbuf.addr;
h 486 sound/parisc/harmony.c pcuradd, h->pbuf.addr, played);
h 489 sound/parisc/harmony.c if (pcuradd > h->pbuf.addr + h->pbuf.size) {
h 500 sound/parisc/harmony.c struct snd_harmony *h = snd_pcm_substream_chip(ss);
h 504 sound/parisc/harmony.c if (!(h->st.capturing) || (h->csubs == NULL))
h 507 sound/parisc/harmony.c if ((h->cbuf.addr == 0) || (h->cbuf.size == 0))
h 510 sound/parisc/harmony.c rcuradd = harmony_read(h, HARMONY_RCURADD);
h 511 sound/parisc/harmony.c caught = rcuradd - h->cbuf.addr;
h 515 sound/parisc/harmony.c rcuradd, h->cbuf.addr, caught);
h 518 sound/parisc/harmony.c if (rcuradd > h->cbuf.addr + h->cbuf.size) {
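
The pointer callbacks turn the hardware's current-address register back into a buffer offset: read PCURADD (or RCURADD), subtract the DMA base, and treat a cursor outside [base, base + size] as suspect; the real driver logs the anomaly before bailing. A hedged standalone sketch of the arithmetic:

    /* illustrative: offset = hardware cursor - DMA base, bounds-checked */
    static unsigned int stream_pos(unsigned long curadd,
                                   unsigned long base, unsigned int size)
    {
            if (curadd < base || curadd > base + size)
                    return 0;   /* out-of-range cursor: report the start */
            return (unsigned int)(curadd - base);
    }
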
h 528 sound/parisc/harmony.c struct snd_harmony *h = snd_pcm_substream_chip(ss);
h 532 sound/parisc/harmony.c h->psubs = ss;
h 547 sound/parisc/harmony.c struct snd_harmony *h = snd_pcm_substream_chip(ss);
h 551 sound/parisc/harmony.c h->csubs = ss;
h 566 sound/parisc/harmony.c struct snd_harmony *h = snd_pcm_substream_chip(ss);
h 567 sound/parisc/harmony.c h->psubs = NULL;
h 574 sound/parisc/harmony.c struct snd_harmony *h = snd_pcm_substream_chip(ss);
h 575 sound/parisc/harmony.c h->csubs = NULL;
h 584 sound/parisc/harmony.c struct snd_harmony *h = snd_pcm_substream_chip(ss);
h 587 sound/parisc/harmony.c if (err > 0 && h->dma.type == SNDRV_DMA_TYPE_CONTINUOUS)
h 627 sound/parisc/harmony.c harmony_disable_interrupts(h);
h 629 sound/parisc/harmony.c err = snd_pcm_new(h->card, "harmony", 0, 1, 1, &pcm);
h 638 sound/parisc/harmony.c pcm->private_data = h;
h 641 sound/parisc/harmony.c h->pcm = pcm;
h 643 sound/parisc/harmony.c h->psubs = NULL;
h 644 sound/parisc/harmony.c h->csubs = NULL;
h 647 sound/parisc/harmony.c h->dma.type = SNDRV_DMA_TYPE_DEV;
h 648 sound/parisc/harmony.c h->dma.dev = &h->dev->dev;
h 649 sound/parisc/harmony.c err = snd_dma_alloc_pages(h->dma.type,
h 650 sound/parisc/harmony.c h->dma.dev,
h 652 sound/parisc/harmony.c &h->gdma);
h 659 sound/parisc/harmony.c err = snd_dma_alloc_pages(h->dma.type,
h 660 sound/parisc/harmony.c h->dma.dev,
h 662 sound/parisc/harmony.c &h->sdma);
h 669 sound/parisc/harmony.c err = snd_pcm_lib_preallocate_pages_for_all(pcm, h->dma.type,
h 670 sound/parisc/harmony.c h->dma.dev,
h 678 sound/parisc/harmony.c h->st.format = snd_harmony_set_data_format(h,
h 687 sound/parisc/harmony.c harmony_wait_for_control(h);
h 688 sound/parisc/harmony.c harmony_write(h, HARMONY_GAINCTL, h->st.gain);
h 712 sound/parisc/harmony.c struct snd_harmony *h = snd_kcontrol_chip(kc);
h 719 sound/parisc/harmony.c spin_lock_irq(&h->mixer_lock);
h 721 sound/parisc/harmony.c left = (h->st.gain >> shift_left) & mask;
h 722 sound/parisc/harmony.c right = (h->st.gain >> shift_right) & mask;
h 732 sound/parisc/harmony.c spin_unlock_irq(&h->mixer_lock);
h 741 sound/parisc/harmony.c struct snd_harmony *h = snd_kcontrol_chip(kc);
h 747 sound/parisc/harmony.c int old_gain = h->st.gain;
h 749 sound/parisc/harmony.c spin_lock_irq(&h->mixer_lock);
h 754 sound/parisc/harmony.c h->st.gain &= ~( (mask << shift_left ) );
h 755 sound/parisc/harmony.c h->st.gain |= (left << shift_left);
h 761 sound/parisc/harmony.c h->st.gain &= ~( (mask << shift_right) );
h 762 sound/parisc/harmony.c h->st.gain |= (right << shift_right);
h 765 sound/parisc/harmony.c snd_harmony_set_new_gain(h);
h 767 sound/parisc/harmony.c spin_unlock_irq(&h->mixer_lock);
h 769 sound/parisc/harmony.c return h->st.gain != old_gain;
h 791 sound/parisc/harmony.c struct snd_harmony *h = snd_kcontrol_chip(kc);
h 794 sound/parisc/harmony.c spin_lock_irq(&h->mixer_lock);
h 796 sound/parisc/harmony.c value = (h->st.gain >> HARMONY_GAIN_IS_SHIFT) & 1;
h 799 sound/parisc/harmony.c spin_unlock_irq(&h->mixer_lock);
h 808 sound/parisc/harmony.c struct snd_harmony *h = snd_kcontrol_chip(kc);
h 810 sound/parisc/harmony.c int old_gain = h->st.gain;
h 812 sound/parisc/harmony.c spin_lock_irq(&h->mixer_lock);
h 815 sound/parisc/harmony.c h->st.gain &= ~HARMONY_GAIN_IS_MASK;
h 816 sound/parisc/harmony.c h->st.gain |= value << HARMONY_GAIN_IS_SHIFT;
h 818 sound/parisc/harmony.c snd_harmony_set_new_gain(h);
h 820 sound/parisc/harmony.c spin_unlock_irq(&h->mixer_lock);
h 822 sound/parisc/harmony.c return h->st.gain != old_gain;
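
Every mixer control here is a read-modify-write on the one cached gain word: get extracts a field with (gain >> shift) & mask; put clears the field, ORs in the new value, pushes the word to HARMONY_GAINCTL under the mixer lock, and returns whether anything changed (ALSA's contract: nonzero if the value changed). The field arithmetic, factored into illustrative helpers:

    static unsigned int field_get(unsigned int gain, int shift,
                                  unsigned int mask)
    {
            return (gain >> shift) & mask;
    }

    static int field_put(unsigned int *gain, int shift,
                         unsigned int mask, unsigned int value)
    {
            unsigned int old = *gain;

            *gain &= ~(mask << shift);         /* clear the field */
            *gain |= (value & mask) << shift;  /* insert the new value */
            return *gain != old;               /* 1 if changed, else 0 */
    }
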
h 859 sound/parisc/harmony.c harmony_mute(h);
h 860 sound/parisc/harmony.c harmony_reset(h);
h 861 sound/parisc/harmony.c h->st.gain = HARMONY_GAIN_DEFAULT;
h 862 sound/parisc/harmony.c harmony_unmute(h);
h 868 sound/parisc/harmony.c struct snd_card *card = h->card;
h 871 sound/parisc/harmony.c if (snd_BUG_ON(!h))
h 877 sound/parisc/harmony.c snd_ctl_new1(&snd_harmony_controls[idx], h));
h 882 sound/parisc/harmony.c snd_harmony_mixer_reset(h);
h 890 sound/parisc/harmony.c if (h->gdma.addr)
h 891 sound/parisc/harmony.c snd_dma_free_pages(&h->gdma);
h 892 sound/parisc/harmony.c if (h->sdma.addr)
h 893 sound/parisc/harmony.c snd_dma_free_pages(&h->sdma);
h 895 sound/parisc/harmony.c if (h->irq >= 0)
h 896 sound/parisc/harmony.c free_irq(h->irq, h);
h 898 sound/parisc/harmony.c if (h->iobase)
h 899 sound/parisc/harmony.c iounmap(h->iobase);
h 901 sound/parisc/harmony.c parisc_set_drvdata(h->dev, NULL);
h 903 sound/parisc/harmony.c kfree(h);
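
snd_harmony_free() releases resources in reverse order of acquisition and checks each one first, which is exactly what lets snd_harmony_create() jump to it from any failure point. Reassembled from the matches above (kernel context assumed):

    static int snd_harmony_free(struct snd_harmony *h)
    {
            if (h->gdma.addr)           /* capture graveyard buffer */
                    snd_dma_free_pages(&h->gdma);
            if (h->sdma.addr)           /* playback silence buffer */
                    snd_dma_free_pages(&h->sdma);
            if (h->irq >= 0)            /* -1 until request_irq succeeds */
                    free_irq(h->irq, h);
            if (h->iobase)
                    iounmap(h->iobase);
            parisc_set_drvdata(h->dev, NULL);
            kfree(h);
            return 0;
    }

Initializing h->irq to -1 and relying on kzalloc() for the rest is what makes the partial-failure paths safe.
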
h 910 sound/parisc/harmony.c struct snd_harmony *h = dev->device_data;
h 911 sound/parisc/harmony.c return snd_harmony_free(h);
h 920 sound/parisc/harmony.c struct snd_harmony *h;
h 927 sound/parisc/harmony.c h = kzalloc(sizeof(*h), GFP_KERNEL);
h 928 sound/parisc/harmony.c if (h == NULL)
h 931 sound/parisc/harmony.c h->hpa = padev->hpa.start;
h 932 sound/parisc/harmony.c h->card = card;
h 933 sound/parisc/harmony.c h->dev = padev;
h 934 sound/parisc/harmony.c h->irq = -1;
h 935 sound/parisc/harmony.c h->iobase = ioremap_nocache(padev->hpa.start, HARMONY_SIZE);
h 936 sound/parisc/harmony.c if (h->iobase == NULL) {
h 944 sound/parisc/harmony.c "harmony", h);
h 950 sound/parisc/harmony.c h->irq = padev->irq;
h 952 sound/parisc/harmony.c spin_lock_init(&h->mixer_lock);
h 953 sound/parisc/harmony.c spin_lock_init(&h->lock);
h 956 sound/parisc/harmony.c h, &ops)) < 0) {
h 962 sound/parisc/harmony.c *rchip = h;
h 967 sound/parisc/harmony.c snd_harmony_free(h);
h 976 sound/parisc/harmony.c struct snd_harmony *h;
h 982 sound/parisc/harmony.c err = snd_harmony_create(card, padev, &h);
h 986 sound/parisc/harmony.c err = snd_harmony_pcm_init(h);
h 990 sound/parisc/harmony.c err = snd_harmony_mixer_init(h);
h 997 sound/parisc/harmony.c card->shortname, h->hpa, h->irq);
h 263 sound/pci/riptide/riptide.c #define SEND_LSEL(p,b,c,d,e,f,g,h) sendcmd(p,PARM,LSEL|BYTE1(b)|BYTE2(c)|BYTE3(d),BYTE0(e)|BYTE1(f)|BYTE2(g)|BYTE3(h),RET(0)) /* select paths for internal connections */
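
SEND_LSEL() packs its seven argument bytes into the two 32-bit parameter words of a riptide command, using BYTEn() helpers to place each value in byte lane n. Those lane macros are not among the matches; a plausible sketch of their shape (illustrative only; riptide.c carries its own definitions):

    #define BYTE0(x) ((unsigned int)((x) & 0xff))   /* bits  7..0  */
    #define BYTE1(x) (BYTE0(x) << 8)                /* bits 15..8  */
    #define BYTE2(x) (BYTE0(x) << 16)               /* bits 23..16 */
    #define BYTE3(x) (BYTE0(x) << 24)               /* bits 31..24 */
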