f 109 arch/x86/kernel/cpu/cpufreq/powernow-k7.c unsigned int f = fsb / 1000; f 111 arch/x86/kernel/cpu/cpufreq/powernow-k7.c delta = (fsbspeed > f) ? fsbspeed - f : f - fsbspeed; f 14 arch/x86/kernel/cpu/mtrr/if.c #define FILE_FCOUNT(f) (((struct seq_file *)((f)->private_data))->private) f 167 arch/x86/kernel/early-quirks.c void (*f)(int num, int slot, int func); f 214 arch/x86/kernel/early-quirks.c for (i = 0; early_qrk[i].f != NULL; i++) { f 223 arch/x86/kernel/early-quirks.c early_qrk[i].f(num, slot, func); f 248 arch/x86/kernel/i387.c #define FPREG_ADDR(f, n) ((void *)&(f)->st_space + (n) * 16); f 59 arch/x86/kernel/irqinit_64.c BI(x,c) BI(x,d) BI(x,e) BI(x,f) f 284 arch/x86/kernel/mca_32.c bus->f.mca_write_pos = mca_pc_write_pos; f 285 arch/x86/kernel/mca_32.c bus->f.mca_read_pos = mca_pc_read_pos; f 286 arch/x86/kernel/mca_32.c bus->f.mca_transform_irq = mca_dummy_transform_irq; f 287 arch/x86/kernel/mca_32.c bus->f.mca_transform_ioport = mca_dummy_transform_ioport; f 288 arch/x86/kernel/mca_32.c bus->f.mca_transform_memory = mca_dummy_transform_memory; f 124 arch/x86/kernel/tlb_64.c union smp_flush_state *f; f 132 arch/x86/kernel/tlb_64.c f = &per_cpu(flush_state, sender); f 134 arch/x86/kernel/tlb_64.c if (!cpu_isset(cpu, f->flush_cpumask)) f 145 arch/x86/kernel/tlb_64.c if (f->flush_mm == read_pda(active_mm)) { f 147 arch/x86/kernel/tlb_64.c if (f->flush_va == TLB_FLUSH_ALL) f 150 arch/x86/kernel/tlb_64.c __flush_tlb_one(f->flush_va); f 156 arch/x86/kernel/tlb_64.c cpu_clear(cpu, f->flush_cpumask); f 164 arch/x86/kernel/tlb_64.c union smp_flush_state *f; f 172 arch/x86/kernel/tlb_64.c f = &per_cpu(flush_state, sender); f 179 arch/x86/kernel/tlb_64.c spin_lock(&f->tlbstate_lock); f 181 arch/x86/kernel/tlb_64.c f->flush_mm = mm; f 182 arch/x86/kernel/tlb_64.c f->flush_va = va; f 183 arch/x86/kernel/tlb_64.c cpus_or(f->flush_cpumask, cpumask, f->flush_cpumask); f 191 arch/x86/kernel/tlb_64.c while (!cpus_empty(f->flush_cpumask)) f 194 arch/x86/kernel/tlb_64.c f->flush_mm = NULL; f 195 arch/x86/kernel/tlb_64.c f->flush_va = 0; f 196 arch/x86/kernel/tlb_64.c spin_unlock(&f->tlbstate_lock); f 171 arch/x86/math-emu/reg_compare.c int f = 0, c; f 177 arch/x86/math-emu/reg_compare.c f = SW_C3 | SW_C2 | SW_C0; f 181 arch/x86/math-emu/reg_compare.c f = SW_C0; f 184 arch/x86/math-emu/reg_compare.c f = SW_C3; f 187 arch/x86/math-emu/reg_compare.c f = 0; f 190 arch/x86/math-emu/reg_compare.c f = SW_C3 | SW_C2 | SW_C0; f 195 arch/x86/math-emu/reg_compare.c f = SW_C3 | SW_C2 | SW_C0; f 199 arch/x86/math-emu/reg_compare.c setcc(f); f 208 arch/x86/math-emu/reg_compare.c int f = 0, c; f 227 arch/x86/math-emu/reg_compare.c f = SW_C0; f 230 arch/x86/math-emu/reg_compare.c f = SW_C3; f 233 arch/x86/math-emu/reg_compare.c f = 0; f 236 arch/x86/math-emu/reg_compare.c f = SW_C3 | SW_C2 | SW_C0; f 241 arch/x86/math-emu/reg_compare.c f = SW_C3 | SW_C2 | SW_C0; f 245 arch/x86/math-emu/reg_compare.c setcc(f); f 254 arch/x86/math-emu/reg_compare.c int f = 0, c; f 277 arch/x86/math-emu/reg_compare.c f = SW_C0; f 280 arch/x86/math-emu/reg_compare.c f = SW_C3; f 283 arch/x86/math-emu/reg_compare.c f = 0; f 286 arch/x86/math-emu/reg_compare.c f = SW_C3 | SW_C2 | SW_C0; f 291 arch/x86/math-emu/reg_compare.c f = SW_C3 | SW_C2 | SW_C0; f 295 arch/x86/math-emu/reg_compare.c setcc(f); f 312 arch/x86/mm/kmmio.c struct kmmio_fault_page *f; f 315 arch/x86/mm/kmmio.c f = get_kmmio_fault_page(page); f 316 arch/x86/mm/kmmio.c if (f) { f 317 arch/x86/mm/kmmio.c if (!f->count) f 318 arch/x86/mm/kmmio.c 
arm_kmmio_fault_page(f->page, NULL); f 319 arch/x86/mm/kmmio.c f->count++; f 323 arch/x86/mm/kmmio.c f = kmalloc(sizeof(*f), GFP_ATOMIC); f 324 arch/x86/mm/kmmio.c if (!f) f 327 arch/x86/mm/kmmio.c f->count = 1; f 328 arch/x86/mm/kmmio.c f->page = page; f 329 arch/x86/mm/kmmio.c list_add_rcu(&f->list, kmmio_page_list(f->page)); f 331 arch/x86/mm/kmmio.c arm_kmmio_fault_page(f->page, NULL); f 340 arch/x86/mm/kmmio.c struct kmmio_fault_page *f; f 343 arch/x86/mm/kmmio.c f = get_kmmio_fault_page(page); f 344 arch/x86/mm/kmmio.c if (!f) f 347 arch/x86/mm/kmmio.c f->count--; f 348 arch/x86/mm/kmmio.c BUG_ON(f->count < 0); f 349 arch/x86/mm/kmmio.c if (!f->count) { f 350 arch/x86/mm/kmmio.c disarm_kmmio_fault_page(f->page, NULL); f 351 arch/x86/mm/kmmio.c f->release_next = *release_list; f 352 arch/x86/mm/kmmio.c *release_list = f; f 324 arch/x86/xen/enlighten.c int f; f 336 arch/x86/xen/enlighten.c for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) { f 337 arch/x86/xen/enlighten.c frames[f] = virt_to_mfn(va); f 334 block/compat_ioctl.c struct floppy_struct *f; f 337 block/compat_ioctl.c f = karg = kmalloc(sizeof(struct floppy_struct), GFP_KERNEL); f 342 block/compat_ioctl.c err = __get_user(f->size, &uf->size); f 343 block/compat_ioctl.c err |= __get_user(f->sect, &uf->sect); f 344 block/compat_ioctl.c err |= __get_user(f->head, &uf->head); f 345 block/compat_ioctl.c err |= __get_user(f->track, &uf->track); f 346 block/compat_ioctl.c err |= __get_user(f->stretch, &uf->stretch); f 347 block/compat_ioctl.c err |= __get_user(f->gap, &uf->gap); f 348 block/compat_ioctl.c err |= __get_user(f->rate, &uf->rate); f 349 block/compat_ioctl.c err |= __get_user(f->spec1, &uf->spec1); f 350 block/compat_ioctl.c err |= __get_user(f->fmt_gap, &uf->fmt_gap); f 352 block/compat_ioctl.c f->name = compat_ptr(name); f 363 block/compat_ioctl.c struct floppy_drive_params *f; f 366 block/compat_ioctl.c f = karg = kmalloc(sizeof(struct floppy_drive_params), GFP_KERNEL); f 371 block/compat_ioctl.c err = __get_user(f->cmos, &uf->cmos); f 372 block/compat_ioctl.c err |= __get_user(f->max_dtr, &uf->max_dtr); f 373 block/compat_ioctl.c err |= __get_user(f->hlt, &uf->hlt); f 374 block/compat_ioctl.c err |= __get_user(f->hut, &uf->hut); f 375 block/compat_ioctl.c err |= __get_user(f->srt, &uf->srt); f 376 block/compat_ioctl.c err |= __get_user(f->spinup, &uf->spinup); f 377 block/compat_ioctl.c err |= __get_user(f->spindown, &uf->spindown); f 378 block/compat_ioctl.c err |= __get_user(f->spindown_offset, &uf->spindown_offset); f 379 block/compat_ioctl.c err |= __get_user(f->select_delay, &uf->select_delay); f 380 block/compat_ioctl.c err |= __get_user(f->rps, &uf->rps); f 381 block/compat_ioctl.c err |= __get_user(f->tracks, &uf->tracks); f 382 block/compat_ioctl.c err |= __get_user(f->timeout, &uf->timeout); f 383 block/compat_ioctl.c err |= __get_user(f->interleave_sect, &uf->interleave_sect); f 384 block/compat_ioctl.c err |= __copy_from_user(&f->max_errors, &uf->max_errors, sizeof(f->max_errors)); f 385 block/compat_ioctl.c err |= __get_user(f->flags, &uf->flags); f 386 block/compat_ioctl.c err |= __get_user(f->read_track, &uf->read_track); f 387 block/compat_ioctl.c err |= __copy_from_user(f->autodetect, uf->autodetect, sizeof(f->autodetect)); f 388 block/compat_ioctl.c err |= __get_user(f->checkfreq, &uf->checkfreq); f 389 block/compat_ioctl.c err |= __get_user(f->native_format, &uf->native_format); f 423 block/compat_ioctl.c struct floppy_struct *f = karg; f 426 block/compat_ioctl.c err = __put_user(f->size, 
&uf->size); f 427 block/compat_ioctl.c err |= __put_user(f->sect, &uf->sect); f 428 block/compat_ioctl.c err |= __put_user(f->head, &uf->head); f 429 block/compat_ioctl.c err |= __put_user(f->track, &uf->track); f 430 block/compat_ioctl.c err |= __put_user(f->stretch, &uf->stretch); f 431 block/compat_ioctl.c err |= __put_user(f->gap, &uf->gap); f 432 block/compat_ioctl.c err |= __put_user(f->rate, &uf->rate); f 433 block/compat_ioctl.c err |= __put_user(f->spec1, &uf->spec1); f 434 block/compat_ioctl.c err |= __put_user(f->fmt_gap, &uf->fmt_gap); f 435 block/compat_ioctl.c err |= __put_user((u64)f->name, (compat_caddr_t __user *)&uf->name); f 441 block/compat_ioctl.c struct floppy_drive_params *f = karg; f 444 block/compat_ioctl.c err = __put_user(f->cmos, &uf->cmos); f 445 block/compat_ioctl.c err |= __put_user(f->max_dtr, &uf->max_dtr); f 446 block/compat_ioctl.c err |= __put_user(f->hlt, &uf->hlt); f 447 block/compat_ioctl.c err |= __put_user(f->hut, &uf->hut); f 448 block/compat_ioctl.c err |= __put_user(f->srt, &uf->srt); f 449 block/compat_ioctl.c err |= __put_user(f->spinup, &uf->spinup); f 450 block/compat_ioctl.c err |= __put_user(f->spindown, &uf->spindown); f 451 block/compat_ioctl.c err |= __put_user(f->spindown_offset, &uf->spindown_offset); f 452 block/compat_ioctl.c err |= __put_user(f->select_delay, &uf->select_delay); f 453 block/compat_ioctl.c err |= __put_user(f->rps, &uf->rps); f 454 block/compat_ioctl.c err |= __put_user(f->tracks, &uf->tracks); f 455 block/compat_ioctl.c err |= __put_user(f->timeout, &uf->timeout); f 456 block/compat_ioctl.c err |= __put_user(f->interleave_sect, &uf->interleave_sect); f 457 block/compat_ioctl.c err |= __copy_to_user(&uf->max_errors, &f->max_errors, sizeof(f->max_errors)); f 458 block/compat_ioctl.c err |= __put_user(f->flags, &uf->flags); f 459 block/compat_ioctl.c err |= __put_user(f->read_track, &uf->read_track); f 460 block/compat_ioctl.c err |= __copy_to_user(uf->autodetect, f->autodetect, sizeof(f->autodetect)); f 461 block/compat_ioctl.c err |= __put_user(f->checkfreq, &uf->checkfreq); f 462 block/compat_ioctl.c err |= __put_user(f->native_format, &uf->native_format); f 469 block/compat_ioctl.c struct floppy_drive_struct *f = karg; f 472 block/compat_ioctl.c err = __put_user(f->flags, &uf->flags); f 473 block/compat_ioctl.c err |= __put_user(f->spinup_date, &uf->spinup_date); f 474 block/compat_ioctl.c err |= __put_user(f->select_date, &uf->select_date); f 475 block/compat_ioctl.c err |= __put_user(f->first_read_date, &uf->first_read_date); f 476 block/compat_ioctl.c err |= __put_user(f->probed_format, &uf->probed_format); f 477 block/compat_ioctl.c err |= __put_user(f->track, &uf->track); f 478 block/compat_ioctl.c err |= __put_user(f->maxblock, &uf->maxblock); f 479 block/compat_ioctl.c err |= __put_user(f->maxtrack, &uf->maxtrack); f 480 block/compat_ioctl.c err |= __put_user(f->generation, &uf->generation); f 481 block/compat_ioctl.c err |= __put_user(f->keep_data, &uf->keep_data); f 482 block/compat_ioctl.c err |= __put_user(f->fd_ref, &uf->fd_ref); f 483 block/compat_ioctl.c err |= __put_user(f->fd_device, &uf->fd_device); f 484 block/compat_ioctl.c err |= __put_user(f->last_checked, &uf->last_checked); f 485 block/compat_ioctl.c err |= __put_user((u64)f->dmabuf, &uf->dmabuf); f 486 block/compat_ioctl.c err |= __put_user((u64)f->bufblocks, &uf->bufblocks); f 492 block/compat_ioctl.c struct floppy_fdc_state *f = karg; f 495 block/compat_ioctl.c err = __put_user(f->spec1, &uf->spec1); f 496 block/compat_ioctl.c err |= 
__put_user(f->spec2, &uf->spec2); f 497 block/compat_ioctl.c err |= __put_user(f->dtr, &uf->dtr); f 498 block/compat_ioctl.c err |= __put_user(f->version, &uf->version); f 499 block/compat_ioctl.c err |= __put_user(f->dor, &uf->dor); f 500 block/compat_ioctl.c err |= __put_user(f->address, &uf->address); f 502 block/compat_ioctl.c (char *)&f->address + sizeof(f->address), sizeof(int)); f 503 block/compat_ioctl.c err |= __put_user(f->driver_version, &uf->driver_version); f 504 block/compat_ioctl.c err |= __copy_to_user(uf->track, f->track, sizeof(f->track)); f 510 block/compat_ioctl.c struct floppy_write_errors *f = karg; f 513 block/compat_ioctl.c err = __put_user(f->write_errors, &uf->write_errors); f 514 block/compat_ioctl.c err |= __put_user(f->first_error_sector, &uf->first_error_sector); f 515 block/compat_ioctl.c err |= __put_user(f->first_error_generation, &uf->first_error_generation); f 516 block/compat_ioctl.c err |= __put_user(f->last_error_sector, &uf->last_error_sector); f 517 block/compat_ioctl.c err |= __put_user(f->last_error_generation, &uf->last_error_generation); f 518 block/compat_ioctl.c err |= __put_user(f->badness, &uf->badness); f 36 crypto/md5.c (w += f(x, y, z) + in, w = (w<<s | w>>(32-s)) + x) f 47 crypto/rmd128.c (a) += f((b), (c), (d)) + le32_to_cpup(&(x)) + (k); \ f 50 crypto/rmd160.c (a) += f((b), (c), (d)) + le32_to_cpup(&(x)) + (k); \ f 47 crypto/rmd256.c (a) += f((b), (c), (d)) + le32_to_cpup(&(x)) + (k); \ f 50 crypto/rmd320.c (a) += f((b), (c), (d)) + le32_to_cpup(&(x)) + (k); \ f 61 crypto/sha256_generic.c u32 a, b, c, d, e, f, g, h, t1, t2; f 75 crypto/sha256_generic.c e=state[4]; f=state[5]; g=state[6]; h=state[7]; f 78 crypto/sha256_generic.c t1 = h + e1(e) + Ch(e,f,g) + 0x428a2f98 + W[ 0]; f 80 crypto/sha256_generic.c t1 = g + e1(d) + Ch(d,e,f) + 0x71374491 + W[ 1]; f 82 crypto/sha256_generic.c t1 = f + e1(c) + Ch(c,d,e) + 0xb5c0fbcf + W[ 2]; f 83 crypto/sha256_generic.c t2 = e0(g) + Maj(g,h,a); b+=t1; f=t1+t2; f 85 crypto/sha256_generic.c t2 = e0(f) + Maj(f,g,h); a+=t1; e=t1+t2; f 87 crypto/sha256_generic.c t2 = e0(e) + Maj(e,f,g); h+=t1; d=t1+t2; f 89 crypto/sha256_generic.c t2 = e0(d) + Maj(d,e,f); g+=t1; c=t1+t2; f 91 crypto/sha256_generic.c t2 = e0(c) + Maj(c,d,e); f+=t1; b=t1+t2; f 92 crypto/sha256_generic.c t1 = a + e1(f) + Ch(f,g,h) + 0xab1c5ed5 + W[ 7]; f 95 crypto/sha256_generic.c t1 = h + e1(e) + Ch(e,f,g) + 0xd807aa98 + W[ 8]; f 97 crypto/sha256_generic.c t1 = g + e1(d) + Ch(d,e,f) + 0x12835b01 + W[ 9]; f 99 crypto/sha256_generic.c t1 = f + e1(c) + Ch(c,d,e) + 0x243185be + W[10]; f 100 crypto/sha256_generic.c t2 = e0(g) + Maj(g,h,a); b+=t1; f=t1+t2; f 102 crypto/sha256_generic.c t2 = e0(f) + Maj(f,g,h); a+=t1; e=t1+t2; f 104 crypto/sha256_generic.c t2 = e0(e) + Maj(e,f,g); h+=t1; d=t1+t2; f 106 crypto/sha256_generic.c t2 = e0(d) + Maj(d,e,f); g+=t1; c=t1+t2; f 108 crypto/sha256_generic.c t2 = e0(c) + Maj(c,d,e); f+=t1; b=t1+t2; f 109 crypto/sha256_generic.c t1 = a + e1(f) + Ch(f,g,h) + 0xc19bf174 + W[15]; f 112 crypto/sha256_generic.c t1 = h + e1(e) + Ch(e,f,g) + 0xe49b69c1 + W[16]; f 114 crypto/sha256_generic.c t1 = g + e1(d) + Ch(d,e,f) + 0xefbe4786 + W[17]; f 116 crypto/sha256_generic.c t1 = f + e1(c) + Ch(c,d,e) + 0x0fc19dc6 + W[18]; f 117 crypto/sha256_generic.c t2 = e0(g) + Maj(g,h,a); b+=t1; f=t1+t2; f 119 crypto/sha256_generic.c t2 = e0(f) + Maj(f,g,h); a+=t1; e=t1+t2; f 121 crypto/sha256_generic.c t2 = e0(e) + Maj(e,f,g); h+=t1; d=t1+t2; f 123 crypto/sha256_generic.c t2 = e0(d) + Maj(d,e,f); g+=t1; c=t1+t2; f 125 
crypto/sha256_generic.c t2 = e0(c) + Maj(c,d,e); f+=t1; b=t1+t2; f 126 crypto/sha256_generic.c t1 = a + e1(f) + Ch(f,g,h) + 0x76f988da + W[23]; f 129 crypto/sha256_generic.c t1 = h + e1(e) + Ch(e,f,g) + 0x983e5152 + W[24]; f 131 crypto/sha256_generic.c t1 = g + e1(d) + Ch(d,e,f) + 0xa831c66d + W[25]; f 133 crypto/sha256_generic.c t1 = f + e1(c) + Ch(c,d,e) + 0xb00327c8 + W[26]; f 134 crypto/sha256_generic.c t2 = e0(g) + Maj(g,h,a); b+=t1; f=t1+t2; f 136 crypto/sha256_generic.c t2 = e0(f) + Maj(f,g,h); a+=t1; e=t1+t2; f 138 crypto/sha256_generic.c t2 = e0(e) + Maj(e,f,g); h+=t1; d=t1+t2; f 140 crypto/sha256_generic.c t2 = e0(d) + Maj(d,e,f); g+=t1; c=t1+t2; f 142 crypto/sha256_generic.c t2 = e0(c) + Maj(c,d,e); f+=t1; b=t1+t2; f 143 crypto/sha256_generic.c t1 = a + e1(f) + Ch(f,g,h) + 0x14292967 + W[31]; f 146 crypto/sha256_generic.c t1 = h + e1(e) + Ch(e,f,g) + 0x27b70a85 + W[32]; f 148 crypto/sha256_generic.c t1 = g + e1(d) + Ch(d,e,f) + 0x2e1b2138 + W[33]; f 150 crypto/sha256_generic.c t1 = f + e1(c) + Ch(c,d,e) + 0x4d2c6dfc + W[34]; f 151 crypto/sha256_generic.c t2 = e0(g) + Maj(g,h,a); b+=t1; f=t1+t2; f 153 crypto/sha256_generic.c t2 = e0(f) + Maj(f,g,h); a+=t1; e=t1+t2; f 155 crypto/sha256_generic.c t2 = e0(e) + Maj(e,f,g); h+=t1; d=t1+t2; f 157 crypto/sha256_generic.c t2 = e0(d) + Maj(d,e,f); g+=t1; c=t1+t2; f 159 crypto/sha256_generic.c t2 = e0(c) + Maj(c,d,e); f+=t1; b=t1+t2; f 160 crypto/sha256_generic.c t1 = a + e1(f) + Ch(f,g,h) + 0x92722c85 + W[39]; f 163 crypto/sha256_generic.c t1 = h + e1(e) + Ch(e,f,g) + 0xa2bfe8a1 + W[40]; f 165 crypto/sha256_generic.c t1 = g + e1(d) + Ch(d,e,f) + 0xa81a664b + W[41]; f 167 crypto/sha256_generic.c t1 = f + e1(c) + Ch(c,d,e) + 0xc24b8b70 + W[42]; f 168 crypto/sha256_generic.c t2 = e0(g) + Maj(g,h,a); b+=t1; f=t1+t2; f 170 crypto/sha256_generic.c t2 = e0(f) + Maj(f,g,h); a+=t1; e=t1+t2; f 172 crypto/sha256_generic.c t2 = e0(e) + Maj(e,f,g); h+=t1; d=t1+t2; f 174 crypto/sha256_generic.c t2 = e0(d) + Maj(d,e,f); g+=t1; c=t1+t2; f 176 crypto/sha256_generic.c t2 = e0(c) + Maj(c,d,e); f+=t1; b=t1+t2; f 177 crypto/sha256_generic.c t1 = a + e1(f) + Ch(f,g,h) + 0x106aa070 + W[47]; f 180 crypto/sha256_generic.c t1 = h + e1(e) + Ch(e,f,g) + 0x19a4c116 + W[48]; f 182 crypto/sha256_generic.c t1 = g + e1(d) + Ch(d,e,f) + 0x1e376c08 + W[49]; f 184 crypto/sha256_generic.c t1 = f + e1(c) + Ch(c,d,e) + 0x2748774c + W[50]; f 185 crypto/sha256_generic.c t2 = e0(g) + Maj(g,h,a); b+=t1; f=t1+t2; f 187 crypto/sha256_generic.c t2 = e0(f) + Maj(f,g,h); a+=t1; e=t1+t2; f 189 crypto/sha256_generic.c t2 = e0(e) + Maj(e,f,g); h+=t1; d=t1+t2; f 191 crypto/sha256_generic.c t2 = e0(d) + Maj(d,e,f); g+=t1; c=t1+t2; f 193 crypto/sha256_generic.c t2 = e0(c) + Maj(c,d,e); f+=t1; b=t1+t2; f 194 crypto/sha256_generic.c t1 = a + e1(f) + Ch(f,g,h) + 0x682e6ff3 + W[55]; f 197 crypto/sha256_generic.c t1 = h + e1(e) + Ch(e,f,g) + 0x748f82ee + W[56]; f 199 crypto/sha256_generic.c t1 = g + e1(d) + Ch(d,e,f) + 0x78a5636f + W[57]; f 201 crypto/sha256_generic.c t1 = f + e1(c) + Ch(c,d,e) + 0x84c87814 + W[58]; f 202 crypto/sha256_generic.c t2 = e0(g) + Maj(g,h,a); b+=t1; f=t1+t2; f 204 crypto/sha256_generic.c t2 = e0(f) + Maj(f,g,h); a+=t1; e=t1+t2; f 206 crypto/sha256_generic.c t2 = e0(e) + Maj(e,f,g); h+=t1; d=t1+t2; f 208 crypto/sha256_generic.c t2 = e0(d) + Maj(d,e,f); g+=t1; c=t1+t2; f 210 crypto/sha256_generic.c t2 = e0(c) + Maj(c,d,e); f+=t1; b=t1+t2; f 211 crypto/sha256_generic.c t1 = a + e1(f) + Ch(f,g,h) + 0xc67178f2 + W[63]; f 215 crypto/sha256_generic.c state[4] += e; state[5] 
+= f; state[6] += g; state[7] += h; f 218 crypto/sha256_generic.c a = b = c = d = e = f = g = h = t1 = t2 = 0; f 94 crypto/sha512_generic.c u64 a, b, c, d, e, f, g, h, t1, t2; f 108 crypto/sha512_generic.c e=state[4]; f=state[5]; g=state[6]; h=state[7]; f 112 crypto/sha512_generic.c t1 = h + e1(e) + Ch(e,f,g) + sha512_K[i ] + W[i ]; f 114 crypto/sha512_generic.c t1 = g + e1(d) + Ch(d,e,f) + sha512_K[i+1] + W[i+1]; f 116 crypto/sha512_generic.c t1 = f + e1(c) + Ch(c,d,e) + sha512_K[i+2] + W[i+2]; f 117 crypto/sha512_generic.c t2 = e0(g) + Maj(g,h,a); b+=t1; f=t1+t2; f 119 crypto/sha512_generic.c t2 = e0(f) + Maj(f,g,h); a+=t1; e=t1+t2; f 121 crypto/sha512_generic.c t2 = e0(e) + Maj(e,f,g); h+=t1; d=t1+t2; f 123 crypto/sha512_generic.c t2 = e0(d) + Maj(d,e,f); g+=t1; c=t1+t2; f 125 crypto/sha512_generic.c t2 = e0(c) + Maj(c,d,e); f+=t1; b=t1+t2; f 126 crypto/sha512_generic.c t1 = a + e1(f) + Ch(f,g,h) + sha512_K[i+7] + W[i+7]; f 131 crypto/sha512_generic.c state[4] += e; state[5] += f; state[6] += g; state[7] += h; f 134 crypto/sha512_generic.c a = b = c = d = e = f = g = h = t1 = t2 = 0; f 102 crypto/xor.c struct xor_block_template *f, *fastest; f 133 crypto/xor.c for (f = fastest; f; f = f->next) f 134 crypto/xor.c if (f->speed > fastest->speed) f 135 crypto/xor.c fastest = f; f 32 fs/bfs/dir.c struct inode *dir = f->f_path.dentry->d_inode; f 41 fs/bfs/dir.c if (f->f_pos & (BFS_DIRENT_SIZE - 1)) { f 43 fs/bfs/dir.c (unsigned long)f->f_pos, f 49 fs/bfs/dir.c while (f->f_pos < dir->i_size) { f 50 fs/bfs/dir.c offset = f->f_pos & (BFS_BSIZE - 1); f 51 fs/bfs/dir.c block = BFS_I(dir)->i_sblock + (f->f_pos >> BFS_BSIZE_BITS); f 54 fs/bfs/dir.c f->f_pos += BFS_BSIZE - offset; f 61 fs/bfs/dir.c if (filldir(dirent, de->name, size, f->f_pos, f 70 fs/bfs/dir.c f->f_pos += BFS_DIRENT_SIZE; f 71 fs/bfs/dir.c } while ((offset < BFS_BSIZE) && (f->f_pos < dir->i_size)); f 76 fs/char_dev.c seq_printf(f, "%3d %s\n", cd->major, cd->name); f 164 fs/cifs/md5.c (w += f(x, y, z) + data, w = w<<s | w>>(32-s), w += x) f 32 fs/coda/coda_linux.c sprintf(s, "(%08x.%08x.%08x.%08x)", f->opaque[0], f->opaque[1], f->opaque[2], f->opaque[3]); f 398 fs/compat.c struct flock f; f 405 fs/compat.c ret = get_compat_flock(&f, compat_ptr(arg)); f 410 fs/compat.c ret = sys_fcntl(fd, cmd, (unsigned long)&f); f 424 fs/compat.c if (f.l_start > COMPAT_OFF_T_MAX) f 426 fs/compat.c if (f.l_len > COMPAT_OFF_T_MAX) f 427 fs/compat.c f.l_len = COMPAT_OFF_T_MAX; f 429 fs/compat.c ret = put_compat_flock(&f, compat_ptr(arg)); f 436 fs/compat.c ret = get_compat_flock64(&f, compat_ptr(arg)); f 443 fs/compat.c (unsigned long)&f); f 447 fs/compat.c if (f.l_start > COMPAT_LOFF_T_MAX) f 449 fs/compat.c if (f.l_len > COMPAT_LOFF_T_MAX) f 450 fs/compat.c f.l_len = COMPAT_LOFF_T_MAX; f 452 fs/compat.c ret = put_compat_flock64(&f, compat_ptr(arg)); f 70 fs/dnotify.c struct file *f; f 98 fs/dnotify.c f = fcheck(fd); f 103 fs/dnotify.c if (f != filp) f 493 fs/eventpoll.c return f->f_op == &eventpoll_fops; f 76 fs/ext2/xattr.c printk(f); \ f 84 fs/ext2/xattr.c printk(f); \ f 80 fs/ext3/xattr.c printk(f); \ f 88 fs/ext3/xattr.c printk(f); \ f 49 fs/ext4/ext4.h printk(KERN_DEBUG f, ## a); \ f 73 fs/ext4/xattr.c printk(f); \ f 81 fs/ext4/xattr.c printk(f); \ f 67 fs/file.c struct fdtable_defer *f = f 71 fs/file.c spin_lock_bh(&f->lock); f 72 fs/file.c fdt = f->next; f 73 fs/file.c f->next = NULL; f 74 fs/file.c spin_unlock_bh(&f->lock); f 367 fs/file.c struct file *f = *old_fds++; f 368 fs/file.c if (f) { f 369 fs/file.c get_file(f); f 379 fs/file.c 
rcu_assign_pointer(*new_fds++, f); f 39 fs/file_table.c struct file *f = container_of(head, struct file, f_u.fu_rcuhead); f 40 fs/file_table.c kmem_cache_free(filp_cachep, f); f 46 fs/file_table.c file_check_state(f); f 47 fs/file_table.c call_rcu(&f->f_u.fu_rcuhead, file_free_rcu); f 99 fs/file_table.c struct file * f; f 113 fs/file_table.c f = kmem_cache_zalloc(filp_cachep, GFP_KERNEL); f 114 fs/file_table.c if (f == NULL) f 118 fs/file_table.c if (security_file_alloc(f)) f 122 fs/file_table.c INIT_LIST_HEAD(&f->f_u.fu_list); f 123 fs/file_table.c atomic_long_set(&f->f_count, 1); f 124 fs/file_table.c rwlock_init(&f->f_owner.lock); f 125 fs/file_table.c f->f_uid = tsk->fsuid; f 126 fs/file_table.c f->f_gid = tsk->fsgid; f 127 fs/file_table.c eventpoll_init_file(f); f 129 fs/file_table.c return f; f 141 fs/file_table.c file_free(f); f 426 fs/hpfs/alloc.c struct fnode *f; f 428 fs/hpfs/alloc.c if (!(f = hpfs_get_sector(s, *fno, bh))) { f 432 fs/hpfs/alloc.c memset(f, 0, 512); f 433 fs/hpfs/alloc.c f->magic = FNODE_MAGIC; f 434 fs/hpfs/alloc.c f->ea_offs = 0xc4; f 435 fs/hpfs/alloc.c f->btree.n_free_nodes = 8; f 436 fs/hpfs/alloc.c f->btree.first_free = 8; f 437 fs/hpfs/alloc.c return f; f 399 fs/hpfs/anode.c anode_secno node = f; f 403 fs/hpfs/anode.c if (!(fnode = hpfs_map_fnode(s, f, &bh))) return; f 406 fs/hpfs/anode.c if (!(anode = hpfs_map_anode(s, f, &bh))) return; f 417 fs/hpfs/anode.c } else hpfs_free_sectors(s, f, 1); f 424 fs/hpfs/anode.c if (btree->u.internal[i].file_secno >= secs) goto f; f 428 fs/hpfs/anode.c f: f 454 fs/hpfs/anode.c hpfs_error(s, "there is an allocation error in file %08x, sector %08x", f, secs); f 77 fs/hpfs/dnode.c for (i = hpfs_inode->i_rddir_off; *i; i++) (*f)(*i, p1, p2); f 83 fs/hpfs/dnode.c if (*p == f) *p = t; f 1002 fs/hpfs/dnode.c name1 = f->name; f 1007 fs/hpfs/dnode.c if (f->len <= 15) f 1008 fs/hpfs/dnode.c memcpy(name2, name1, name1len = name2len = f->len); f 1015 fs/hpfs/dnode.c if (!(upf = hpfs_map_fnode(s, f->up, &bh))) { f 1021 fs/hpfs/dnode.c hpfs_error(s, "fnode %08x has non-directory parent %08x", fno, f->up); f 1038 fs/hpfs/dnode.c if (de->down) if (de_down_pointer(de) == downd) goto f; f 1062 fs/hpfs/dnode.c f: f 181 fs/jffs2/acl.c struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); f 188 fs/jffs2/acl.c acl = jffs2_iget_acl(inode, &f->i_acl_access); f 194 fs/jffs2/acl.c acl = jffs2_iget_acl(inode, &f->i_acl_default); f 221 fs/jffs2/acl.c jffs2_iset_acl(inode, &f->i_acl_access, acl); f 224 fs/jffs2/acl.c jffs2_iset_acl(inode, &f->i_acl_default, acl); f 252 fs/jffs2/acl.c struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); f 291 fs/jffs2/acl.c jffs2_iset_acl(inode, &f->i_acl_access, acl); f 294 fs/jffs2/acl.c jffs2_iset_acl(inode, &f->i_acl_default, acl); f 324 fs/jffs2/acl.c struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); f 328 fs/jffs2/acl.c f->i_acl_default = NULL; f 329 fs/jffs2/acl.c f->i_acl_access = NULL; f 342 fs/jffs2/acl.c jffs2_iset_acl(inode, &f->i_acl_default, acl); f 353 fs/jffs2/acl.c jffs2_iset_acl(inode, &f->i_acl_access, clone); f 362 fs/jffs2/acl.c struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); f 365 fs/jffs2/acl.c if (f->i_acl_default) { f 366 fs/jffs2/acl.c rc = __jffs2_set_acl(inode, JFFS2_XPREFIX_ACL_DEFAULT, f->i_acl_default); f 371 fs/jffs2/acl.c if (f->i_acl_access) { f 372 fs/jffs2/acl.c rc = __jffs2_set_acl(inode, JFFS2_XPREFIX_ACL_ACCESS, f->i_acl_access); f 382 fs/jffs2/acl.c if (f->i_acl_access && f->i_acl_access != JFFS2_ACL_NOT_CACHED) { f 383 fs/jffs2/acl.c 
posix_acl_release(f->i_acl_access); f 384 fs/jffs2/acl.c f->i_acl_access = JFFS2_ACL_NOT_CACHED; f 386 fs/jffs2/acl.c if (f->i_acl_default && f->i_acl_default != JFFS2_ACL_NOT_CACHED) { f 387 fs/jffs2/acl.c posix_acl_release(f->i_acl_default); f 388 fs/jffs2/acl.c f->i_acl_default = JFFS2_ACL_NOT_CACHED; f 65 fs/jffs2/debug.c mutex_lock(&f->sem); f 66 fs/jffs2/debug.c __jffs2_dbg_fragtree_paranoia_check_nolock(f); f 67 fs/jffs2/debug.c mutex_unlock(&f->sem); f 76 fs/jffs2/debug.c for (frag = frag_first(&f->fragtree); frag; frag = frag_next(frag)) { f 112 fs/jffs2/debug.c __jffs2_dbg_dump_fragtree_nolock(f); f 689 fs/jffs2/debug.c mutex_lock(&f->sem); f 690 fs/jffs2/debug.c jffs2_dbg_dump_fragtree_nolock(f); f 691 fs/jffs2/debug.c mutex_unlock(&f->sem); f 697 fs/jffs2/debug.c struct jffs2_node_frag *this = frag_first(&f->fragtree); f 701 fs/jffs2/debug.c printk(JFFS2_DBG_MSG_PREFIX " dump fragtree of ino #%u\n", f->inocache->ino); f 718 fs/jffs2/debug.c if (f->metadata) f 719 fs/jffs2/debug.c printk(JFFS2_DBG "metadata at 0x%08x\n", ref_offset(f->metadata->raw)); f 235 fs/jffs2/debug.h __jffs2_dbg_fragtree_paranoia_check(f) f 237 fs/jffs2/debug.h __jffs2_dbg_fragtree_paranoia_check_nolock(f) f 262 fs/jffs2/debug.h __jffs2_dbg_dump_fragtree(f); f 264 fs/jffs2/debug.h __jffs2_dbg_dump_fragtree_nolock(f); f 121 fs/jffs2/dir.c struct jffs2_inode_info *f; f 129 fs/jffs2/dir.c f = JFFS2_INODE_INFO(inode); f 149 fs/jffs2/dir.c mutex_lock(&f->sem); f 150 fs/jffs2/dir.c for (fd = f->dents; fd; fd = fd->next) { f 169 fs/jffs2/dir.c mutex_unlock(&f->sem); f 182 fs/jffs2/dir.c struct jffs2_inode_info *f, *dir_f; f 208 fs/jffs2/dir.c f = JFFS2_INODE_INFO(inode); f 216 fs/jffs2/dir.c mutex_unlock(&f->sem); f 218 fs/jffs2/dir.c ret = jffs2_do_create(c, dir_f, f, ri, f 230 fs/jffs2/dir.c f->inocache->pino_nlink, inode->i_mapping->nrpages)); f 265 fs/jffs2/dir.c struct jffs2_inode_info *f = JFFS2_INODE_INFO(old_dentry->d_inode); f 272 fs/jffs2/dir.c if (!f->inocache) f 283 fs/jffs2/dir.c ret = jffs2_do_link(c, dir_f, f->inocache->ino, type, dentry->d_name.name, dentry->d_name.len, now); f 286 fs/jffs2/dir.c mutex_lock(&f->sem); f 287 fs/jffs2/dir.c old_dentry->d_inode->i_nlink = ++f->inocache->pino_nlink; f 288 fs/jffs2/dir.c mutex_unlock(&f->sem); f 300 fs/jffs2/dir.c struct jffs2_inode_info *f, *dir_f; f 345 fs/jffs2/dir.c f = JFFS2_INODE_INFO(inode); f 356 fs/jffs2/dir.c fn = jffs2_write_dnode(c, f, ri, target, targetlen, ALLOC_NORMAL); f 362 fs/jffs2/dir.c mutex_unlock(&f->sem); f 369 fs/jffs2/dir.c f->target = kmalloc(targetlen + 1, GFP_KERNEL); f 370 fs/jffs2/dir.c if (!f->target) { f 372 fs/jffs2/dir.c mutex_unlock(&f->sem); f 378 fs/jffs2/dir.c memcpy(f->target, target, targetlen + 1); f 379 fs/jffs2/dir.c D1(printk(KERN_DEBUG "jffs2_symlink: symlink's target '%s' cached\n", (char *)f->target)); f 384 fs/jffs2/dir.c f->metadata = fn; f 385 fs/jffs2/dir.c mutex_unlock(&f->sem); f 463 fs/jffs2/dir.c struct jffs2_inode_info *f, *dir_f; f 505 fs/jffs2/dir.c f = JFFS2_INODE_INFO(inode); f 510 fs/jffs2/dir.c f->inocache->pino_nlink = dir_i->i_ino; f 515 fs/jffs2/dir.c fn = jffs2_write_dnode(c, f, ri, NULL, 0, ALLOC_NORMAL); f 521 fs/jffs2/dir.c mutex_unlock(&f->sem); f 529 fs/jffs2/dir.c f->metadata = fn; f 530 fs/jffs2/dir.c mutex_unlock(&f->sem); f 610 fs/jffs2/dir.c struct jffs2_inode_info *f = JFFS2_INODE_INFO(dentry->d_inode); f 615 fs/jffs2/dir.c for (fd = f->dents ; fd; fd = fd->next) { f 621 fs/jffs2/dir.c dentry->d_name.len, f, now); f 632 fs/jffs2/dir.c struct jffs2_inode_info *f, *dir_f; f 679 
fs/jffs2/dir.c f = JFFS2_INODE_INFO(inode); f 689 fs/jffs2/dir.c fn = jffs2_write_dnode(c, f, ri, (char *)&dev, devlen, ALLOC_NORMAL); f 695 fs/jffs2/dir.c mutex_unlock(&f->sem); f 703 fs/jffs2/dir.c f->metadata = fn; f 704 fs/jffs2/dir.c mutex_unlock(&f->sem); f 860 fs/jffs2/dir.c struct jffs2_inode_info *f = JFFS2_INODE_INFO(old_dentry->d_inode); f 861 fs/jffs2/dir.c mutex_lock(&f->sem); f 863 fs/jffs2/dir.c if (f->inocache && !S_ISDIR(old_dentry->d_inode->i_mode)) f 864 fs/jffs2/dir.c f->inocache->pino_nlink++; f 865 fs/jffs2/dir.c mutex_unlock(&f->sem); f 76 fs/jffs2/file.c struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); f 88 fs/jffs2/file.c ret = jffs2_read_inode_range(c, f, pg_buf, pg->index << PAGE_CACHE_SHIFT, PAGE_CACHE_SIZE); f 115 fs/jffs2/file.c struct jffs2_inode_info *f = JFFS2_INODE_INFO(pg->mapping->host); f 118 fs/jffs2/file.c mutex_lock(&f->sem); f 120 fs/jffs2/file.c mutex_unlock(&f->sem); f 130 fs/jffs2/file.c struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); f 157 fs/jffs2/file.c mutex_lock(&f->sem); f 165 fs/jffs2/file.c ri.ino = cpu_to_je32(f->inocache->ino); f 166 fs/jffs2/file.c ri.version = cpu_to_je32(++f->highest_version); f 179 fs/jffs2/file.c fn = jffs2_write_dnode(c, f, &ri, NULL, 0, ALLOC_NORMAL); f 184 fs/jffs2/file.c mutex_unlock(&f->sem); f 187 fs/jffs2/file.c ret = jffs2_add_full_dnode_to_inode(c, f, fn); f 188 fs/jffs2/file.c if (f->metadata) { f 189 fs/jffs2/file.c jffs2_mark_node_obsolete(c, f->metadata->raw); f 190 fs/jffs2/file.c jffs2_free_full_dnode(f->metadata); f 191 fs/jffs2/file.c f->metadata = NULL; f 198 fs/jffs2/file.c mutex_unlock(&f->sem); f 203 fs/jffs2/file.c mutex_unlock(&f->sem); f 212 fs/jffs2/file.c mutex_lock(&f->sem); f 214 fs/jffs2/file.c mutex_unlock(&f->sem); f 235 fs/jffs2/file.c struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); f 282 fs/jffs2/file.c ret = jffs2_write_inode_range(c, f, ri, page_address(pg) + aligned_start, f 30 fs/jffs2/fs.c struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); f 54 fs/jffs2/fs.c mutex_lock(&f->sem); f 55 fs/jffs2/fs.c mdatalen = f->metadata->size; f 56 fs/jffs2/fs.c mdata = kmalloc(f->metadata->size, GFP_USER); f 58 fs/jffs2/fs.c mutex_unlock(&f->sem); f 61 fs/jffs2/fs.c ret = jffs2_read_dnode(c, f, f->metadata, mdata, 0, mdatalen); f 63 fs/jffs2/fs.c mutex_unlock(&f->sem); f 67 fs/jffs2/fs.c mutex_unlock(&f->sem); f 86 fs/jffs2/fs.c mutex_lock(&f->sem); f 95 fs/jffs2/fs.c ri->version = cpu_to_je32(++f->highest_version); f 130 fs/jffs2/fs.c new_metadata = jffs2_write_dnode(c, f, ri, mdata, mdatalen, alloc_type); f 137 fs/jffs2/fs.c mutex_unlock(&f->sem); f 149 fs/jffs2/fs.c old_metadata = f->metadata; f 152 fs/jffs2/fs.c jffs2_truncate_fragtree (c, &f->fragtree, iattr->ia_size); f 155 fs/jffs2/fs.c jffs2_add_full_dnode_to_inode(c, f, new_metadata); f 158 fs/jffs2/fs.c f->metadata = NULL; f 160 fs/jffs2/fs.c f->metadata = new_metadata; f 168 fs/jffs2/fs.c mutex_unlock(&f->sem); f 231 fs/jffs2/fs.c struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); f 234 fs/jffs2/fs.c jffs2_do_clear_inode(c, f); f 239 fs/jffs2/fs.c struct jffs2_inode_info *f; f 255 fs/jffs2/fs.c f = JFFS2_INODE_INFO(inode); f 258 fs/jffs2/fs.c jffs2_init_inode_info(f); f 259 fs/jffs2/fs.c mutex_lock(&f->sem); f 261 fs/jffs2/fs.c ret = jffs2_do_read_inode(c, f, inode->i_ino, &latest_node); f 264 fs/jffs2/fs.c mutex_unlock(&f->sem); f 276 fs/jffs2/fs.c inode->i_nlink = f->inocache->pino_nlink; f 291 fs/jffs2/fs.c for (fd=f->dents; fd; fd = fd->next) { f 313 fs/jffs2/fs.c if (f->metadata->size != sizeof(jdev.old) 
&& f 314 fs/jffs2/fs.c f->metadata->size != sizeof(jdev.new)) { f 315 fs/jffs2/fs.c printk(KERN_NOTICE "Device node has strange size %d\n", f->metadata->size); f 319 fs/jffs2/fs.c ret = jffs2_read_dnode(c, f, f->metadata, (char *)&jdev, 0, f->metadata->size); f 325 fs/jffs2/fs.c if (f->metadata->size == sizeof(jdev.old)) f 340 fs/jffs2/fs.c mutex_unlock(&f->sem); f 349 fs/jffs2/fs.c mutex_unlock(&f->sem); f 350 fs/jffs2/fs.c jffs2_do_clear_inode(c, f); f 425 fs/jffs2/fs.c struct jffs2_inode_info *f; f 437 fs/jffs2/fs.c f = JFFS2_INODE_INFO(inode); f 438 fs/jffs2/fs.c jffs2_init_inode_info(f); f 439 fs/jffs2/fs.c mutex_lock(&f->sem); f 461 fs/jffs2/fs.c ret = jffs2_do_new_inode (c, f, mode, ri); f 584 fs/jffs2/fs.c iput(OFNI_EDONI_2SFFJ(f)); f 656 fs/jffs2/fs.c struct inode *inode = OFNI_EDONI_2SFFJ(f); f 121 fs/jffs2/gc.c struct jffs2_inode_info *f; f 404 fs/jffs2/gc.c f = jffs2_gc_fetch_inode(c, inum, !nlink); f 405 fs/jffs2/gc.c if (IS_ERR(f)) { f 406 fs/jffs2/gc.c ret = PTR_ERR(f); f 409 fs/jffs2/gc.c if (!f) { f 414 fs/jffs2/gc.c ret = jffs2_garbage_collect_live(c, jeb, raw, f); f 416 fs/jffs2/gc.c jffs2_gc_release_inode(c, f); f 454 fs/jffs2/gc.c mutex_lock(&f->sem); f 475 fs/jffs2/gc.c if (f->metadata && f->metadata->raw == raw) { f 476 fs/jffs2/gc.c fn = f->metadata; f 477 fs/jffs2/gc.c ret = jffs2_garbage_collect_metadata(c, jeb, f, fn); f 482 fs/jffs2/gc.c for (frag = frag_first(&f->fragtree); frag; frag = frag_next(frag)) { f 494 fs/jffs2/gc.c ret = jffs2_garbage_collect_pristine(c, f->inocache, raw); f 497 fs/jffs2/gc.c frag->node->raw = f->inocache->nodes; f 505 fs/jffs2/gc.c ret = jffs2_garbage_collect_hole(c, jeb, f, fn, start, end); f 508 fs/jffs2/gc.c ret = jffs2_garbage_collect_dnode(c, jeb, f, fn, start, end); f 514 fs/jffs2/gc.c for (fd = f->dents; fd; fd=fd->next) { f 520 fs/jffs2/gc.c ret = jffs2_garbage_collect_dirent(c, jeb, f, fd); f 522 fs/jffs2/gc.c ret = jffs2_garbage_collect_deletion_dirent(c, jeb, f, fd); f 525 fs/jffs2/gc.c ref_offset(raw), f->inocache->ino); f 534 fs/jffs2/gc.c mutex_unlock(&f->sem); f 707 fs/jffs2/gc.c if (S_ISBLK(JFFS2_F_I_MODE(f)) || f 708 fs/jffs2/gc.c S_ISCHR(JFFS2_F_I_MODE(f)) ) { f 710 fs/jffs2/gc.c mdatalen = jffs2_encode_dev(&dev, JFFS2_F_I_RDEV(f)); f 713 fs/jffs2/gc.c } else if (S_ISLNK(JFFS2_F_I_MODE(f))) { f 720 fs/jffs2/gc.c ret = jffs2_read_dnode(c, f, fn, mdata, 0, mdatalen); f 738 fs/jffs2/gc.c last_frag = frag_last(&f->fragtree); f 744 fs/jffs2/gc.c ilen = JFFS2_F_I_SIZE(f); f 752 fs/jffs2/gc.c ri.ino = cpu_to_je32(f->inocache->ino); f 753 fs/jffs2/gc.c ri.version = cpu_to_je32(++f->highest_version); f 754 fs/jffs2/gc.c ri.mode = cpu_to_jemode(JFFS2_F_I_MODE(f)); f 755 fs/jffs2/gc.c ri.uid = cpu_to_je16(JFFS2_F_I_UID(f)); f 756 fs/jffs2/gc.c ri.gid = cpu_to_je16(JFFS2_F_I_GID(f)); f 758 fs/jffs2/gc.c ri.atime = cpu_to_je32(JFFS2_F_I_ATIME(f)); f 759 fs/jffs2/gc.c ri.ctime = cpu_to_je32(JFFS2_F_I_CTIME(f)); f 760 fs/jffs2/gc.c ri.mtime = cpu_to_je32(JFFS2_F_I_MTIME(f)); f 768 fs/jffs2/gc.c new_fn = jffs2_write_dnode(c, f, &ri, mdata, mdatalen, ALLOC_GC); f 777 fs/jffs2/gc.c f->metadata = new_fn; f 779 fs/jffs2/gc.c if (S_ISLNK(JFFS2_F_I_MODE(f))) f 798 fs/jffs2/gc.c rd.pino = cpu_to_je32(f->inocache->ino); f 799 fs/jffs2/gc.c rd.version = cpu_to_je32(++f->highest_version); f 803 fs/jffs2/gc.c if (JFFS2_F_I_MTIME(f) == JFFS2_F_I_CTIME(f)) f 804 fs/jffs2/gc.c rd.mctime = cpu_to_je32(JFFS2_F_I_MTIME(f)); f 818 fs/jffs2/gc.c new_fd = jffs2_write_dirent(c, f, &rd, fd->name, rd.nsize, ALLOC_GC); f 824 fs/jffs2/gc.c 
jffs2_add_fd_to_list(c, new_fd, &f->dents); f 831 fs/jffs2/gc.c struct jffs2_full_dirent **fdp = &f->dents; f 857 fs/jffs2/gc.c for (raw = f->inocache->nodes; raw != (void *)f->inocache; raw = raw->next_in_ino) { f 914 fs/jffs2/gc.c return jffs2_garbage_collect_dirent(c, jeb, f, fd); f 934 fs/jffs2/gc.c printk(KERN_WARNING "Deletion dirent \"%s\" not found in list for ino #%u\n", fd->name, f->inocache->ino); f 952 fs/jffs2/gc.c f->inocache->ino, start, end)); f 985 fs/jffs2/gc.c start, end, f->inocache->ino); f 991 fs/jffs2/gc.c start, end, f->inocache->ino); f 1001 fs/jffs2/gc.c ri.ino = cpu_to_je32(f->inocache->ino); f 1002 fs/jffs2/gc.c ri.version = cpu_to_je32(++f->highest_version); f 1009 fs/jffs2/gc.c frag = frag_last(&f->fragtree); f 1015 fs/jffs2/gc.c ilen = JFFS2_F_I_SIZE(f); f 1017 fs/jffs2/gc.c ri.mode = cpu_to_jemode(JFFS2_F_I_MODE(f)); f 1018 fs/jffs2/gc.c ri.uid = cpu_to_je16(JFFS2_F_I_UID(f)); f 1019 fs/jffs2/gc.c ri.gid = cpu_to_je16(JFFS2_F_I_GID(f)); f 1021 fs/jffs2/gc.c ri.atime = cpu_to_je32(JFFS2_F_I_ATIME(f)); f 1022 fs/jffs2/gc.c ri.ctime = cpu_to_je32(JFFS2_F_I_CTIME(f)); f 1023 fs/jffs2/gc.c ri.mtime = cpu_to_je32(JFFS2_F_I_MTIME(f)); f 1034 fs/jffs2/gc.c new_fn = jffs2_write_dnode(c, f, &ri, NULL, 0, ALLOC_GC); f 1040 fs/jffs2/gc.c if (je32_to_cpu(ri.version) == f->highest_version) { f 1041 fs/jffs2/gc.c jffs2_add_full_dnode_to_inode(c, f, new_fn); f 1042 fs/jffs2/gc.c if (f->metadata) { f 1043 fs/jffs2/gc.c jffs2_mark_node_obsolete(c, f->metadata->raw); f 1044 fs/jffs2/gc.c jffs2_free_full_dnode(f->metadata); f 1045 fs/jffs2/gc.c f->metadata = NULL; f 1058 fs/jffs2/gc.c fn->frags, je32_to_cpu(ri.version), f->highest_version, f 1065 fs/jffs2/gc.c for (frag = jffs2_lookup_node_frag(&f->fragtree, fn->ofs); f 1105 fs/jffs2/gc.c f->inocache->ino, start, end)); f 1124 fs/jffs2/gc.c frag = jffs2_lookup_node_frag(&f->fragtree, start); f 1179 fs/jffs2/gc.c frag = jffs2_lookup_node_frag(&f->fragtree, end-1); f 1228 fs/jffs2/gc.c D1(BUG_ON(end > frag_last(&f->fragtree)->ofs + frag_last(&f->fragtree)->size)); f 1240 fs/jffs2/gc.c pg_ptr = jffs2_gc_fetch_page(c, f, start, &pg); f 1266 fs/jffs2/gc.c comprtype = jffs2_compress(c, f, writebuf, &comprbuf, &datalen, &cdatalen); f 1273 fs/jffs2/gc.c ri.ino = cpu_to_je32(f->inocache->ino); f 1274 fs/jffs2/gc.c ri.version = cpu_to_je32(++f->highest_version); f 1275 fs/jffs2/gc.c ri.mode = cpu_to_jemode(JFFS2_F_I_MODE(f)); f 1276 fs/jffs2/gc.c ri.uid = cpu_to_je16(JFFS2_F_I_UID(f)); f 1277 fs/jffs2/gc.c ri.gid = cpu_to_je16(JFFS2_F_I_GID(f)); f 1278 fs/jffs2/gc.c ri.isize = cpu_to_je32(JFFS2_F_I_SIZE(f)); f 1279 fs/jffs2/gc.c ri.atime = cpu_to_je32(JFFS2_F_I_ATIME(f)); f 1280 fs/jffs2/gc.c ri.ctime = cpu_to_je32(JFFS2_F_I_CTIME(f)); f 1281 fs/jffs2/gc.c ri.mtime = cpu_to_je32(JFFS2_F_I_MTIME(f)); f 1290 fs/jffs2/gc.c new_fn = jffs2_write_dnode(c, f, &ri, comprbuf, cdatalen, ALLOC_GC); f 1299 fs/jffs2/gc.c ret = jffs2_add_full_dnode_to_inode(c, f, new_fn); f 1301 fs/jffs2/gc.c if (f->metadata) { f 1302 fs/jffs2/gc.c jffs2_mark_node_obsolete(c, f->metadata->raw); f 1303 fs/jffs2/gc.c jffs2_free_full_dnode(f->metadata); f 1304 fs/jffs2/gc.c f->metadata = NULL; f 378 fs/jffs2/nodelist.c ret = jffs2_add_frag_to_fragtree(c, &f->fragtree, newfrag); f 402 fs/jffs2/nodelist.c jffs2_dbg_fragtree_paranoia_check_nolock(f); f 23 fs/jffs2/os-linux.h #define OFNI_EDONI_2SFFJ(f) (&(f)->vfs_inode) f 28 fs/jffs2/os-linux.h #define JFFS2_F_I_SIZE(f) (OFNI_EDONI_2SFFJ(f)->i_size) f 29 fs/jffs2/os-linux.h #define JFFS2_F_I_MODE(f) 
(OFNI_EDONI_2SFFJ(f)->i_mode) f 30 fs/jffs2/os-linux.h #define JFFS2_F_I_UID(f) (OFNI_EDONI_2SFFJ(f)->i_uid) f 31 fs/jffs2/os-linux.h #define JFFS2_F_I_GID(f) (OFNI_EDONI_2SFFJ(f)->i_gid) f 32 fs/jffs2/os-linux.h #define JFFS2_F_I_RDEV(f) (OFNI_EDONI_2SFFJ(f)->i_rdev) f 36 fs/jffs2/os-linux.h #define JFFS2_F_I_CTIME(f) (OFNI_EDONI_2SFFJ(f)->i_ctime.tv_sec) f 37 fs/jffs2/os-linux.h #define JFFS2_F_I_MTIME(f) (OFNI_EDONI_2SFFJ(f)->i_mtime.tv_sec) f 38 fs/jffs2/os-linux.h #define JFFS2_F_I_ATIME(f) (OFNI_EDONI_2SFFJ(f)->i_atime.tv_sec) f 52 fs/jffs2/os-linux.h f->highest_version = 0; f 53 fs/jffs2/os-linux.h f->fragtree = RB_ROOT; f 54 fs/jffs2/os-linux.h f->metadata = NULL; f 55 fs/jffs2/os-linux.h f->dents = NULL; f 56 fs/jffs2/os-linux.h f->target = NULL; f 57 fs/jffs2/os-linux.h f->flags = 0; f 58 fs/jffs2/os-linux.h f->usercompr = 0; f 60 fs/jffs2/os-linux.h f->i_acl_access = JFFS2_ACL_NOT_CACHED; f 61 fs/jffs2/os-linux.h f->i_acl_default = JFFS2_ACL_NOT_CACHED; f 131 fs/jffs2/read.c ret = jffs2_decompress(c, f, ri->compr | (ri->usercompr << 8), readbuf, decomprbuf, je32_to_cpu(ri->csize), je32_to_cpu(ri->dsize)); f 161 fs/jffs2/read.c f->inocache->ino, offset, offset+len)); f 163 fs/jffs2/read.c frag = jffs2_lookup_node_frag(&f->fragtree, offset); f 173 fs/jffs2/read.c D1(printk(KERN_NOTICE "Eep. Hole in ino #%u fraglist. frag->ofs = 0x%08x, offset = 0x%08x\n", f->inocache->ino, frag->ofs, offset)); f 198 fs/jffs2/read.c ret = jffs2_read_dnode(c, f, frag->node, buf, fragofs + frag->ofs - frag->node->ofs, readlen); f 491 fs/jffs2/readinode.c ret = jffs2_add_full_dnode_to_inode(c, f, this->fn); f 965 fs/jffs2/readinode.c dbg_readinode("ino #%u\n", f->inocache->ino); f 975 fs/jffs2/readinode.c valid_ref = jffs2_first_valid_node(f->inocache->nodes); f 976 fs/jffs2/readinode.c if (!valid_ref && f->inocache->ino != 1) f 977 fs/jffs2/readinode.c JFFS2_WARNING("Eep. 
No valid nodes for ino #%u.\n", f->inocache->ino); f 1103 fs/jffs2/readinode.c f->highest_version = rii->highest_version; f 1106 fs/jffs2/readinode.c f->inocache->ino, rii->highest_version, rii->latest_mctime, f 1127 fs/jffs2/readinode.c dbg_readinode("ino #%u pino/nlink is %d\n", f->inocache->ino, f 1128 fs/jffs2/readinode.c f->inocache->pino_nlink); f 1133 fs/jffs2/readinode.c ret = jffs2_get_inode_nodes(c, f, &rii); f 1136 fs/jffs2/readinode.c JFFS2_ERROR("cannot read nodes for ino %u, returned error is %d\n", f->inocache->ino, ret); f 1137 fs/jffs2/readinode.c if (f->inocache->state == INO_STATE_READING) f 1138 fs/jffs2/readinode.c jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT); f 1142 fs/jffs2/readinode.c ret = jffs2_build_inode_fragtree(c, f, &rii); f 1145 fs/jffs2/readinode.c f->inocache->ino, ret); f 1146 fs/jffs2/readinode.c if (f->inocache->state == INO_STATE_READING) f 1147 fs/jffs2/readinode.c jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT); f 1160 fs/jffs2/readinode.c f->metadata = rii.mdata_tn->fn; f 1168 fs/jffs2/readinode.c f->dents = rii.fds; f 1170 fs/jffs2/readinode.c jffs2_dbg_fragtree_paranoia_check_nolock(f); f 1174 fs/jffs2/readinode.c if (f->inocache->ino != 1) { f 1175 fs/jffs2/readinode.c JFFS2_WARNING("no data nodes found for ino #%u\n", f->inocache->ino); f 1177 fs/jffs2/readinode.c if (f->inocache->state == INO_STATE_READING) f 1178 fs/jffs2/readinode.c jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT); f 1189 fs/jffs2/readinode.c if (f->inocache->state == INO_STATE_READING) f 1190 fs/jffs2/readinode.c jffs2_set_inocache_state(c, f->inocache, INO_STATE_PRESENT); f 1199 fs/jffs2/readinode.c mutex_unlock(&f->sem); f 1200 fs/jffs2/readinode.c jffs2_do_clear_inode(c, f); f 1207 fs/jffs2/readinode.c f->inocache->ino, ref_offset(rii.latest_ref)); f 1208 fs/jffs2/readinode.c mutex_unlock(&f->sem); f 1209 fs/jffs2/readinode.c jffs2_do_clear_inode(c, f); f 1225 fs/jffs2/readinode.c new_size = jffs2_truncate_fragtree(c, &f->fragtree, je32_to_cpu(latest_node->isize)); f 1228 fs/jffs2/readinode.c f->inocache->ino, je32_to_cpu(latest_node->isize), new_size); f 1241 fs/jffs2/readinode.c if (f->inocache->state != INO_STATE_CHECKING) { f 1245 fs/jffs2/readinode.c f->target = kmalloc(je32_to_cpu(latest_node->csize) + 1, GFP_KERNEL); f 1246 fs/jffs2/readinode.c if (!f->target) { f 1248 fs/jffs2/readinode.c mutex_unlock(&f->sem); f 1249 fs/jffs2/readinode.c jffs2_do_clear_inode(c, f); f 1254 fs/jffs2/readinode.c je32_to_cpu(latest_node->csize), &retlen, (char *)f->target); f 1259 fs/jffs2/readinode.c kfree(f->target); f 1260 fs/jffs2/readinode.c f->target = NULL; f 1261 fs/jffs2/readinode.c mutex_unlock(&f->sem); f 1262 fs/jffs2/readinode.c jffs2_do_clear_inode(c, f); f 1266 fs/jffs2/readinode.c f->target[je32_to_cpu(latest_node->csize)] = '\0'; f 1267 fs/jffs2/readinode.c dbg_readinode("symlink's target '%s' cached\n", f->target); f 1276 fs/jffs2/readinode.c if (f->metadata) { f 1278 fs/jffs2/readinode.c f->inocache->ino, jemode_to_cpu(latest_node->mode)); f 1279 fs/jffs2/readinode.c mutex_unlock(&f->sem); f 1280 fs/jffs2/readinode.c jffs2_do_clear_inode(c, f); f 1283 fs/jffs2/readinode.c if (!frag_first(&f->fragtree)) { f 1285 fs/jffs2/readinode.c f->inocache->ino, jemode_to_cpu(latest_node->mode)); f 1286 fs/jffs2/readinode.c mutex_unlock(&f->sem); f 1287 fs/jffs2/readinode.c jffs2_do_clear_inode(c, f); f 1291 fs/jffs2/readinode.c if (frag_next(frag_first(&f->fragtree))) { f 1293 fs/jffs2/readinode.c f->inocache->ino, 
jemode_to_cpu(latest_node->mode)); f 1295 fs/jffs2/readinode.c mutex_unlock(&f->sem); f 1296 fs/jffs2/readinode.c jffs2_do_clear_inode(c, f); f 1300 fs/jffs2/readinode.c f->metadata = frag_first(&f->fragtree)->node; f 1301 fs/jffs2/readinode.c jffs2_free_node_frag(frag_first(&f->fragtree)); f 1302 fs/jffs2/readinode.c f->fragtree = RB_ROOT; f 1305 fs/jffs2/readinode.c if (f->inocache->state == INO_STATE_READING) f 1306 fs/jffs2/readinode.c jffs2_set_inocache_state(c, f->inocache, INO_STATE_PRESENT); f 1319 fs/jffs2/readinode.c f->inocache = jffs2_get_ino_cache(c, ino); f 1321 fs/jffs2/readinode.c if (f->inocache) { f 1323 fs/jffs2/readinode.c switch(f->inocache->state) { f 1326 fs/jffs2/readinode.c f->inocache->state = INO_STATE_READING; f 1334 fs/jffs2/readinode.c dbg_readinode("waiting for ino #%u in state %d\n", ino, f->inocache->state); f 1343 fs/jffs2/readinode.c JFFS2_ERROR("Eep. Trying to read_inode #%u when it's already in state %d!\n", ino, f->inocache->state); f 1345 fs/jffs2/readinode.c f->inocache = NULL; f 1354 fs/jffs2/readinode.c if (!f->inocache && ino == 1) { f 1356 fs/jffs2/readinode.c f->inocache = jffs2_alloc_inode_cache(); f 1357 fs/jffs2/readinode.c if (!f->inocache) { f 1362 fs/jffs2/readinode.c memset(f->inocache, 0, sizeof(struct jffs2_inode_cache)); f 1363 fs/jffs2/readinode.c f->inocache->ino = f->inocache->pino_nlink = 1; f 1364 fs/jffs2/readinode.c f->inocache->nodes = (struct jffs2_raw_node_ref *)f->inocache; f 1365 fs/jffs2/readinode.c f->inocache->state = INO_STATE_READING; f 1366 fs/jffs2/readinode.c jffs2_add_ino_cache(c, f->inocache); f 1368 fs/jffs2/readinode.c if (!f->inocache) { f 1373 fs/jffs2/readinode.c return jffs2_do_read_inode_internal(c, f, latest_node); f 1379 fs/jffs2/readinode.c struct jffs2_inode_info *f = kzalloc(sizeof(*f), GFP_KERNEL); f 1382 fs/jffs2/readinode.c if (!f) f 1385 fs/jffs2/readinode.c mutex_init(&f->sem); f 1386 fs/jffs2/readinode.c mutex_lock(&f->sem); f 1387 fs/jffs2/readinode.c f->inocache = ic; f 1389 fs/jffs2/readinode.c ret = jffs2_do_read_inode_internal(c, f, &n); f 1391 fs/jffs2/readinode.c mutex_unlock(&f->sem); f 1392 fs/jffs2/readinode.c jffs2_do_clear_inode(c, f); f 1394 fs/jffs2/readinode.c kfree (f); f 1403 fs/jffs2/readinode.c jffs2_clear_acl(f); f 1404 fs/jffs2/readinode.c jffs2_xattr_delete_inode(c, f->inocache); f 1405 fs/jffs2/readinode.c mutex_lock(&f->sem); f 1406 fs/jffs2/readinode.c deleted = f->inocache && !f->inocache->pino_nlink; f 1408 fs/jffs2/readinode.c if (f->inocache && f->inocache->state != INO_STATE_CHECKING) f 1409 fs/jffs2/readinode.c jffs2_set_inocache_state(c, f->inocache, INO_STATE_CLEARING); f 1411 fs/jffs2/readinode.c if (f->metadata) { f 1413 fs/jffs2/readinode.c jffs2_mark_node_obsolete(c, f->metadata->raw); f 1414 fs/jffs2/readinode.c jffs2_free_full_dnode(f->metadata); f 1417 fs/jffs2/readinode.c jffs2_kill_fragtree(&f->fragtree, deleted?c:NULL); f 1419 fs/jffs2/readinode.c if (f->target) { f 1420 fs/jffs2/readinode.c kfree(f->target); f 1421 fs/jffs2/readinode.c f->target = NULL; f 1424 fs/jffs2/readinode.c fds = f->dents; f 1431 fs/jffs2/readinode.c if (f->inocache && f->inocache->state != INO_STATE_CHECKING) { f 1432 fs/jffs2/readinode.c jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT); f 1433 fs/jffs2/readinode.c if (f->inocache->nodes == (void *)f->inocache) f 1434 fs/jffs2/readinode.c jffs2_del_ino_cache(c, f->inocache); f 1437 fs/jffs2/readinode.c mutex_unlock(&f->sem); f 34 fs/jffs2/super.c struct jffs2_inode_info *f; f 36 fs/jffs2/super.c f = 
kmem_cache_alloc(jffs2_inode_cachep, GFP_KERNEL); f 37 fs/jffs2/super.c if (!f) f 39 fs/jffs2/super.c return &f->vfs_inode; f 49 fs/jffs2/super.c struct jffs2_inode_info *f = foo; f 51 fs/jffs2/super.c mutex_init(&f->sem); f 52 fs/jffs2/super.c inode_init_once(&f->vfs_inode); f 34 fs/jffs2/symlink.c struct jffs2_inode_info *f = JFFS2_INODE_INFO(dentry->d_inode); f 35 fs/jffs2/symlink.c char *p = (char *)f->target; f 54 fs/jffs2/symlink.c D1(printk(KERN_DEBUG "jffs2_follow_link(): target path is '%s'\n", (char *) f->target)); f 192 fs/jffs2/wbuf.c if (f->metadata && f->metadata->raw == raw) { f 193 fs/jffs2/wbuf.c dbg_noderef("Will replace ->raw in f->metadata at %p\n", f->metadata); f 194 fs/jffs2/wbuf.c return &f->metadata->raw; f 196 fs/jffs2/wbuf.c frag = jffs2_lookup_node_frag(&f->fragtree, je32_to_cpu(node->i.offset)); f 207 fs/jffs2/wbuf.c for (fd = f->dents; fd; fd = fd->next) { f 460 fs/jffs2/wbuf.c struct jffs2_inode_info *f = NULL; f 497 fs/jffs2/wbuf.c f = jffs2_gc_fetch_inode(c, ic->ino, !ic->pino_nlink); f 498 fs/jffs2/wbuf.c if (IS_ERR(f)) { f 501 fs/jffs2/wbuf.c ic->ino, PTR_ERR(f)); f 509 fs/jffs2/wbuf.c adjust_ref = jffs2_incore_replace_raw(c, f, raw, f 525 fs/jffs2/wbuf.c if (f) f 526 fs/jffs2/wbuf.c jffs2_gc_release_inode(c, f); f 34 fs/jffs2/write.c f->inocache = ic; f 35 fs/jffs2/write.c f->inocache->pino_nlink = 1; /* Will be overwritten shortly for directories */ f 36 fs/jffs2/write.c f->inocache->nodes = (struct jffs2_raw_node_ref *)f->inocache; f 37 fs/jffs2/write.c f->inocache->state = INO_STATE_PRESENT; f 39 fs/jffs2/write.c jffs2_add_ino_cache(c, f->inocache); f 40 fs/jffs2/write.c D1(printk(KERN_DEBUG "jffs2_do_new_inode(): Assigned ino# %d\n", f->inocache->ino)); f 41 fs/jffs2/write.c ri->ino = cpu_to_je32(f->inocache->ino); f 49 fs/jffs2/write.c f->highest_version = 1; f 50 fs/jffs2/write.c ri->version = cpu_to_je32(f->highest_version); f 97 fs/jffs2/write.c if ((alloc_mode!=ALLOC_GC) && (je32_to_cpu(ri->version) < f->highest_version)) { f 101 fs/jffs2/write.c je32_to_cpu(ri->version), f->highest_version)); f 102 fs/jffs2/write.c ri->version = cpu_to_je32(++f->highest_version); f 107 fs/jffs2/write.c (alloc_mode==ALLOC_GC)?0:f->inocache->ino); f 141 fs/jffs2/write.c mutex_unlock(&f->sem); f 146 fs/jffs2/write.c mutex_lock(&f->sem); f 176 fs/jffs2/write.c fn->raw = jffs2_add_physical_node_ref(c, flash_ofs, PAD(sizeof(*ri)+datalen), f->inocache); f 251 fs/jffs2/write.c if ((alloc_mode!=ALLOC_GC) && (je32_to_cpu(rd->version) < f->highest_version)) { f 255 fs/jffs2/write.c je32_to_cpu(rd->version), f->highest_version)); f 256 fs/jffs2/write.c rd->version = cpu_to_je32(++f->highest_version); f 289 fs/jffs2/write.c mutex_unlock(&f->sem); f 294 fs/jffs2/write.c mutex_lock(&f->sem); f 312 fs/jffs2/write.c PAD(sizeof(*rd)+namelen), f->inocache); f 338 fs/jffs2/write.c f->inocache->ino, offset, writelen)); f 357 fs/jffs2/write.c mutex_lock(&f->sem); f 361 fs/jffs2/write.c comprtype = jffs2_compress(c, f, buf, &comprbuf, &datalen, &cdatalen); f 368 fs/jffs2/write.c ri->ino = cpu_to_je32(f->inocache->ino); f 369 fs/jffs2/write.c ri->version = cpu_to_je32(++f->highest_version); f 379 fs/jffs2/write.c fn = jffs2_write_dnode(c, f, ri, comprbuf, cdatalen, ALLOC_NORETRY); f 385 fs/jffs2/write.c mutex_unlock(&f->sem); f 395 fs/jffs2/write.c ret = jffs2_add_full_dnode_to_inode(c, f, fn); f 396 fs/jffs2/write.c if (f->metadata) { f 397 fs/jffs2/write.c jffs2_mark_node_obsolete(c, f->metadata->raw); f 398 fs/jffs2/write.c jffs2_free_full_dnode(f->metadata); f 399 fs/jffs2/write.c 
f->metadata = NULL; f 407 fs/jffs2/write.c mutex_unlock(&f->sem); f 411 fs/jffs2/write.c mutex_unlock(&f->sem); f 445 fs/jffs2/write.c mutex_lock(&f->sem); f 450 fs/jffs2/write.c fn = jffs2_write_dnode(c, f, ri, NULL, 0, ALLOC_NORMAL); f 458 fs/jffs2/write.c mutex_unlock(&f->sem); f 465 fs/jffs2/write.c f->metadata = fn; f 467 fs/jffs2/write.c mutex_unlock(&f->sem); f 470 fs/jffs2/write.c ret = jffs2_init_security(&f->vfs_inode, &dir_f->vfs_inode); f 473 fs/jffs2/write.c ret = jffs2_init_acl_post(&f->vfs_inode); f 953 fs/jffs2/xattr.c struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); f 955 fs/jffs2/xattr.c struct jffs2_inode_cache *ic = f->inocache; f 1014 fs/jffs2/xattr.c struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); f 1016 fs/jffs2/xattr.c struct jffs2_inode_cache *ic = f->inocache; f 1076 fs/jffs2/xattr.c struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); f 1078 fs/jffs2/xattr.c struct jffs2_inode_cache *ic = f->inocache; f 1348 fs/jfs/jfs_dtree.c struct dtslot *f; f 1455 fs/jfs/jfs_dtree.c f = &rp->slot[fsi]; f 1456 fs/jfs/jfs_dtree.c for (fsi++; fsi < rp->header.maxslot; f++, fsi++) f 1457 fs/jfs/jfs_dtree.c f->next = fsi; f 1458 fs/jfs/jfs_dtree.c f->next = -1; f 1572 fs/jfs/jfs_dtree.c f = &rp->slot[fsi]; f 1573 fs/jfs/jfs_dtree.c for (fsi++; fsi < rp->header.maxslot; f++, fsi++) f 1574 fs/jfs/jfs_dtree.c f->next = fsi; f 1575 fs/jfs/jfs_dtree.c f->next = -1; f 1654 fs/jfs/jfs_dtree.c struct dtslot *f; f 1783 fs/jfs/jfs_dtree.c f = &sp->slot[fsi]; f 1785 fs/jfs/jfs_dtree.c for (n = 0; n < oldstblsize; n++, fsi++, f++) { f 1786 fs/jfs/jfs_dtree.c f->next = last; f 1797 fs/jfs/jfs_dtree.c f = &sp->slot[fsi]; f 1798 fs/jfs/jfs_dtree.c for (fsi++; fsi < sp->header.maxslot; f++, fsi++) f 1799 fs/jfs/jfs_dtree.c f->next = fsi; f 1800 fs/jfs/jfs_dtree.c f->next = -1; f 1808 fs/jfs/jfs_dtree.c f = &sp->slot[fsi]; f 1809 fs/jfs/jfs_dtree.c fsi = f->next; f 1812 fs/jfs/jfs_dtree.c f->next = n; f 1885 fs/jfs/jfs_dtree.c struct dtslot *f; f 1967 fs/jfs/jfs_dtree.c f = &rp->slot[fsi]; f 1968 fs/jfs/jfs_dtree.c for (fsi++; fsi < rp->header.maxslot; f++, fsi++) f 1969 fs/jfs/jfs_dtree.c f->next = fsi; f 1970 fs/jfs/jfs_dtree.c f->next = -1; f 1980 fs/jfs/jfs_dtree.c f = &rp->slot[fsi]; f 1981 fs/jfs/jfs_dtree.c fsi = f->next; f 1984 fs/jfs/jfs_dtree.c f->next = n; f 2054 fs/jfs/jfs_dtree.c f = &sp->slot[fsi]; f 2057 fs/jfs/jfs_dtree.c for (fsi++; fsi < DTROOTMAXSLOT; f++, fsi++) f 2058 fs/jfs/jfs_dtree.c f->next = fsi; f 2059 fs/jfs/jfs_dtree.c f->next = -1; f 2828 fs/jfs/jfs_dtree.c struct dtslot *f; f 2892 fs/jfs/jfs_dtree.c f = &p->slot[fsi]; f 2895 fs/jfs/jfs_dtree.c for (fsi++; fsi < DTROOTMAXSLOT; f++, fsi++) f 2896 fs/jfs/jfs_dtree.c f->next = fsi; f 2897 fs/jfs/jfs_dtree.c f->next = -1; f 38 fs/lockd/svcsubs.c u32 *fhp = (u32*)f->data; f 70 fs/lockd/svcsubs.c tmp += f->data[i]; f 92 fs/lockd/svcsubs.c nlm_debug_print_fh("nlm_lookup_file", f); f 94 fs/lockd/svcsubs.c hash = file_hash(f); f 100 fs/lockd/svcsubs.c if (!nfs_compare_fh(&file->f_handle, f)) f 103 fs/lockd/svcsubs.c nlm_debug_print_fh("creating file for", f); f 110 fs/lockd/svcsubs.c memcpy(&file->f_handle, f, sizeof(struct nfs_fh)); f 121 fs/lockd/svcsubs.c if ((nfserr = nlmsvc_ops->fopen(rqstp, f, &file->f_file)) != 0) { f 92 fs/lockd/xdr.c f->size = NFS2_FHSIZE; f 93 fs/lockd/xdr.c memset(f->data, 0, sizeof(f->data)); f 94 fs/lockd/xdr.c memcpy(f->data, p, NFS2_FHSIZE); f 102 fs/lockd/xdr.c memcpy(p, f->data, NFS2_FHSIZE); f 87 fs/lockd/xdr4.c memset(f->data, 0, sizeof(f->data)); f 88 fs/lockd/xdr4.c f->size = 
ntohl(*p++); f 89 fs/lockd/xdr4.c if (f->size > NFS_MAXFHSIZE) { f 91 fs/lockd/xdr4.c f->size, NFS_MAXFHSIZE); f 94 fs/lockd/xdr4.c memcpy(f->data, p, f->size); f 95 fs/lockd/xdr4.c return p + XDR_QUADLEN(f->size); f 101 fs/lockd/xdr4.c *p++ = htonl(f->size); f 102 fs/lockd/xdr4.c if (f->size) p[XDR_QUADLEN(f->size)-1] = 0; /* don't leak anything */ f 103 fs/lockd/xdr4.c memcpy(p, f->data, f->size); f 104 fs/lockd/xdr4.c return p + XDR_QUADLEN(f->size); f 1774 fs/locks.c struct file *f; f 1834 fs/locks.c f = fcheck(fd); f 1836 fs/locks.c if (!error && f != filp && flock.l_type != F_UNLCK) { f 1892 fs/locks.c struct file *f; f 1947 fs/locks.c f = fcheck(fd); f 1949 fs/locks.c if (!error && f != filp && flock.l_type != F_UNLCK) { f 2097 fs/locks.c seq_printf(f, "%d:%s ", id, pfx); f 2099 fs/locks.c seq_printf(f, "%6s %s ", f 2105 fs/locks.c seq_printf(f, "FLOCK MSNFS "); f 2107 fs/locks.c seq_printf(f, "FLOCK ADVISORY "); f 2110 fs/locks.c seq_printf(f, "LEASE "); f 2112 fs/locks.c seq_printf(f, "BREAKING "); f 2114 fs/locks.c seq_printf(f, "ACTIVE "); f 2116 fs/locks.c seq_printf(f, "BREAKER "); f 2118 fs/locks.c seq_printf(f, "UNKNOWN UNKNOWN "); f 2121 fs/locks.c seq_printf(f, "%s ", f 2126 fs/locks.c seq_printf(f, "%s ", f 2133 fs/locks.c seq_printf(f, "%d %s:%ld ", fl_pid, f 2137 fs/locks.c seq_printf(f, "%d %02x:%02x:%ld ", fl_pid, f 2142 fs/locks.c seq_printf(f, "%d <none>:0 ", fl_pid); f 2146 fs/locks.c seq_printf(f, "%Ld EOF\n", fl->fl_start); f 2148 fs/locks.c seq_printf(f, "%Ld %Ld\n", fl->fl_start, fl->fl_end); f 2150 fs/locks.c seq_printf(f, "0 EOF\n"); f 2160 fs/locks.c lock_get_status(f, fl, (long)f->private, ""); f 2163 fs/locks.c lock_get_status(f, bfl, (long)f->private, " ->"); f 2165 fs/locks.c f->private++; f 2172 fs/locks.c f->private = (void *)1; f 43 fs/mbcache.c printk(KERN_DEBUG f); \ f 54 fs/mbcache.c printk(KERN_ERR f); \ f 1450 fs/namei.c if (f & O_NOFOLLOW) f 1453 fs/namei.c if (f & O_DIRECTORY) f 1837 fs/namespace.c const char __user *f = from; f 1844 fs/namespace.c if (__get_user(c, f)) { f 1849 fs/namespace.c f++; f 17 fs/nfsd/auth.c struct exp_flavor_info *f; f 20 fs/nfsd/auth.c for (f = exp->ex_flavors; f < end; f++) { f 21 fs/nfsd/auth.c if (f->pseudoflavor == rqstp->rq_flavor) f 22 fs/nfsd/auth.c return f->flags; f 455 fs/nfsd/export.c struct exp_flavor_info *f; f 463 fs/nfsd/export.c for (f = exp->ex_flavors; f < exp->ex_flavors + listsize; f++) { f 464 fs/nfsd/export.c err = get_int(mesg, &f->pseudoflavor); f 476 fs/nfsd/export.c if (f->pseudoflavor < 0) f 478 fs/nfsd/export.c err = get_int(mesg, &f->flags); f 482 fs/nfsd/export.c if (~NFSEXP_SECINFO_FLAGS & (f->flags ^ exp->ex_flags)) f 1202 fs/nfsd/export.c memcpy(f, &fh.fh_handle, sizeof(struct knfsd_fh)); f 1228 fs/nfsd/export.c struct exp_flavor_info *f; f 1235 fs/nfsd/export.c for (f = exp->ex_flavors; f < end; f++) { f 1236 fs/nfsd/export.c if (f->pseudoflavor == rqstp->rq_flavor) f 1471 fs/nfsd/export.c struct exp_flavor_info *f; f 1477 fs/nfsd/export.c for (f = exp->ex_flavors; f < end; f++) { f 1478 fs/nfsd/export.c if (first || f->flags != lastflags) { f 1481 fs/nfsd/export.c seq_printf(m, ",sec=%d", f->pseudoflavor); f 1482 fs/nfsd/export.c lastflags = f->flags; f 1484 fs/nfsd/export.c seq_printf(m, ":%d", f->pseudoflavor); f 40 fs/nfsd/lockd.c fh.fh_handle.fh_size = f->size; f 41 fs/nfsd/lockd.c memcpy((char*)&fh.fh_handle.fh_base, f->data, f->size); f 155 fs/nfsd/nfs3xdr.c u64 f; f 166 fs/nfsd/nfs3xdr.c f = ((u64*)fhp->fh_export->ex_uuid)[0]; f 167 fs/nfsd/nfs3xdr.c f ^= 
((u64*)fhp->fh_export->ex_uuid)[1]; f 168 fs/nfsd/nfs3xdr.c p = xdr_encode_hyper(p, f); f 238 fs/nfsd/nfs4recover.c status = f(dir, child->dentry); f 157 fs/nfsd/nfsxdr.c u32 f; f 187 fs/nfsd/nfsxdr.c f = ((u32*)fhp->fh_export->ex_uuid)[0]; f 188 fs/nfsd/nfsxdr.c f ^= ((u32*)fhp->fh_export->ex_uuid)[1]; f 189 fs/nfsd/nfsxdr.c f ^= ((u32*)fhp->fh_export->ex_uuid)[2]; f 190 fs/nfsd/nfsxdr.c f ^= ((u32*)fhp->fh_export->ex_uuid)[3]; f 191 fs/nfsd/nfsxdr.c *p++ = htonl(f); f 48 fs/ntfs/debug.h __ntfs_debug(__FILE__, __LINE__, __func__, f, ##a) f 61 fs/ntfs/debug.h #define ntfs_warning(sb, f, a...) __ntfs_warning(__func__, sb, f, ##a) f 65 fs/ntfs/debug.h #define ntfs_error(sb, f, a...) __ntfs_error(__func__, sb, f, ##a) f 445 fs/ocfs2/cluster/heartbeat.c struct o2hb_callback_func *f; f 448 fs/ocfs2/cluster/heartbeat.c f = list_entry(iter, struct o2hb_callback_func, hc_item); f 449 fs/ocfs2/cluster/heartbeat.c mlog(ML_HEARTBEAT, "calling funcs %p\n", f); f 450 fs/ocfs2/cluster/heartbeat.c (f->hc_func)(node, idx, f->hc_data); f 254 fs/ocfs2/dlm/dlmcommon.h i->func = f; f 1822 fs/ocfs2/dlm/dlmdomain.c cb->ec_func = f; f 800 fs/open.c f->f_flags = flags; f 801 fs/open.c f->f_mode = ((flags+1) & O_ACCMODE) | FMODE_LSEEK | f 804 fs/open.c if (f->f_mode & FMODE_WRITE) { f 809 fs/open.c file_take_write(f); f 812 fs/open.c f->f_mapping = inode->i_mapping; f 813 fs/open.c f->f_path.dentry = dentry; f 814 fs/open.c f->f_path.mnt = mnt; f 815 fs/open.c f->f_pos = 0; f 816 fs/open.c f->f_op = fops_get(inode->i_fop); f 817 fs/open.c file_move(f, &inode->i_sb->s_files); f 819 fs/open.c error = security_dentry_open(f); f 823 fs/open.c if (!open && f->f_op) f 824 fs/open.c open = f->f_op->open; f 826 fs/open.c error = open(inode, f); f 831 fs/open.c f->f_flags &= ~(O_CREAT | O_EXCL | O_NOCTTY | O_TRUNC); f 833 fs/open.c file_ra_state_init(&f->f_ra, f->f_mapping->host->i_mapping); f 836 fs/open.c if (f->f_flags & O_DIRECT) { f 837 fs/open.c if (!f->f_mapping->a_ops || f 838 fs/open.c ((!f->f_mapping->a_ops->direct_IO) && f 839 fs/open.c (!f->f_mapping->a_ops->get_xip_mem))) { f 840 fs/open.c fput(f); f 841 fs/open.c f = ERR_PTR(-EINVAL); f 845 fs/open.c return f; f 848 fs/open.c fops_put(f->f_op); f 849 fs/open.c if (f->f_mode & FMODE_WRITE) { f 858 fs/open.c file_reset_write(f); f 862 fs/open.c file_kill(f); f 863 fs/open.c f->f_path.dentry = NULL; f 864 fs/open.c f->f_path.mnt = NULL; f 866 fs/open.c put_filp(f); f 940 fs/open.c struct file *f; f 955 fs/open.c f = get_empty_filp(); f 956 fs/open.c if (f == NULL) { f 962 fs/open.c return __dentry_open(dentry, mnt, flags, f, NULL); f 1018 fs/open.c struct file *f = do_filp_open(dfd, tmp, flags, mode); f 1019 fs/open.c if (IS_ERR(f)) { f 1021 fs/open.c fd = PTR_ERR(f); f 1023 fs/open.c fsnotify_open(f->f_path.dentry); f 1024 fs/open.c fd_install(fd, f); f 67 fs/openpromfs/inode.c struct property *prop = f->private; f 78 fs/openpromfs/inode.c seq_printf(f, "%s", (char *) pval); f 85 fs/openpromfs/inode.c seq_printf(f, " + "); f 92 fs/openpromfs/inode.c seq_printf(f, "%02x.", f 95 fs/openpromfs/inode.c seq_printf(f, "%02x", f 104 fs/openpromfs/inode.c seq_printf(f, "%08x.", f 107 fs/openpromfs/inode.c seq_printf(f, "%08x", f 113 fs/openpromfs/inode.c seq_printf(f, "\n"); f 44 fs/partitions/ldm.c #define ldm_debug(f, a...) _ldm_printk (KERN_DEBUG, __func__, f, ##a) f 47 fs/partitions/ldm.c #define ldm_crit(f, a...) _ldm_printk (KERN_CRIT, __func__, f, ##a) f 48 fs/partitions/ldm.c #define ldm_error(f, a...) 
_ldm_printk (KERN_ERR, __func__, f, ##a) f 49 fs/partitions/ldm.c #define ldm_info(f, a...) _ldm_printk (KERN_INFO, __func__, f, ##a) f 1291 fs/partitions/ldm.c struct frag *f; f 1306 fs/partitions/ldm.c f = list_entry (item, struct frag, list); f 1307 fs/partitions/ldm.c if (f->group == group) f 1311 fs/partitions/ldm.c f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL); f 1312 fs/partitions/ldm.c if (!f) { f 1317 fs/partitions/ldm.c f->group = group; f 1318 fs/partitions/ldm.c f->num = num; f 1319 fs/partitions/ldm.c f->rec = rec; f 1320 fs/partitions/ldm.c f->map = 0xFF << num; f 1322 fs/partitions/ldm.c list_add_tail (&f->list, frags); f 1324 fs/partitions/ldm.c if (f->map & (1 << rec)) { f 1326 fs/partitions/ldm.c f->map &= 0x7F; /* Mark the group as broken */ f 1330 fs/partitions/ldm.c f->map |= (1 << rec); f 1336 fs/partitions/ldm.c memcpy (f->data+rec*(size-VBLK_SIZE_HEAD)+VBLK_SIZE_HEAD, data, size); f 1372 fs/partitions/ldm.c struct frag *f; f 1378 fs/partitions/ldm.c f = list_entry (item, struct frag, list); f 1380 fs/partitions/ldm.c if (f->map != 0xFF) { f 1382 fs/partitions/ldm.c f->group, f->map); f 1386 fs/partitions/ldm.c if (!ldm_ldmdb_add (f->data, f->num*ldb->vm.vblk_size, ldb)) f 922 fs/pipe.c struct file *f; f 946 fs/pipe.c f = alloc_file(pipe_mnt, dentry, FMODE_WRITE, &write_pipefifo_fops); f 947 fs/pipe.c if (!f) f 949 fs/pipe.c f->f_mapping = inode->i_mapping; f 951 fs/pipe.c f->f_flags = O_WRONLY | (flags & O_NONBLOCK); f 952 fs/pipe.c f->f_version = 0; f 954 fs/pipe.c return f; f 970 fs/pipe.c free_pipe_info(f->f_dentry->d_inode); f 971 fs/pipe.c path_put(&f->f_path); f 972 fs/pipe.c put_filp(f); f 977 fs/pipe.c struct file *f = get_empty_filp(); f 978 fs/pipe.c if (!f) f 982 fs/pipe.c f->f_path = wrf->f_path; f 984 fs/pipe.c f->f_mapping = wrf->f_path.dentry->d_inode->i_mapping; f 986 fs/pipe.c f->f_pos = 0; f 987 fs/pipe.c f->f_flags = O_RDONLY | (flags & O_NONBLOCK); f 988 fs/pipe.c f->f_op = &read_pipefifo_fops; f 989 fs/pipe.c f->f_mode = FMODE_READ; f 990 fs/pipe.c f->f_version = 0; f 992 fs/pipe.c return f; f 309 fs/proc/proc_misc.c seq_printf(f, "Character devices:\n"); f 310 fs/proc/proc_misc.c chrdev_show(f, i); f 316 fs/proc/proc_misc.c seq_printf(f, "\nBlock devices:\n"); f 317 fs/proc/proc_misc.c blkdev_show(f, i); f 47 fs/proc/proc_net.c p = __seq_open_private(f, ops, size); f 87 fs/proc/proc_net.c seq = f->private_data; f 90 fs/proc/proc_net.c seq_release_private(ino, f); f 97 fs/proc/proc_net.c struct seq_file *seq = f->private_data; f 99 fs/proc/proc_net.c return single_release(ino, f); f 857 fs/reiserfs/fix_node.c struct buffer_head *l, *f; f 860 fs/reiserfs/fix_node.c if ((f = PATH_H_PPARENT(tb->tb_path, h)) == NULL || f 864 fs/reiserfs/fix_node.c if (f == l) f 868 fs/reiserfs/fix_node.c f = l; f 871 fs/reiserfs/fix_node.c return (MAX_CHILD_SIZE(f) - dc_size(B_N_CHILD(f, order))); f 879 fs/reiserfs/fix_node.c struct buffer_head *r, *f; f 882 fs/reiserfs/fix_node.c if ((f = PATH_H_PPARENT(tb->tb_path, h)) == NULL || f 886 fs/reiserfs/fix_node.c if (f == r) f 890 fs/reiserfs/fix_node.c f = r; f 893 fs/reiserfs/fix_node.c return (MAX_CHILD_SIZE(f) - dc_size(B_N_CHILD(f, order))); f 347 fs/seq_file.c va_start(args, f); f 348 fs/seq_file.c len = vsnprintf(m->buf + m->count, m->size - m->count, f, args); f 533 fs/seq_file.c rc = seq_open(f, ops); f 537 fs/seq_file.c seq = f->private_data; f 14 fs/smbfs/smb_debug.h # define PARANOIA(f, a...) printk(KERN_NOTICE "%s: " f, __func__ , ## a) f 21 fs/smbfs/smb_debug.h # define VERBOSE(f, a...) 
printk(KERN_DEBUG "%s: " f, __func__ , ## a) f 31 fs/smbfs/smb_debug.h #define DEBUG1(f, a...) printk(KERN_DEBUG "%s: " f, __func__ , ## a) f 100 fs/stat.c struct file *f = fget(fd); f 103 fs/stat.c if (f) { f 104 fs/stat.c error = vfs_getattr(f->f_path.mnt, f->f_path.dentry, stat); f 105 fs/stat.c fput(f); f 571 fs/super.c struct file *f; f 575 fs/super.c list_for_each_entry(f, &sb->s_files, f_u.fu_list) { f 577 fs/super.c if (!S_ISREG(f->f_path.dentry->d_inode->i_mode)) f 579 fs/super.c if (!file_count(f)) f 581 fs/super.c if (!(f->f_mode & FMODE_WRITE)) f 583 fs/super.c f->f_mode &= ~FMODE_WRITE; f 584 fs/super.c if (file_check_writeable(f) != 0) f 586 fs/super.c file_release_write(f); f 587 fs/super.c mnt = mntget(f->f_path.mnt); f 726 fs/ubifs/budget.c int divisor, factor, f; f 741 fs/ubifs/budget.c f = c->fanout > 3 ? c->fanout >> 1 : 2; f 744 fs/ubifs/budget.c divisor += (c->max_idx_node_sz * 3) / (f - 1); f 431 fs/ubifs/key.h const union ubifs_key *f = from; f 433 fs/ubifs/key.h to->u32[0] = le32_to_cpu(f->j32[0]); f 434 fs/ubifs/key.h to->u32[1] = le32_to_cpu(f->j32[1]); f 26 fs/udf/udfdecl.h printk(f, ##a); \ f 33 fs/udf/udfdecl.h printk(KERN_INFO "UDF-fs INFO " f, ##a); f 70 fs/ufs/ufs.h printk (f, ## a); \ f 296 fs/xattr.c struct file *f; f 300 fs/xattr.c f = fget(fd); f 301 fs/xattr.c if (!f) f 303 fs/xattr.c dentry = f->f_path.dentry; f 305 fs/xattr.c error = mnt_want_write(f->f_path.mnt); f 308 fs/xattr.c mnt_drop_write(f->f_path.mnt); f 310 fs/xattr.c fput(f); f 385 fs/xattr.c struct file *f; f 388 fs/xattr.c f = fget(fd); f 389 fs/xattr.c if (!f) f 391 fs/xattr.c audit_inode(NULL, f->f_path.dentry); f 392 fs/xattr.c error = getxattr(f->f_path.dentry, name, value, size); f 393 fs/xattr.c fput(f); f 458 fs/xattr.c struct file *f; f 461 fs/xattr.c f = fget(fd); f 462 fs/xattr.c if (!f) f 464 fs/xattr.c audit_inode(NULL, f->f_path.dentry); f 465 fs/xattr.c error = listxattr(f->f_path.dentry, list, size); f 466 fs/xattr.c fput(f); f 527 fs/xattr.c struct file *f; f 531 fs/xattr.c f = fget(fd); f 532 fs/xattr.c if (!f) f 534 fs/xattr.c dentry = f->f_path.dentry; f 536 fs/xattr.c error = mnt_want_write(f->f_path.mnt); f 539 fs/xattr.c mnt_drop_write(f->f_path.mnt); f 541 fs/xattr.c fput(f); f 129 fs/xfs/linux-2.6/xfs_linux.h #define current_test_flags(f) (current->flags & (f)) f 131 fs/xfs/linux-2.6/xfs_linux.h (*(sp) = current->flags, current->flags |= (f)) f 133 fs/xfs/linux-2.6/xfs_linux.h (*(sp) = current->flags, current->flags &= ~(f)) f 135 fs/xfs/linux-2.6/xfs_linux.h (current->flags = ((current->flags & ~(f)) | (*(sp) & (f)))) f 82 fs/xfs/linux-2.6/xfs_vnode.c xfs_do_force_shutdown(ip->i_mount, SHUTDOWN_DEVICE_REQ, f, l); f 60 fs/xfs/xfs_alloc.c xfs_alloc_trace_free(__func__, s, mp, a, b, x, f, __LINE__) f 62 fs/xfs/xfs_alloc.c xfs_alloc_trace_modagf(__func__, s, mp, a, f, __LINE__) f 199 fs/xfs/xfs_bmap_btree.c i, (xfs_dfsbno_t)f >> 32, (int)f, o >> 32, f 222 fs/xfs/xfs_bmap_btree.c d = (xfs_dfsbno_t)f; f 286 fs/xfs/xfs_bmap_btree.c xfs_bmbt_trace_argifk(__func__, c, i, f, s, __LINE__) f 288 fs/xfs/xfs_bmap_btree.c xfs_bmbt_trace_argifr(__func__, c, i, f, r, __LINE__) f 2367 fs/xfs/xfs_da_btree.c uint f; f 2382 fs/xfs/xfs_da_btree.c f = off; f 2383 fs/xfs/xfs_da_btree.c l = f + XFS_BUF_COUNT(bp) - 1; f 2384 fs/xfs/xfs_da_btree.c if (f < first) f 2385 fs/xfs/xfs_da_btree.c f = first; f 2388 fs/xfs/xfs_da_btree.c if (f <= l) f 2389 fs/xfs/xfs_da_btree.c xfs_trans_log_buf(tp, bp, f - off, l - off); f 168 fs/xfs/xfs_dmapi.h #define AT_DELAY_FLAG(f) ((f & XFS_ATTR_NONBLOCK) ? 
DM_FLAGS_NDELAY : 0) f 174 fs/xfs/xfs_error.h ((f & XFS_MFSI_QUIET)? (void)0 : cmn_err(CE_WARN, "XFS: " fmt, ## args)) f 427 fs/xfs/xfs_mount.c xfs_sb_field_t f; f 436 fs/xfs/xfs_mount.c f = (xfs_sb_field_t)xfs_lowbit64((__uint64_t)fields); f 437 fs/xfs/xfs_mount.c first = xfs_sb_info[f].offset; f 438 fs/xfs/xfs_mount.c size = xfs_sb_info[f + 1].offset - first; f 440 fs/xfs/xfs_mount.c ASSERT(xfs_sb_info[f].type == 0 || xfs_sb_info[f].type == 1); f 442 fs/xfs/xfs_mount.c if (size == 1 || xfs_sb_info[f].type == 1) { f 463 fs/xfs/xfs_mount.c fields &= ~(1LL << f); f 1438 fs/xfs/xfs_mount.c xfs_sb_field_t f; f 1454 fs/xfs/xfs_mount.c f = (xfs_sb_field_t)xfs_lowbit64((__uint64_t)fields); f 1455 fs/xfs/xfs_mount.c ASSERT((1LL << f) & XFS_SB_MOD_BITS); f 1456 fs/xfs/xfs_mount.c first = xfs_sb_info[f].offset; f 1458 fs/xfs/xfs_mount.c f = (xfs_sb_field_t)xfs_highbit64((__uint64_t)fields); f 1459 fs/xfs/xfs_mount.c ASSERT((1LL << f) & XFS_SB_MOD_BITS); f 1460 fs/xfs/xfs_mount.c last = xfs_sb_info[f + 1].offset - 1; f 440 fs/xfs/xfs_mount.h xfs_do_force_shutdown(m, f, __FILE__, __LINE__) f 359 fs/xfs/xfs_quota.h f | XFS_QMOPT_RES_REGBLKS) f 362 fs/xfs/xfs_quota.h f | XFS_QMOPT_RES_REGBLKS) f 51 include/acpi/acinterp.h #define ACPI_EXD_OFFSET(f) (u8) ACPI_OFFSET (union acpi_operand_object,f) f 52 include/acpi/acinterp.h #define ACPI_EXD_NSOFFSET(f) (u8) ACPI_OFFSET (struct acpi_namespace_node,f) f 90 include/acpi/acmacros.h #define ACPI_OFFSET(d,f) (acpi_size) ACPI_PTR_DIFF (&(((d *)0)->f),(void *) NULL) f 399 include/acpi/acmacros.h #define ARGI_LIST6(a,b,c,d,e,f) (ARG_1(f)|ARG_2(e)|ARG_3(d)|ARG_4(c)|ARG_5(b)|ARG_6(a)) f 406 include/acpi/acmacros.h #define ARGP_LIST6(a,b,c,d,e,f) (ARG_1(a)|ARG_2(b)|ARG_3(c)|ARG_4(d)|ARG_5(e)|ARG_6(f)) f 106 include/acpi/acresrc.h #define ACPI_RS_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_resource,f) f 107 include/acpi/acresrc.h #define AML_OFFSET(f) (u8) ACPI_OFFSET (union aml_resource,f) f 289 include/acpi/actbl.h #define ACPI_FADT_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_table_fadt, f) f 13 include/asm-cris/dma-mapping.h #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) f 10 include/asm-frv/dma-mapping.h #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) f 177 include/asm-frv/math-emu.h .macro fp_get_instr_data f,s,dest,label f 201 include/asm-frv/registers.h struct user_fpmedia_regs f; f 23 include/asm-generic/bitops/atomic.h local_irq_save(f); \ f 30 include/asm-generic/bitops/atomic.h local_irq_restore(f); \ f 35 include/asm-generic/bitops/atomic.h # define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0) f 36 include/asm-generic/bitops/atomic.h # define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0) f 19 include/asm-generic/dma-mapping-broken.h #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) f 267 include/asm-generic/dma-mapping.h #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) f 177 include/asm-m68k/math-emu.h .macro fp_get_instr_data f,s,dest,label f 26 include/asm-mn10300/dma-mapping.h #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent((d), (s), (h), (f)) f 36 include/asm-parisc/atomic.h local_irq_save(f); \ f 43 include/asm-parisc/atomic.h local_irq_restore(f); \ f 48 include/asm-parisc/atomic.h # define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0) f 49 include/asm-parisc/atomic.h # define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0) f 29 
include/asm-parisc/system.h unsigned int f:1; f 95 include/asm-um/dma-mapping.h #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) f 85 include/asm-x86/dma-mapping.h #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) f 8 include/asm-x86/efi.h #define efi_call_phys0(f) efi_call_phys(f) f 9 include/asm-x86/efi.h #define efi_call_phys1(f, a1) efi_call_phys(f, a1) f 10 include/asm-x86/efi.h #define efi_call_phys2(f, a1, a2) efi_call_phys(f, a1, a2) f 11 include/asm-x86/efi.h #define efi_call_phys3(f, a1, a2, a3) efi_call_phys(f, a1, a2, a3) f 13 include/asm-x86/efi.h efi_call_phys(f, a1, a2, a3, a4) f 15 include/asm-x86/efi.h efi_call_phys(f, a1, a2, a3, a4, a5) f 17 include/asm-x86/efi.h efi_call_phys(f, a1, a2, a3, a4, a5, a6) f 23 include/asm-x86/efi.h ((efi_##f##_t __attribute__((regparm(0)))*)efi.systab->runtime->f)(args) f 25 include/asm-x86/efi.h #define efi_call_virt0(f) efi_call_virt(f) f 26 include/asm-x86/efi.h #define efi_call_virt1(f, a1) efi_call_virt(f, a1) f 27 include/asm-x86/efi.h #define efi_call_virt2(f, a1, a2) efi_call_virt(f, a1, a2) f 28 include/asm-x86/efi.h #define efi_call_virt3(f, a1, a2, a3) efi_call_virt(f, a1, a2, a3) f 30 include/asm-x86/efi.h efi_call_virt(f, a1, a2, a3, a4) f 32 include/asm-x86/efi.h efi_call_virt(f, a1, a2, a3, a4, a5) f 34 include/asm-x86/efi.h efi_call_virt(f, a1, a2, a3, a4, a5, a6) f 53 include/asm-x86/efi.h efi_call0((void *)(f)) f 55 include/asm-x86/efi.h efi_call1((void *)(f), (u64)(a1)) f 57 include/asm-x86/efi.h efi_call2((void *)(f), (u64)(a1), (u64)(a2)) f 59 include/asm-x86/efi.h efi_call3((void *)(f), (u64)(a1), (u64)(a2), (u64)(a3)) f 61 include/asm-x86/efi.h efi_call4((void *)(f), (u64)(a1), (u64)(a2), (u64)(a3), \ f 64 include/asm-x86/efi.h efi_call5((void *)(f), (u64)(a1), (u64)(a2), (u64)(a3), \ f 67 include/asm-x86/efi.h efi_call6((void *)(f), (u64)(a1), (u64)(a2), (u64)(a3), \ f 71 include/asm-x86/efi.h efi_call0((void *)(efi.systab->runtime->f)) f 73 include/asm-x86/efi.h efi_call1((void *)(efi.systab->runtime->f), (u64)(a1)) f 75 include/asm-x86/efi.h efi_call2((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2)) f 77 include/asm-x86/efi.h efi_call3((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \ f 80 include/asm-x86/efi.h efi_call4((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \ f 83 include/asm-x86/efi.h efi_call5((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \ f 86 include/asm-x86/efi.h efi_call6((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \ f 1458 include/asm-x86/paravirt.h unsigned long f; f 1463 include/asm-x86/paravirt.h : "=a"(f) f 1467 include/asm-x86/paravirt.h return f; f 1475 include/asm-x86/paravirt.h : "=a"(f) f 1476 include/asm-x86/paravirt.h : PV_FLAGS_ARG(f), f 1506 include/asm-x86/paravirt.h unsigned long f; f 1508 include/asm-x86/paravirt.h f = __raw_local_save_flags(); f 1510 include/asm-x86/paravirt.h return f; f 171 include/asm-x86/string_32.h ? __constant_memcpy3d((t), (f), (n)) \ f 172 include/asm-x86/string_32.h : __memcpy3d((t), (f), (n))) f 182 include/asm-x86/string_32.h ? 
__constant_memcpy((t), (f), (n)) \ f 183 include/asm-x86/string_32.h : __memcpy((t), (f), (n))) f 27 include/asm-xtensa/dma-mapping.h #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) f 571 include/drm/radeon_drm.h float f[5]; f 1198 include/linux/cdrom.h *f = lba % CD_FRAMES; f 1203 include/linux/cdrom.h return (((m * CD_SECS) + s) * CD_FRAMES + f) - CD_MSF_OFFSET; f 281 include/linux/device-mapper.h printk(KERN_ERR DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg) f 286 include/linux/device-mapper.h f "\n", ## arg); \ f 290 include/linux/device-mapper.h printk(KERN_WARNING DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg) f 295 include/linux/device-mapper.h f "\n", ## arg); \ f 299 include/linux/device-mapper.h printk(KERN_INFO DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg) f 303 include/linux/device-mapper.h printk(KERN_INFO DM_NAME ": " DM_MSG_PREFIX ": " f \ f 309 include/linux/device-mapper.h printk(KERN_DEBUG DM_NAME ": " DM_MSG_PREFIX " DEBUG: " f "\n", ## arg) f 313 include/linux/device-mapper.h printk(KERN_DEBUG DM_NAME ": " DM_MSG_PREFIX ": " f \ f 51 include/linux/ext2_fs.h printk (f, ## a); \ f 47 include/linux/ext3_fs.h printk (KERN_DEBUG f, ## a); \ f 861 include/linux/fs.h WARN_ON(f->f_mnt_write_state != 0); f 862 include/linux/fs.h f->f_mnt_write_state = FILE_MNT_WRITE_TAKEN; f 866 include/linux/fs.h f->f_mnt_write_state |= FILE_MNT_WRITE_RELEASED; f 870 include/linux/fs.h f->f_mnt_write_state = 0; f 878 include/linux/fs.h WARN_ON(f->f_mnt_write_state == FILE_MNT_WRITE_TAKEN); f 879 include/linux/fs.h WARN_ON(f->f_mnt_write_state == FILE_MNT_WRITE_RELEASED); f 883 include/linux/fs.h if (f->f_mnt_write_state == FILE_MNT_WRITE_TAKEN) f 221 include/linux/hugetlb.h return hstate_inode(f->f_dentry->d_inode); f 65 include/linux/jbd.h printk (f, ## a); \ f 65 include/linux/jbd2.h printk (f, ## a); \ f 47 include/linux/kernel.h #define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f)) f 88 include/linux/mca.h struct mca_bus_accessor_functions f; f 135 include/linux/mmc/sdio.h #define SDIO_FBR_BASE(f) ((f) * 0x100) /* base of function f's FBRs */ f 62 include/linux/mmc/sdio_func.h #define sdio_func_present(f) ((f)->state & SDIO_STATE_PRESENT) f 64 include/linux/mmc/sdio_func.h #define sdio_func_set_present(f) ((f)->state |= SDIO_STATE_PRESENT) f 66 include/linux/mmc/sdio_func.h #define sdio_func_id(f) ((f)->dev.bus_id) f 68 include/linux/mmc/sdio_func.h #define sdio_get_drvdata(f) dev_get_drvdata(&(f)->dev) f 69 include/linux/mmc/sdio_func.h #define sdio_set_drvdata(f,d) dev_set_drvdata(&(f)->dev, d) f 56 include/linux/mroute6.h #define IF_COPY(f, t) bcopy(f, t, sizeof(*(f))) f 780 include/linux/netdevice.h f(dev, &dev->_tx[i], arg); f 21 include/linux/netfilter/nf_conntrack_proto_gre.h #define GRE_IS_C(f) ((f)&GRE_FLAG_C) f 22 include/linux/netfilter/nf_conntrack_proto_gre.h #define GRE_IS_R(f) ((f)&GRE_FLAG_R) f 23 include/linux/netfilter/nf_conntrack_proto_gre.h #define GRE_IS_K(f) ((f)&GRE_FLAG_K) f 24 include/linux/netfilter/nf_conntrack_proto_gre.h #define GRE_IS_S(f) ((f)&GRE_FLAG_S) f 25 include/linux/netfilter/nf_conntrack_proto_gre.h #define GRE_IS_A(f) ((f)&GRE_FLAG_A) f 106 include/linux/tty.h #define _I_FLAG(tty, f) ((tty)->termios->c_iflag & (f)) f 107 include/linux/tty.h #define _O_FLAG(tty, f) ((tty)->termios->c_oflag & (f)) f 108 include/linux/tty.h #define _C_FLAG(tty, f) ((tty)->termios->c_cflag & (f)) f 109 include/linux/tty.h #define _L_FLAG(tty, f) ((tty)->termios->c_lflag & (f)) f 63 include/linux/workqueue.h .func = (f), \ f 68 
include/linux/workqueue.h .work = __WORK_INITIALIZER((n).work, (f)), \ f 73 include/linux/workqueue.h struct work_struct n = __WORK_INITIALIZER(n, f) f 76 include/linux/workqueue.h struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f) f 37 include/media/v4l2-ioctl.h struct v4l2_fmtdesc *f); f 39 include/media/v4l2-ioctl.h struct v4l2_fmtdesc *f); f 41 include/media/v4l2-ioctl.h struct v4l2_fmtdesc *f); f 43 include/media/v4l2-ioctl.h struct v4l2_fmtdesc *f); f 47 include/media/v4l2-ioctl.h struct v4l2_format *f); f 49 include/media/v4l2-ioctl.h struct v4l2_format *f); f 51 include/media/v4l2-ioctl.h struct v4l2_format *f); f 53 include/media/v4l2-ioctl.h struct v4l2_format *f); f 55 include/media/v4l2-ioctl.h struct v4l2_format *f); f 57 include/media/v4l2-ioctl.h struct v4l2_format *f); f 59 include/media/v4l2-ioctl.h struct v4l2_format *f); f 61 include/media/v4l2-ioctl.h struct v4l2_format *f); f 63 include/media/v4l2-ioctl.h struct v4l2_format *f); f 67 include/media/v4l2-ioctl.h struct v4l2_format *f); f 69 include/media/v4l2-ioctl.h struct v4l2_format *f); f 71 include/media/v4l2-ioctl.h struct v4l2_format *f); f 73 include/media/v4l2-ioctl.h struct v4l2_format *f); f 75 include/media/v4l2-ioctl.h struct v4l2_format *f); f 77 include/media/v4l2-ioctl.h struct v4l2_format *f); f 79 include/media/v4l2-ioctl.h struct v4l2_format *f); f 81 include/media/v4l2-ioctl.h struct v4l2_format *f); f 83 include/media/v4l2-ioctl.h struct v4l2_format *f); f 87 include/media/v4l2-ioctl.h struct v4l2_format *f); f 89 include/media/v4l2-ioctl.h struct v4l2_format *f); f 91 include/media/v4l2-ioctl.h struct v4l2_format *f); f 93 include/media/v4l2-ioctl.h struct v4l2_format *f); f 95 include/media/v4l2-ioctl.h struct v4l2_format *f); f 97 include/media/v4l2-ioctl.h struct v4l2_format *f); f 99 include/media/v4l2-ioctl.h struct v4l2_format *f); f 101 include/media/v4l2-ioctl.h struct v4l2_format *f); f 103 include/media/v4l2-ioctl.h struct v4l2_format *f); f 903 include/net/bluetooth/hci.h #define hci_handle_pack(h, f) (__u16) ((h & 0x0fff)|(f << 12)) f 88 include/net/dn_fib.h #define DN_FIB_INFO(f) ((f)->fn_info) f 172 include/net/ieee80211.h #define IEEE80211_ERROR(f, a...) printk(KERN_ERR "ieee80211: " f, ## a) f 173 include/net/ieee80211.h #define IEEE80211_WARNING(f, a...) printk(KERN_WARNING "ieee80211: " f, ## a) f 174 include/net/ieee80211.h #define IEEE80211_DEBUG_INFO(f, a...) IEEE80211_DEBUG(IEEE80211_DL_INFO, f, ## a) f 176 include/net/ieee80211.h #define IEEE80211_DEBUG_WX(f, a...) IEEE80211_DEBUG(IEEE80211_DL_WX, f, ## a) f 177 include/net/ieee80211.h #define IEEE80211_DEBUG_SCAN(f, a...) IEEE80211_DEBUG(IEEE80211_DL_SCAN, f, ## a) f 178 include/net/ieee80211.h #define IEEE80211_DEBUG_STATE(f, a...) IEEE80211_DEBUG(IEEE80211_DL_STATE, f, ## a) f 179 include/net/ieee80211.h #define IEEE80211_DEBUG_MGMT(f, a...) IEEE80211_DEBUG(IEEE80211_DL_MGMT, f, ## a) f 180 include/net/ieee80211.h #define IEEE80211_DEBUG_FRAG(f, a...) IEEE80211_DEBUG(IEEE80211_DL_FRAG, f, ## a) f 181 include/net/ieee80211.h #define IEEE80211_DEBUG_DROP(f, a...) IEEE80211_DEBUG(IEEE80211_DL_DROP, f, ## a) f 182 include/net/ieee80211.h #define IEEE80211_DEBUG_TX(f, a...) IEEE80211_DEBUG(IEEE80211_DL_TX, f, ## a) f 183 include/net/ieee80211.h #define IEEE80211_DEBUG_RX(f, a...) IEEE80211_DEBUG(IEEE80211_DL_RX, f, ## a) f 184 include/net/ieee80211.h #define IEEE80211_DEBUG_QOS(f, a...) 
IEEE80211_DEBUG(IEEE80211_DL_QOS, f, ## a) f 69 include/net/inet_frag.h inet_frag_destroy(q, f, NULL); f 67 include/pcmcia/mem_op.h __u16 __iomem *f = from; f 70 include/pcmcia/mem_op.h *t++ = __raw_readw(f++); f 72 include/pcmcia/mem_op.h *(__u8 *)t = readb(f); f 78 include/pcmcia/mem_op.h const __u16 *f = from; f 81 include/pcmcia/mem_op.h __raw_writew(*f++, t++); f 83 include/pcmcia/mem_op.h writeb(*(__u8 *)f, t); f 89 include/pcmcia/mem_op.h __u16 __iomem *f = from; f 92 include/pcmcia/mem_op.h put_user(__raw_readw(f++), t++); f 94 include/pcmcia/mem_op.h put_user(readb(f), (char __user *)t); f 99 include/pcmcia/mem_op.h __u16 __user *f = from; f 105 include/pcmcia/mem_op.h get_user(s, f++); f 109 include/pcmcia/mem_op.h get_user(c, (char __user *)f); f 7 include/video/cyblafb.h #define debug(f,a...) printk("%s:" f, __FUNCTION__ , ## a); f 12 include/video/cyblafb.h #define output(f, a...) printk("cyblafb: " f, ## a) f 88 include/video/gbe.h ( (v) = ((v)&~MASK(msb,lsb)) | (( (u32)(f)<<(lsb) ) & MASK(msb,lsb)) ) f 93 include/video/gbe.h SET((v), (f), GBE_##reg##_##field##_MSB, GBE_##reg##_##field##_LSB) f 22 include/video/sgivw.h #define SET(v, f, msb, lsb) ( (v) = ((v)&~MASK(msb,lsb)) | (( (u32)(f)<<(lsb) ) & MASK(msb,lsb)) ) f 25 include/video/sgivw.h #define SET_DBE_FIELD(reg, field, v, f) SET((v), (f), DBE_##reg##_##field##_MSB, DBE_##reg##_##field##_LSB) f 7 include/video/trident.h #define debug(f, a...) printk("%s:" f, __func__ , ## a); f 12 include/video/trident.h #define output(f, a...) pr_info("tridentfb: " f, ## a) f 136 kernel/auditfilter.c struct audit_field *f = &e->rule.fields[i]; f 137 kernel/auditfilter.c kfree(f->lsm_str); f 138 kernel/auditfilter.c security_audit_rule_free(f->lsm_rule); f 250 kernel/auditfilter.c krule->inode_f = f; f 429 kernel/auditfilter.c struct audit_field *f = &entry->rule.fields[i]; f 431 kernel/auditfilter.c f->op = rule->fields[i] & (AUDIT_NEGATE|AUDIT_OPERATORS); f 432 kernel/auditfilter.c f->type = rule->fields[i] & ~(AUDIT_NEGATE|AUDIT_OPERATORS); f 433 kernel/auditfilter.c f->val = rule->values[i]; f 436 kernel/auditfilter.c switch(f->type) { f 457 kernel/auditfilter.c if (f->op == AUDIT_BIT_MASK || f 458 kernel/auditfilter.c f->op == AUDIT_BIT_TEST) { f 470 kernel/auditfilter.c if ((f->op != AUDIT_NOT_EQUAL) && (f->op != AUDIT_EQUAL) f 471 kernel/auditfilter.c && (f->op != AUDIT_NEGATE) && (f->op)) { f 475 kernel/auditfilter.c entry->rule.arch_f = f; f 478 kernel/auditfilter.c if (f->val & ~15) f 482 kernel/auditfilter.c if ((f->val & ~S_IFMT) > S_IFMT) f 486 kernel/auditfilter.c err = audit_to_inode(&entry->rule, f); f 492 kernel/auditfilter.c entry->rule.vers_ops = (f->op & AUDIT_OPERATORS) ? 
2 : 1; f 496 kernel/auditfilter.c if (f->op & AUDIT_NEGATE) f 497 kernel/auditfilter.c f->op = AUDIT_NOT_EQUAL; f 498 kernel/auditfilter.c else if (!f->op) f 499 kernel/auditfilter.c f->op = AUDIT_EQUAL; f 500 kernel/auditfilter.c else if (f->op == AUDIT_OPERATORS) { f 546 kernel/auditfilter.c struct audit_field *f = &entry->rule.fields[i]; f 553 kernel/auditfilter.c f->op = data->fieldflags[i] & AUDIT_OPERATORS; f 554 kernel/auditfilter.c f->type = data->fields[i]; f 555 kernel/auditfilter.c f->val = data->values[i]; f 556 kernel/auditfilter.c f->lsm_str = NULL; f 557 kernel/auditfilter.c f->lsm_rule = NULL; f 558 kernel/auditfilter.c switch(f->type) { f 582 kernel/auditfilter.c entry->rule.arch_f = f; f 594 kernel/auditfilter.c str = audit_unpack_string(&bufp, &remain, f->val); f 597 kernel/auditfilter.c entry->rule.buflen += f->val; f 599 kernel/auditfilter.c err = security_audit_rule_init(f->type, f->op, str, f 600 kernel/auditfilter.c (void **)&f->lsm_rule); f 612 kernel/auditfilter.c f->lsm_str = str; f 615 kernel/auditfilter.c str = audit_unpack_string(&bufp, &remain, f->val); f 618 kernel/auditfilter.c entry->rule.buflen += f->val; f 620 kernel/auditfilter.c err = audit_to_watch(&entry->rule, str, f->val, f->op); f 627 kernel/auditfilter.c str = audit_unpack_string(&bufp, &remain, f->val); f 630 kernel/auditfilter.c entry->rule.buflen += f->val; f 632 kernel/auditfilter.c err = audit_make_tree(&entry->rule, str, f->op); f 638 kernel/auditfilter.c err = audit_to_inode(&entry->rule, f); f 644 kernel/auditfilter.c if (entry->rule.filterkey || f->val > AUDIT_MAX_KEY_LEN) f 646 kernel/auditfilter.c str = audit_unpack_string(&bufp, &remain, f->val); f 649 kernel/auditfilter.c entry->rule.buflen += f->val; f 653 kernel/auditfilter.c if (f->val & ~15) f 657 kernel/auditfilter.c if ((f->val & ~S_IFMT) > S_IFMT) f 744 kernel/auditfilter.c struct audit_field *f = &krule->fields[i]; f 746 kernel/auditfilter.c data->fields[i] = f->type; f 747 kernel/auditfilter.c data->fieldflags[i] = f->op; f 748 kernel/auditfilter.c switch(f->type) { f 760 kernel/auditfilter.c audit_pack_string(&bufp, f->lsm_str); f 776 kernel/auditfilter.c data->values[i] = f->val; f 1702 kernel/auditfilter.c struct audit_field *f = &rule->fields[i]; f 1705 kernel/auditfilter.c switch (f->type) { f 1707 kernel/auditfilter.c result = audit_comparator(cb->creds.pid, f->op, f->val); f 1710 kernel/auditfilter.c result = audit_comparator(cb->creds.uid, f->op, f->val); f 1713 kernel/auditfilter.c result = audit_comparator(cb->creds.gid, f->op, f->val); f 1716 kernel/auditfilter.c result = audit_comparator(cb->loginuid, f->op, f->val); f 1762 kernel/auditfilter.c struct audit_field *f = &e->rule.fields[i]; f 1763 kernel/auditfilter.c if (f->type == AUDIT_MSGTYPE) { f 1764 kernel/auditfilter.c result = audit_comparator(type, f->op, f->val); f 428 kernel/auditsc.c struct audit_field *f = &rule->fields[i]; f 431 kernel/auditsc.c switch (f->type) { f 433 kernel/auditsc.c result = audit_comparator(tsk->pid, f->op, f->val); f 439 kernel/auditsc.c result = audit_comparator(ctx->ppid, f->op, f->val); f 443 kernel/auditsc.c result = audit_comparator(tsk->uid, f->op, f->val); f 446 kernel/auditsc.c result = audit_comparator(tsk->euid, f->op, f->val); f 449 kernel/auditsc.c result = audit_comparator(tsk->suid, f->op, f->val); f 452 kernel/auditsc.c result = audit_comparator(tsk->fsuid, f->op, f->val); f 455 kernel/auditsc.c result = audit_comparator(tsk->gid, f->op, f->val); f 458 kernel/auditsc.c result = audit_comparator(tsk->egid, f->op, 
f->val); f 461 kernel/auditsc.c result = audit_comparator(tsk->sgid, f->op, f->val); f 464 kernel/auditsc.c result = audit_comparator(tsk->fsgid, f->op, f->val); f 467 kernel/auditsc.c result = audit_comparator(tsk->personality, f->op, f->val); f 471 kernel/auditsc.c result = audit_comparator(ctx->arch, f->op, f->val); f 476 kernel/auditsc.c result = audit_comparator(ctx->return_code, f->op, f->val); f 480 kernel/auditsc.c if (f->val) f 481 kernel/auditsc.c result = audit_comparator(ctx->return_valid, f->op, AUDITSC_SUCCESS); f 483 kernel/auditsc.c result = audit_comparator(ctx->return_valid, f->op, AUDITSC_FAILURE); f 489 kernel/auditsc.c f->op, f->val); f 492 kernel/auditsc.c if (audit_comparator(MAJOR(ctx->names[j].dev), f->op, f->val)) { f 502 kernel/auditsc.c f->op, f->val); f 505 kernel/auditsc.c if (audit_comparator(MINOR(ctx->names[j].dev), f->op, f->val)) { f 514 kernel/auditsc.c result = (name->ino == f->val); f 517 kernel/auditsc.c if (audit_comparator(ctx->names[j].ino, f->op, f->val)) { f 536 kernel/auditsc.c result = audit_comparator(tsk->loginuid, f->op, f->val); f 548 kernel/auditsc.c if (f->lsm_rule) { f 553 kernel/auditsc.c result = security_audit_rule_match(sid, f->type, f 554 kernel/auditsc.c f->op, f 555 kernel/auditsc.c f->lsm_rule, f 566 kernel/auditsc.c if (f->lsm_rule) { f 570 kernel/auditsc.c name->osid, f->type, f->op, f 571 kernel/auditsc.c f->lsm_rule, ctx); f 576 kernel/auditsc.c f->type, f->op, f 577 kernel/auditsc.c f->lsm_rule, ctx)) { f 590 kernel/auditsc.c if (security_audit_rule_match(axi->osid, f->type, f->op, f->lsm_rule, ctx)) { f 604 kernel/auditsc.c result = audit_comparator(ctx->argv[f->type-AUDIT_ARG0], f->op, f->val); f 611 kernel/auditsc.c result = audit_match_perm(ctx, f->val); f 614 kernel/auditsc.c result = audit_match_filetype(ctx, f->val); f 31 kernel/irq/internals.h #define P(f) if (desc->status & f) printk("%14s set\n", #f) f 153 kernel/kmod.c struct files_struct *f = current->files; f 158 kernel/kmod.c spin_lock(&f->file_lock); f 159 kernel/kmod.c fdt = files_fdtable(f); f 162 kernel/kmod.c spin_unlock(&f->file_lock); f 419 kernel/kmod.c struct file *f; f 421 kernel/kmod.c f = create_write_pipe(0); f 422 kernel/kmod.c if (IS_ERR(f)) f 423 kernel/kmod.c return PTR_ERR(f); f 424 kernel/kmod.c *filp = f; f 426 kernel/kmod.c f = create_read_pipe(f, 0); f 427 kernel/kmod.c if (IS_ERR(f)) { f 429 kernel/kmod.c return PTR_ERR(f); f 431 kernel/kmod.c sub_info->stdin = f; f 209 kernel/rcupreempt.c #define RCU_TRACE_CPU(f, cpu) RCU_TRACE(f, &(RCU_DATA_CPU(cpu)->trace)); f 215 kernel/rcupreempt.c #define RCU_TRACE_ME(f) RCU_TRACE(f, &(RCU_DATA_ME()->trace)); f 221 kernel/rcupreempt.c #define RCU_TRACE_RDP(f, rdp) RCU_TRACE(f, &((rdp)->trace)); f 232 kernel/rcupreempt_trace.c int f = rcu_batches_completed() & 0x1; f 245 kernel/rcupreempt_trace.c flipctr[!f], f 246 kernel/rcupreempt_trace.c flipctr[f], f 274 kernel/workqueue.c work_func_t f = work->func; f 295 kernel/workqueue.c f(work); f 305 kernel/workqueue.c print_symbol("%s\n", (unsigned long)f); f 17 lib/halfmd4.c (a += f(b, c, d) + x, a = (a << s) | (a >> (32 - s))) f 334 lib/inflate.c unsigned f; /* i repeats in table every f entries */ f 467 lib/inflate.c if ((f = 1 << (j = k - w)) > a + 1) /* try a k-w bit table */ f 470 lib/inflate.c f -= a + 1; /* deduct codes from patterns left */ f 475 lib/inflate.c if ((f <<= 1) <= *++xp) f 477 lib/inflate.c f -= *xp; /* else deduct codes from patterns */ f 531 lib/inflate.c f = 1 << (k - w); f 532 lib/inflate.c for (j = i >> w; j < z; j += f) f 422 
lib/zlib_deflate/deftree.c ush f; /* frequency */ f 444 lib/zlib_deflate/deftree.c f = tree[n].Freq; f 445 lib/zlib_deflate/deftree.c s->opt_len += (ulg)f * (bits + xbits); f 446 lib/zlib_deflate/deftree.c if (stree) s->static_len += (ulg)f * (stree[n].Len + xbits); f 77 mm/hugetlb.c if (f <= rg->to) f 81 mm/hugetlb.c if (f > rg->from) f 82 mm/hugetlb.c f = rg->from; f 102 mm/hugetlb.c nrg->from = f; f 114 mm/hugetlb.c if (f <= rg->to) f 124 mm/hugetlb.c nrg->from = f; f 125 mm/hugetlb.c nrg->to = f; f 129 mm/hugetlb.c return t - f; f 133 mm/hugetlb.c if (f > rg->from) f 134 mm/hugetlb.c f = rg->from; f 135 mm/hugetlb.c chg = t - f; f 196 mm/hugetlb.c if (rg->to <= f) f 201 mm/hugetlb.c seg_from = max(rg->from, f); f 3000 mm/memory.c struct file *f = vma->vm_file; f 3005 mm/memory.c p = d_path(&f->f_path, buf, PAGE_SIZE); f 884 mm/vmalloc.c area->size, f, NULL)) { f 161 net/8021q/vlan_netlink.c struct ifla_vlan_flags f; f 168 net/8021q/vlan_netlink.c f.flags = vlan->flags; f 169 net/8021q/vlan_netlink.c f.mask = ~0; f 170 net/8021q/vlan_netlink.c NLA_PUT(skb, IFLA_VLAN_FLAGS, sizeof(f), &f); f 540 net/atm/lec.c struct net_bridge_fdb_entry *f; f 555 net/atm/lec.c f = br_fdb_get_hook(dev->br_port->br, f 557 net/atm/lec.c if (f != NULL && f->dst->dev != dev f 558 net/atm/lec.c && f->dst->state == BR_STATE_FORWARDING) { f 570 net/atm/lec.c br_fdb_put_hook(f); f 581 net/atm/lec.c if (f != NULL) f 582 net/atm/lec.c br_fdb_put_hook(f); f 148 net/bluetooth/bnep/core.c struct bnep_proto_filter *f = s->proto_filter; f 152 net/bluetooth/bnep/core.c f[i].start = get_unaligned_be16(data++); f 153 net/bluetooth/bnep/core.c f[i].end = get_unaligned_be16(data++); f 156 net/bluetooth/bnep/core.c f[i].start, f[i].end); f 160 net/bluetooth/bnep/core.c memset(f + i, 0, sizeof(*f)); f 171 net/bluetooth/bnep/netdev.c struct bnep_proto_filter *f = s->proto_filter; f 174 net/bluetooth/bnep/netdev.c for (i = 0; i < BNEP_MAX_PROTO_FILTERS && f[i].end; i++) { f 175 net/bluetooth/bnep/netdev.c if (proto >= f[i].start && proto <= f[i].end) f 511 net/bluetooth/hci_sock.c struct hci_filter *f = &hci_pi(sk)->filter; f 513 net/bluetooth/hci_sock.c uf.type_mask = f->type_mask; f 514 net/bluetooth/hci_sock.c uf.opcode = f->opcode; f 515 net/bluetooth/hci_sock.c uf.event_mask[0] = *((u32 *) f->event_mask + 0); f 516 net/bluetooth/hci_sock.c uf.event_mask[1] = *((u32 *) f->event_mask + 1); f 532 net/bluetooth/hci_sock.c struct hci_filter *f = &hci_pi(sk)->filter; f 534 net/bluetooth/hci_sock.c f->type_mask = uf.type_mask; f 535 net/bluetooth/hci_sock.c f->opcode = uf.opcode; f 536 net/bluetooth/hci_sock.c *((u32 *) f->event_mask + 0) = uf.event_mask[0]; f 537 net/bluetooth/hci_sock.c *((u32 *) f->event_mask + 1) = uf.event_mask[1]; f 582 net/bluetooth/hci_sock.c struct hci_filter *f = &hci_pi(sk)->filter; f 584 net/bluetooth/hci_sock.c uf.type_mask = f->type_mask; f 585 net/bluetooth/hci_sock.c uf.opcode = f->opcode; f 586 net/bluetooth/hci_sock.c uf.event_mask[0] = *((u32 *) f->event_mask + 0); f 587 net/bluetooth/hci_sock.c uf.event_mask[1] = *((u32 *) f->event_mask + 1); f 195 net/bluetooth/rfcomm/core.c u8 f = __crc(data); f 198 net/bluetooth/rfcomm/core.c f = rfcomm_crc_table[f ^ data[2]]; f 200 net/bluetooth/rfcomm/core.c return rfcomm_crc_table[f ^ fcs] != 0xcf; f 76 net/bridge/br_fdb.c hlist_del_rcu(&f->hlist); f 77 net/bridge/br_fdb.c br_fdb_put(f); f 91 net/bridge/br_fdb.c struct net_bridge_fdb_entry *f; f 93 net/bridge/br_fdb.c f = hlist_entry(h, struct net_bridge_fdb_entry, hlist); f 94 net/bridge/br_fdb.c if 
(f->dst == p && f->is_local) { f 100 net/bridge/br_fdb.c f->addr.addr)) { f 101 net/bridge/br_fdb.c f->dst = op; f 107 net/bridge/br_fdb.c fdb_delete(f); f 128 net/bridge/br_fdb.c struct net_bridge_fdb_entry *f; f 131 net/bridge/br_fdb.c hlist_for_each_entry_safe(f, h, n, &br->hash[i], hlist) { f 133 net/bridge/br_fdb.c if (f->is_static) f 135 net/bridge/br_fdb.c this_timer = f->ageing_timer + delay; f 137 net/bridge/br_fdb.c fdb_delete(f); f 156 net/bridge/br_fdb.c struct net_bridge_fdb_entry *f; f 158 net/bridge/br_fdb.c hlist_for_each_entry_safe(f, h, n, &br->hash[i], hlist) { f 159 net/bridge/br_fdb.c if (!f->is_static) f 160 net/bridge/br_fdb.c fdb_delete(f); f 180 net/bridge/br_fdb.c struct net_bridge_fdb_entry *f f 182 net/bridge/br_fdb.c if (f->dst != p) f 185 net/bridge/br_fdb.c if (f->is_static && !do_all) f 192 net/bridge/br_fdb.c if (f->is_local) { f 197 net/bridge/br_fdb.c f->addr.addr)) { f 198 net/bridge/br_fdb.c f->dst = op; f 204 net/bridge/br_fdb.c fdb_delete(f); f 267 net/bridge/br_fdb.c struct net_bridge_fdb_entry *f; f 273 net/bridge/br_fdb.c hlist_for_each_entry_rcu(f, h, &br->hash[i], hlist) { f 277 net/bridge/br_fdb.c if (has_expired(br, f)) f 286 net/bridge/br_fdb.c memcpy(fe->mac_addr, f->addr.addr, ETH_ALEN); f 289 net/bridge/br_fdb.c fe->port_no = f->dst->port_no; f 290 net/bridge/br_fdb.c fe->port_hi = f->dst->port_no >> 8; f 292 net/bridge/br_fdb.c fe->is_local = f->is_local; f 293 net/bridge/br_fdb.c if (!f->is_static) f 294 net/bridge/br_fdb.c fe->ageing_timer_value = jiffies_to_clock_t(jiffies - f->ageing_timer); f 1064 net/core/pktgen.c char f[32]; f 1065 net/core/pktgen.c memset(f, 0, 32); f 1066 net/core/pktgen.c len = strn_len(&user_buffer[i], sizeof(f) - 1); f 1070 net/core/pktgen.c if (copy_from_user(f, &user_buffer[i], len)) f 1073 net/core/pktgen.c if (strcmp(f, "IPSRC_RND") == 0) f 1076 net/core/pktgen.c else if (strcmp(f, "!IPSRC_RND") == 0) f 1079 net/core/pktgen.c else if (strcmp(f, "TXSIZE_RND") == 0) f 1082 net/core/pktgen.c else if (strcmp(f, "!TXSIZE_RND") == 0) f 1085 net/core/pktgen.c else if (strcmp(f, "IPDST_RND") == 0) f 1088 net/core/pktgen.c else if (strcmp(f, "!IPDST_RND") == 0) f 1091 net/core/pktgen.c else if (strcmp(f, "UDPSRC_RND") == 0) f 1094 net/core/pktgen.c else if (strcmp(f, "!UDPSRC_RND") == 0) f 1097 net/core/pktgen.c else if (strcmp(f, "UDPDST_RND") == 0) f 1100 net/core/pktgen.c else if (strcmp(f, "!UDPDST_RND") == 0) f 1103 net/core/pktgen.c else if (strcmp(f, "MACSRC_RND") == 0) f 1106 net/core/pktgen.c else if (strcmp(f, "!MACSRC_RND") == 0) f 1109 net/core/pktgen.c else if (strcmp(f, "MACDST_RND") == 0) f 1112 net/core/pktgen.c else if (strcmp(f, "!MACDST_RND") == 0) f 1115 net/core/pktgen.c else if (strcmp(f, "MPLS_RND") == 0) f 1118 net/core/pktgen.c else if (strcmp(f, "!MPLS_RND") == 0) f 1121 net/core/pktgen.c else if (strcmp(f, "VID_RND") == 0) f 1124 net/core/pktgen.c else if (strcmp(f, "!VID_RND") == 0) f 1127 net/core/pktgen.c else if (strcmp(f, "SVID_RND") == 0) f 1130 net/core/pktgen.c else if (strcmp(f, "!SVID_RND") == 0) f 1133 net/core/pktgen.c else if (strcmp(f, "FLOW_SEQ") == 0) f 1136 net/core/pktgen.c else if (strcmp(f, "QUEUE_MAP_RND") == 0) f 1139 net/core/pktgen.c else if (strcmp(f, "!QUEUE_MAP_RND") == 0) f 1142 net/core/pktgen.c else if (strcmp(f, "QUEUE_MAP_CPU") == 0) f 1145 net/core/pktgen.c else if (strcmp(f, "!QUEUE_MAP_CPU") == 0) f 1148 net/core/pktgen.c else if (strcmp(f, "IPSEC") == 0) f 1152 net/core/pktgen.c else if (strcmp(f, "!IPV6") == 0) f 1158 net/core/pktgen.c f, f 1750 
net/core/pktgen.c char f[32]; f 1751 net/core/pktgen.c memset(f, 0, 32); f 1752 net/core/pktgen.c len = strn_len(&user_buffer[i], sizeof(f) - 1); f 1757 net/core/pktgen.c if (copy_from_user(f, &user_buffer[i], len)) f 1761 net/core/pktgen.c pktgen_add_device(t, f); f 1764 net/core/pktgen.c sprintf(pg_result, "OK: add_device=%s", f); f 1416 net/core/skbuff.c const skb_frag_t *f = &skb_shinfo(skb)->frags[seg]; f 1418 net/core/skbuff.c if (__splice_segment(f->page, f->page_offset, f->size, f 622 net/dccp/ccids/lib/tfrc_equation.c u32 f; f 641 net/dccp/ccids/lib/tfrc_equation.c f = tfrc_calc_x_lookup[index][1]; f 646 net/dccp/ccids/lib/tfrc_equation.c f = tfrc_calc_x_lookup[index][0]; f 659 net/dccp/ccids/lib/tfrc_equation.c return scaled_div32(result, f); f 73 net/decnet/dn_table.c for( ; ((f) = *(fp)) != NULL; (fp) = &(f)->fn_next) f 76 net/decnet/dn_table.c for( ; ((f) = *(fp)) != NULL && dn_key_eq((f)->fn_key, (key)); (fp) = &(f)->fn_next) f 127 net/decnet/dn_table.c struct dn_fib_node *f, **fp, *next; f 130 net/decnet/dn_table.c for(f = old_ht[i]; f; f = f->fn_next) { f 131 net/decnet/dn_table.c next = f->fn_next; f 132 net/decnet/dn_table.c for(fp = dn_chain_p(f->fn_key, dz); f 133 net/decnet/dn_table.c *fp && dn_key_leq((*fp)->fn_key, f->fn_key); f 136 net/decnet/dn_table.c f->fn_next = *fp; f 137 net/decnet/dn_table.c *fp = f; f 179 net/decnet/dn_table.c dn_fib_release_info(DN_FIB_INFO(f)); f 180 net/decnet/dn_table.c kmem_cache_free(dn_hash_kmem, f); f 365 net/decnet/dn_table.c skb = nlmsg_new(dn_fib_nlmsg_size(DN_FIB_INFO(f)), GFP_KERNEL); f 370 net/decnet/dn_table.c f->fn_type, f->fn_scope, &f->fn_key, z, f 371 net/decnet/dn_table.c DN_FIB_INFO(f), 0); f 393 net/decnet/dn_table.c for(i = 0; f; i++, f = f->fn_next) { f 396 net/decnet/dn_table.c if (f->fn_state & DN_S_ZOMBIE) f 402 net/decnet/dn_table.c (f->fn_state & DN_S_ZOMBIE) ? 
0 : f->fn_type, f 403 net/decnet/dn_table.c f->fn_scope, &f->fn_key, dz->dz_order, f 404 net/decnet/dn_table.c f->fn_info, NLM_F_MULTI) < 0) { f 508 net/decnet/dn_table.c struct dn_fib_node *new_f, *f, **fp, **del_fp; f 542 net/decnet/dn_table.c DN_FIB_SCAN(f, fp) { f 543 net/decnet/dn_table.c if (dn_key_leq(key, f->fn_key)) f 549 net/decnet/dn_table.c if (f && (f->fn_state & DN_S_ZOMBIE) && f 550 net/decnet/dn_table.c dn_key_eq(f->fn_key, key)) { f 552 net/decnet/dn_table.c fp = &f->fn_next; f 553 net/decnet/dn_table.c f = *fp; f 557 net/decnet/dn_table.c DN_FIB_SCAN_KEY(f, fp, key) { f 558 net/decnet/dn_table.c if (fi->fib_priority <= DN_FIB_INFO(f)->fib_priority) f 562 net/decnet/dn_table.c if (f && dn_key_eq(f->fn_key, key) && f 563 net/decnet/dn_table.c fi->fib_priority == DN_FIB_INFO(f)->fib_priority) { f 572 net/decnet/dn_table.c fp = &f->fn_next; f 573 net/decnet/dn_table.c f = *fp; f 580 net/decnet/dn_table.c DN_FIB_SCAN_KEY(f, fp, key) { f 581 net/decnet/dn_table.c if (fi->fib_priority != DN_FIB_INFO(f)->fib_priority) f 583 net/decnet/dn_table.c if (f->fn_type == type && f->fn_scope == r->rtm_scope f 584 net/decnet/dn_table.c && DN_FIB_INFO(f) == fi) f 590 net/decnet/dn_table.c f = *fp; f 610 net/decnet/dn_table.c new_f->fn_next = f; f 617 net/decnet/dn_table.c f = *del_fp; f 619 net/decnet/dn_table.c *del_fp = f->fn_next; f 622 net/decnet/dn_table.c if (!(f->fn_state & DN_S_ZOMBIE)) f 623 net/decnet/dn_table.c dn_rtmsg_fib(RTM_DELROUTE, f, z, tb->n, n, req); f 624 net/decnet/dn_table.c if (f->fn_state & DN_S_ACCESSED) f 626 net/decnet/dn_table.c dn_free_node(f); f 644 net/decnet/dn_table.c struct dn_fib_node **fp, **del_fp, *f; f 668 net/decnet/dn_table.c DN_FIB_SCAN(f, fp) { f 669 net/decnet/dn_table.c if (dn_key_eq(f->fn_key, key)) f 671 net/decnet/dn_table.c if (dn_key_leq(key, f->fn_key)) f 677 net/decnet/dn_table.c DN_FIB_SCAN_KEY(f, fp, key) { f 678 net/decnet/dn_table.c struct dn_fib_info *fi = DN_FIB_INFO(f); f 680 net/decnet/dn_table.c if (f->fn_state & DN_S_ZOMBIE) f 686 net/decnet/dn_table.c (!r->rtm_type || f->fn_type == r->rtm_type) && f 687 net/decnet/dn_table.c (r->rtm_scope == RT_SCOPE_NOWHERE || f->fn_scope == r->rtm_scope) && f 695 net/decnet/dn_table.c f = *del_fp; f 696 net/decnet/dn_table.c dn_rtmsg_fib(RTM_DELROUTE, f, z, tb->n, n, req); f 700 net/decnet/dn_table.c *del_fp = f->fn_next; f 703 net/decnet/dn_table.c if (f->fn_state & DN_S_ACCESSED) f 705 net/decnet/dn_table.c dn_free_node(f); f 708 net/decnet/dn_table.c f->fn_state |= DN_S_ZOMBIE; f 709 net/decnet/dn_table.c if (f->fn_state & DN_S_ACCESSED) { f 710 net/decnet/dn_table.c f->fn_state &= ~DN_S_ACCESSED; f 726 net/decnet/dn_table.c struct dn_fib_node *f; f 728 net/decnet/dn_table.c while((f = *fp) != NULL) { f 729 net/decnet/dn_table.c struct dn_fib_info *fi = DN_FIB_INFO(f); f 731 net/decnet/dn_table.c if (fi && ((f->fn_state & DN_S_ZOMBIE) || (fi->fib_flags & RTNH_F_DEAD))) { f 733 net/decnet/dn_table.c *fp = f->fn_next; f 736 net/decnet/dn_table.c dn_free_node(f); f 740 net/decnet/dn_table.c fp = &f->fn_next; f 773 net/decnet/dn_table.c struct dn_fib_node *f; f 776 net/decnet/dn_table.c for(f = dz_chain(k, dz); f; f = f->fn_next) { f 777 net/decnet/dn_table.c if (!dn_key_eq(k, f->fn_key)) { f 778 net/decnet/dn_table.c if (dn_key_leq(k, f->fn_key)) f 784 net/decnet/dn_table.c f->fn_state |= DN_S_ACCESSED; f 786 net/decnet/dn_table.c if (f->fn_state&DN_S_ZOMBIE) f 789 net/decnet/dn_table.c if (f->fn_scope < flp->fld_scope) f 792 net/decnet/dn_table.c err = dn_fib_semantic_match(f->fn_type, 
DN_FIB_INFO(f), flp, res); f 795 net/decnet/dn_table.c res->type = f->fn_type; f 796 net/decnet/dn_table.c res->scope = f->fn_scope; f 120 net/ipv4/fib_hash.c struct fib_node *f; f 122 net/ipv4/fib_hash.c hlist_for_each_entry_safe(f, node, n, &old_ht[i], fn_hash) { f 125 net/ipv4/fib_hash.c hlist_del(&f->fn_hash); f 127 net/ipv4/fib_hash.c new_head = &fz->fz_hash[fn_hash(f->fn_key, fz)]; f 128 net/ipv4/fib_hash.c hlist_add_head(&f->fn_hash, new_head); f 192 net/ipv4/fib_hash.c kmem_cache_free(fn_hash_kmem, f); f 198 net/ipv4/fib_hash.c if (fa == &f->fn_embedded_alias) f 256 net/ipv4/fib_hash.c struct fib_node *f; f 260 net/ipv4/fib_hash.c hlist_for_each_entry(f, node, head, fn_hash) { f 261 net/ipv4/fib_hash.c if (f->fn_key != k) f 264 net/ipv4/fib_hash.c err = fib_semantic_match(&f->fn_alias, f 266 net/ipv4/fib_hash.c f->fn_key, fz->fz_mask, f 283 net/ipv4/fib_hash.c struct fib_node *f; f 297 net/ipv4/fib_hash.c hlist_for_each_entry(f, node, &fz->fz_hash[0], fn_hash) { f 300 net/ipv4/fib_hash.c list_for_each_entry(fa, &f->fn_alias, fa_list) { f 350 net/ipv4/fib_hash.c struct hlist_head *head = &fz->fz_hash[fn_hash(f->fn_key, fz)]; f 352 net/ipv4/fib_hash.c hlist_add_head(&f->fn_hash, head); f 360 net/ipv4/fib_hash.c struct fib_node *f; f 362 net/ipv4/fib_hash.c hlist_for_each_entry(f, node, head, fn_hash) { f 363 net/ipv4/fib_hash.c if (f->fn_key == key) f 364 net/ipv4/fib_hash.c return f; f 374 net/ipv4/fib_hash.c struct fib_node *f; f 406 net/ipv4/fib_hash.c f = fib_find_node(fz, key); f 408 net/ipv4/fib_hash.c if (!f) f 411 net/ipv4/fib_hash.c fa = fib_find_alias(&f->fn_alias, tos, fi->fib_priority); f 440 net/ipv4/fib_hash.c list_for_each_entry_continue(fa, &f->fn_alias, fa_list) { f 498 net/ipv4/fib_hash.c if (!f) { f 506 net/ipv4/fib_hash.c f = new_f; f 509 net/ipv4/fib_hash.c new_fa = &f->fn_embedded_alias; f 529 net/ipv4/fib_hash.c (fa ? 
&fa->fa_list : &f->fn_alias)); f 552 net/ipv4/fib_hash.c struct fib_node *f; f 570 net/ipv4/fib_hash.c f = fib_find_node(fz, key); f 572 net/ipv4/fib_hash.c if (!f) f 575 net/ipv4/fib_hash.c fa = fib_find_alias(&f->fn_alias, cfg->fc_tos, 0); f 581 net/ipv4/fib_hash.c list_for_each_entry_continue(fa, &f->fn_alias, fa_list) { f 609 net/ipv4/fib_hash.c if (list_empty(&f->fn_alias)) { f 610 net/ipv4/fib_hash.c hlist_del(&f->fn_hash); f 618 net/ipv4/fib_hash.c fn_free_alias(fa, f); f 620 net/ipv4/fib_hash.c fn_free_node(f); f 633 net/ipv4/fib_hash.c struct fib_node *f; f 636 net/ipv4/fib_hash.c hlist_for_each_entry_safe(f, node, n, head, fn_hash) { f 641 net/ipv4/fib_hash.c list_for_each_entry_safe(fa, fa_node, &f->fn_alias, fa_list) { f 647 net/ipv4/fib_hash.c if (list_empty(&f->fn_alias)) { f 648 net/ipv4/fib_hash.c hlist_del(&f->fn_hash); f 654 net/ipv4/fib_hash.c fn_free_alias(fa, f); f 659 net/ipv4/fib_hash.c fn_free_node(f); f 689 net/ipv4/fib_hash.c struct fib_node *f; f 694 net/ipv4/fib_hash.c hlist_for_each_entry(f, node, head, fn_hash) { f 697 net/ipv4/fib_hash.c list_for_each_entry(fa, &f->fn_alias, fa_list) { f 707 net/ipv4/fib_hash.c f->fn_key, f 1007 net/ipv4/fib_hash.c struct fib_node *f; f 1019 net/ipv4/fib_hash.c f = iter->fn; f 1022 net/ipv4/fib_hash.c prefix = f->fn_key; f 27 net/ipv4/inet_fragment.c struct inet_frags *f = (struct inet_frags *)dummy; f 31 net/ipv4/inet_fragment.c write_lock(&f->lock); f 32 net/ipv4/inet_fragment.c get_random_bytes(&f->rnd, sizeof(u32)); f 37 net/ipv4/inet_fragment.c hlist_for_each_entry_safe(q, p, n, &f->hash[i], list) { f 38 net/ipv4/inet_fragment.c unsigned int hval = f->hashfn(q); f 44 net/ipv4/inet_fragment.c hlist_add_head(&q->list, &f->hash[hval]); f 48 net/ipv4/inet_fragment.c write_unlock(&f->lock); f 50 net/ipv4/inet_fragment.c mod_timer(&f->secret_timer, now + f->secret_interval); f 58 net/ipv4/inet_fragment.c INIT_HLIST_HEAD(&f->hash[i]); f 60 net/ipv4/inet_fragment.c rwlock_init(&f->lock); f 62 net/ipv4/inet_fragment.c f->rnd = (u32) ((num_physpages ^ (num_physpages>>7)) ^ f 65 net/ipv4/inet_fragment.c setup_timer(&f->secret_timer, inet_frag_secret_rebuild, f 66 net/ipv4/inet_fragment.c (unsigned long)f); f 67 net/ipv4/inet_fragment.c f->secret_timer.expires = jiffies + f->secret_interval; f 68 net/ipv4/inet_fragment.c add_timer(&f->secret_timer); f 82 net/ipv4/inet_fragment.c del_timer(&f->secret_timer); f 91 net/ipv4/inet_fragment.c inet_frag_evictor(nf, f); f 98 net/ipv4/inet_fragment.c write_lock(&f->lock); f 102 net/ipv4/inet_fragment.c write_unlock(&f->lock); f 111 net/ipv4/inet_fragment.c fq_unlink(fq, f); f 126 net/ipv4/inet_fragment.c if (f->skb_free) f 127 net/ipv4/inet_fragment.c f->skb_free(skb); f 146 net/ipv4/inet_fragment.c frag_kfree_skb(nf, f, fp, work); f 151 net/ipv4/inet_fragment.c *work -= f->qsize; f 152 net/ipv4/inet_fragment.c atomic_sub(f->qsize, &nf->mem); f 154 net/ipv4/inet_fragment.c if (f->destructor) f 155 net/ipv4/inet_fragment.c f->destructor(q); f 168 net/ipv4/inet_fragment.c read_lock(&f->lock); f 170 net/ipv4/inet_fragment.c read_unlock(&f->lock); f 177 net/ipv4/inet_fragment.c read_unlock(&f->lock); f 181 net/ipv4/inet_fragment.c inet_frag_kill(q, f); f 185 net/ipv4/inet_fragment.c inet_frag_destroy(q, f, &work); f 203 net/ipv4/inet_fragment.c write_lock(&f->lock); f 209 net/ipv4/inet_fragment.c hash = f->hashfn(qp_in); f 215 net/ipv4/inet_fragment.c hlist_for_each_entry(qp, n, &f->hash[hash], list) { f 216 net/ipv4/inet_fragment.c if (qp->net == nf && f->match(qp, arg)) { f 218 
net/ipv4/inet_fragment.c write_unlock(&f->lock); f 220 net/ipv4/inet_fragment.c inet_frag_put(qp_in, f); f 230 net/ipv4/inet_fragment.c hlist_add_head(&qp->list, &f->hash[hash]); f 233 net/ipv4/inet_fragment.c write_unlock(&f->lock); f 242 net/ipv4/inet_fragment.c q = kzalloc(f->qsize, GFP_ATOMIC); f 246 net/ipv4/inet_fragment.c f->constructor(q, arg); f 247 net/ipv4/inet_fragment.c atomic_add(f->qsize, &nf->mem); f 248 net/ipv4/inet_fragment.c setup_timer(&q->timer, f->frag_expire, (unsigned long)q); f 261 net/ipv4/inet_fragment.c q = inet_frag_alloc(nf, f, arg); f 265 net/ipv4/inet_fragment.c return inet_frag_intern(nf, q, f, arg); f 274 net/ipv4/inet_fragment.c hlist_for_each_entry(q, n, &f->hash[hash], list) { f 275 net/ipv4/inet_fragment.c if (q->net == nf && f->match(q, key)) { f 277 net/ipv4/inet_fragment.c read_unlock(&f->lock); f 281 net/ipv4/inet_fragment.c read_unlock(&f->lock); f 283 net/ipv4/inet_fragment.c return inet_frag_create(nf, f, key); f 2629 net/ipv4/tcp.c const struct skb_frag_struct *f = &shi->frags[i]; f 2630 net/ipv4/tcp.c sg_set_page(&sg, f->page, f->size, f->page_offset); f 2631 net/ipv4/tcp.c if (crypto_hash_update(desc, &sg, f->size)) f 2194 net/ipv4/tcp_ipv4.c seq_printf(f, "%4d: %08X:%04X %08X:%04X" f 2240 net/ipv4/tcp_ipv4.c seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX " f 2276 net/ipv4/tcp_ipv4.c seq_printf(f, "%4d: %08X:%04X %08X:%04X" f 1629 net/ipv4/udp.c seq_printf(f, "%4d: %08X:%04X %08X:%04X" f 1152 net/ipx/af_ipx.c struct ipx_interface_definition f; f 1161 net/ipx/af_ipx.c f.ipx_network = sipx->sipx_network; f 1162 net/ipx/af_ipx.c memcpy(f.ipx_device, ifr.ifr_name, f 1163 net/ipx/af_ipx.c sizeof(f.ipx_device)); f 1164 net/ipx/af_ipx.c memcpy(f.ipx_node, sipx->sipx_node, IPX_NODE_LEN); f 1165 net/ipx/af_ipx.c f.ipx_dlink_type = sipx->sipx_type; f 1166 net/ipx/af_ipx.c f.ipx_special = sipx->sipx_special; f 1169 net/ipx/af_ipx.c rc = ipxitf_delete(&f); f 1171 net/ipx/af_ipx.c rc = ipxitf_create(&f); f 283 net/ipx/ipx_route.c struct ipx_route_definition f; f 284 net/ipx/ipx_route.c f.ipx_network = st->sipx_network; f 285 net/ipx/ipx_route.c f.ipx_router_network = sg->sipx_network; f 286 net/ipx/ipx_route.c memcpy(f.ipx_router_node, sg->sipx_node, IPX_NODE_LEN); f 287 net/ipx/ipx_route.c rc = ipxrtr_create(&f); f 3633 net/key/af_key.c seq_printf(f ,"sk RefCnt Rmem Wmem User Inode\n"); f 3635 net/key/af_key.c seq_printf(f ,"%p %-6d %-6u %-6u %-6u %-6lu\n", f 270 net/netfilter/nf_conntrack_h323_asn1.c PRINT("%*.s%s\n", level * TAB_SIZE, " ", f->name); f 279 net/netfilter/nf_conntrack_h323_asn1.c PRINT("%*.s%s\n", level * TAB_SIZE, " ", f->name); f 293 net/netfilter/nf_conntrack_h323_asn1.c PRINT("%*.s%s\n", level * TAB_SIZE, " ", f->name); f 310 net/netfilter/nf_conntrack_h323_asn1.c PRINT("%*.s%s", level * TAB_SIZE, " ", f->name); f 312 net/netfilter/nf_conntrack_h323_asn1.c switch (f->sz) { f 324 net/netfilter/nf_conntrack_h323_asn1.c if (base && (f->attr & DECODE)) { /* timeToLive */ f 325 net/netfilter/nf_conntrack_h323_asn1.c unsigned int v = get_uint(bs, len) + f->lb; f 327 net/netfilter/nf_conntrack_h323_asn1.c *((unsigned int *)(base + f->offset)) = v; f 338 net/netfilter/nf_conntrack_h323_asn1.c INC_BITS(bs, f->sz); f 352 net/netfilter/nf_conntrack_h323_asn1.c PRINT("%*.s%s\n", level * TAB_SIZE, " ", f->name); f 354 net/netfilter/nf_conntrack_h323_asn1.c if ((f->attr & EXT) && get_bit(bs)) { f 357 net/netfilter/nf_conntrack_h323_asn1.c INC_BITS(bs, f->sz); f 370 net/netfilter/nf_conntrack_h323_asn1.c PRINT("%*.s%s\n", level * 
TAB_SIZE, " ", f->name); f 373 net/netfilter/nf_conntrack_h323_asn1.c switch (f->sz) { f 375 net/netfilter/nf_conntrack_h323_asn1.c len = f->lb; f 380 net/netfilter/nf_conntrack_h323_asn1.c len += (*bs->cur++) + f->lb; f 404 net/netfilter/nf_conntrack_h323_asn1.c PRINT("%*.s%s\n", level * TAB_SIZE, " ", f->name); f 407 net/netfilter/nf_conntrack_h323_asn1.c len = get_bits(bs, f->sz) + f->lb; f 422 net/netfilter/nf_conntrack_h323_asn1.c PRINT("%*.s%s", level * TAB_SIZE, " ", f->name); f 424 net/netfilter/nf_conntrack_h323_asn1.c switch (f->sz) { f 426 net/netfilter/nf_conntrack_h323_asn1.c if (f->lb > 2) { f 428 net/netfilter/nf_conntrack_h323_asn1.c if (base && (f->attr & DECODE)) { f 430 net/netfilter/nf_conntrack_h323_asn1.c IFTHEN(f->lb == 4, f 435 net/netfilter/nf_conntrack_h323_asn1.c *((unsigned int *)(base + f->offset)) = f 439 net/netfilter/nf_conntrack_h323_asn1.c len = f->lb; f 444 net/netfilter/nf_conntrack_h323_asn1.c len = (*bs->cur++) + f->lb; f 449 net/netfilter/nf_conntrack_h323_asn1.c len = get_len(bs) + f->lb; f 452 net/netfilter/nf_conntrack_h323_asn1.c len = get_bits(bs, f->sz) + f->lb; f 471 net/netfilter/nf_conntrack_h323_asn1.c PRINT("%*.s%s\n", level * TAB_SIZE, " ", f->name); f 473 net/netfilter/nf_conntrack_h323_asn1.c switch (f->sz) { f 477 net/netfilter/nf_conntrack_h323_asn1.c len = (*bs->cur++) + f->lb; f 480 net/netfilter/nf_conntrack_h323_asn1.c len = get_bits(bs, f->sz) + f->lb; f 500 net/netfilter/nf_conntrack_h323_asn1.c PRINT("%*.s%s\n", level * TAB_SIZE, " ", f->name); f 503 net/netfilter/nf_conntrack_h323_asn1.c base = (base && (f->attr & DECODE)) ? base + f->offset : NULL; f 506 net/netfilter/nf_conntrack_h323_asn1.c ext = (f->attr & EXT) ? get_bit(bs) : 0; f 509 net/netfilter/nf_conntrack_h323_asn1.c bmp = get_bitmap(bs, f->sz); f 514 net/netfilter/nf_conntrack_h323_asn1.c for (i = opt = 0, son = f->fields; i < f->lb; i++, son++) { f 561 net/netfilter/nf_conntrack_h323_asn1.c bmp |= bmp2 >> f->sz; f 569 net/netfilter/nf_conntrack_h323_asn1.c if (i >= f->ub) { /* Newer Version? */ f 617 net/netfilter/nf_conntrack_h323_asn1.c PRINT("%*.s%s\n", level * TAB_SIZE, " ", f->name); f 620 net/netfilter/nf_conntrack_h323_asn1.c base = (base && (f->attr & DECODE)) ? base + f->offset : NULL; f 623 net/netfilter/nf_conntrack_h323_asn1.c switch (f->sz) { f 642 net/netfilter/nf_conntrack_h323_asn1.c count = get_bits(bs, f->sz); f 645 net/netfilter/nf_conntrack_h323_asn1.c count += f->lb; f 649 net/netfilter/nf_conntrack_h323_asn1.c effective_count = count > f->ub ? f->ub : count; f 655 net/netfilter/nf_conntrack_h323_asn1.c son = f->fields; f 707 net/netfilter/nf_conntrack_h323_asn1.c PRINT("%*.s%s\n", level * TAB_SIZE, " ", f->name); f 710 net/netfilter/nf_conntrack_h323_asn1.c base = (base && (f->attr & DECODE)) ? base + f->offset : NULL; f 713 net/netfilter/nf_conntrack_h323_asn1.c if ((f->attr & EXT) && get_bit(bs)) { f 715 net/netfilter/nf_conntrack_h323_asn1.c type = get_bits(bs, 7) + f->lb; f 718 net/netfilter/nf_conntrack_h323_asn1.c type = get_bits(bs, f->sz); f 719 net/netfilter/nf_conntrack_h323_asn1.c if (type >= f->lb) f 728 net/netfilter/nf_conntrack_h323_asn1.c if (type >= f->ub) { /* Newer version? 
*/ f 737 net/netfilter/nf_conntrack_h323_asn1.c son = &f->fields[type]; f 64 net/netlink/genetlink.c struct genl_family *f; f 66 net/netlink/genetlink.c list_for_each_entry(f, genl_family_chain(id), family_list) f 67 net/netlink/genetlink.c if (f->id == id) f 68 net/netlink/genetlink.c return f; f 75 net/netlink/genetlink.c struct genl_family *f; f 79 net/netlink/genetlink.c list_for_each_entry(f, genl_family_chain(i), family_list) f 80 net/netlink/genetlink.c if (strcmp(f->name, name) == 0) f 81 net/netlink/genetlink.c return f; f 793 net/rxrpc/ar-internal.h printk("resurrected (%s)\n", f); f 48 net/sched/cls_basic.c struct basic_filter *f; f 50 net/sched/cls_basic.c list_for_each_entry(f, &head->flist, link) { f 51 net/sched/cls_basic.c if (!tcf_em_tree_match(skb, &f->ematches, NULL)) f 53 net/sched/cls_basic.c *res = f->res; f 54 net/sched/cls_basic.c r = tcf_exts_exec(skb, &f->exts, res); f 66 net/sched/cls_basic.c struct basic_filter *f; f 71 net/sched/cls_basic.c list_for_each_entry(f, &head->flist, link) f 72 net/sched/cls_basic.c if (f->handle == handle) f 73 net/sched/cls_basic.c l = (unsigned long) f; f 97 net/sched/cls_basic.c tcf_unbind_filter(tp, &f->res); f 98 net/sched/cls_basic.c tcf_exts_destroy(tp, &f->exts); f 99 net/sched/cls_basic.c tcf_em_tree_destroy(tp, &f->ematches); f 100 net/sched/cls_basic.c kfree(f); f 106 net/sched/cls_basic.c struct basic_filter *f, *n; f 108 net/sched/cls_basic.c list_for_each_entry_safe(f, n, &head->flist, link) { f 109 net/sched/cls_basic.c list_del(&f->link); f 110 net/sched/cls_basic.c basic_delete_filter(tp, f); f 118 net/sched/cls_basic.c struct basic_filter *t, *f = (struct basic_filter *) arg; f 121 net/sched/cls_basic.c if (t == f) { f 154 net/sched/cls_basic.c f->res.classid = nla_get_u32(tb[TCA_BASIC_CLASSID]); f 155 net/sched/cls_basic.c tcf_bind_filter(tp, &f->res, base); f 158 net/sched/cls_basic.c tcf_exts_change(tp, &f->exts, &e); f 159 net/sched/cls_basic.c tcf_em_tree_change(tp, &f->ematches, &t); f 173 net/sched/cls_basic.c struct basic_filter *f = (struct basic_filter *) *arg; f 183 net/sched/cls_basic.c if (f != NULL) { f 184 net/sched/cls_basic.c if (handle && f->handle != handle) f 186 net/sched/cls_basic.c return basic_set_parms(tp, f, base, tb, tca[TCA_RATE]); f 190 net/sched/cls_basic.c f = kzalloc(sizeof(*f), GFP_KERNEL); f 191 net/sched/cls_basic.c if (f == NULL) f 196 net/sched/cls_basic.c f->handle = handle; f 209 net/sched/cls_basic.c f->handle = head->hgenerator; f 212 net/sched/cls_basic.c err = basic_set_parms(tp, f, base, tb, tca[TCA_RATE]); f 217 net/sched/cls_basic.c list_add(&f->link, &head->flist); f 219 net/sched/cls_basic.c *arg = (unsigned long) f; f 223 net/sched/cls_basic.c if (*arg == 0UL && f) f 224 net/sched/cls_basic.c kfree(f); f 232 net/sched/cls_basic.c struct basic_filter *f; f 234 net/sched/cls_basic.c list_for_each_entry(f, &head->flist, link) { f 238 net/sched/cls_basic.c if (arg->fn(tp, (unsigned long) f, arg) < 0) { f 250 net/sched/cls_basic.c struct basic_filter *f = (struct basic_filter *) fh; f 253 net/sched/cls_basic.c if (f == NULL) f 256 net/sched/cls_basic.c t->tcm_handle = f->handle; f 262 net/sched/cls_basic.c if (f->res.classid) f 263 net/sched/cls_basic.c NLA_PUT_U32(skb, TCA_BASIC_CLASSID, f->res.classid); f 265 net/sched/cls_basic.c if (tcf_exts_dump(skb, &f->exts, &basic_ext_map) < 0 || f 266 net/sched/cls_basic.c tcf_em_tree_dump(skb, &f->ematches, TCA_BASIC_EMATCHES) < 0) f 330 net/sched/cls_flow.c struct flow_filter *f; f 336 net/sched/cls_flow.c list_for_each_entry(f, 
&head->filters, list) { f 337 net/sched/cls_flow.c u32 keys[f->nkeys]; f 339 net/sched/cls_flow.c if (!tcf_em_tree_match(skb, &f->ematches, NULL)) f 342 net/sched/cls_flow.c keymask = f->keymask; f 344 net/sched/cls_flow.c for (n = 0; n < f->nkeys; n++) { f 350 net/sched/cls_flow.c if (f->mode == FLOW_MODE_HASH) f 351 net/sched/cls_flow.c classid = jhash2(keys, f->nkeys, f->hashrnd); f 354 net/sched/cls_flow.c classid = (classid & f->mask) ^ f->xor; f 355 net/sched/cls_flow.c classid = (classid >> f->rshift) + f->addend; f 358 net/sched/cls_flow.c if (f->divisor) f 359 net/sched/cls_flow.c classid %= f->divisor; f 362 net/sched/cls_flow.c res->classid = TC_H_MAKE(f->baseclass, f->baseclass + classid); f 364 net/sched/cls_flow.c r = tcf_exts_exec(skb, &f->exts, res); f 374 net/sched/cls_flow.c struct flow_filter *f = (struct flow_filter *)arg; f 376 net/sched/cls_flow.c get_random_bytes(&f->hashrnd, 4); f 377 net/sched/cls_flow.c if (f->perturb_period) f 378 net/sched/cls_flow.c mod_timer(&f->perturb_timer, jiffies + f->perturb_period); f 401 net/sched/cls_flow.c struct flow_filter *f; f 445 net/sched/cls_flow.c f = (struct flow_filter *)*arg; f 446 net/sched/cls_flow.c if (f != NULL) { f 448 net/sched/cls_flow.c if (f->handle != handle && handle) f 451 net/sched/cls_flow.c mode = f->mode; f 458 net/sched/cls_flow.c perturb_period = f->perturb_period; f 489 net/sched/cls_flow.c f = kzalloc(sizeof(*f), GFP_KERNEL); f 490 net/sched/cls_flow.c if (f == NULL) f 493 net/sched/cls_flow.c f->handle = handle; f 494 net/sched/cls_flow.c f->mask = ~0U; f 496 net/sched/cls_flow.c get_random_bytes(&f->hashrnd, 4); f 497 net/sched/cls_flow.c f->perturb_timer.function = flow_perturbation; f 498 net/sched/cls_flow.c f->perturb_timer.data = (unsigned long)f; f 499 net/sched/cls_flow.c init_timer_deferrable(&f->perturb_timer); f 502 net/sched/cls_flow.c tcf_exts_change(tp, &f->exts, &e); f 503 net/sched/cls_flow.c tcf_em_tree_change(tp, &f->ematches, &t); f 508 net/sched/cls_flow.c f->keymask = keymask; f 509 net/sched/cls_flow.c f->nkeys = nkeys; f 512 net/sched/cls_flow.c f->mode = mode; f 515 net/sched/cls_flow.c f->mask = nla_get_u32(tb[TCA_FLOW_MASK]); f 517 net/sched/cls_flow.c f->xor = nla_get_u32(tb[TCA_FLOW_XOR]); f 519 net/sched/cls_flow.c f->rshift = nla_get_u32(tb[TCA_FLOW_RSHIFT]); f 521 net/sched/cls_flow.c f->addend = nla_get_u32(tb[TCA_FLOW_ADDEND]); f 524 net/sched/cls_flow.c f->divisor = nla_get_u32(tb[TCA_FLOW_DIVISOR]); f 526 net/sched/cls_flow.c f->baseclass = baseclass; f 528 net/sched/cls_flow.c f->perturb_period = perturb_period; f 529 net/sched/cls_flow.c del_timer(&f->perturb_timer); f 531 net/sched/cls_flow.c mod_timer(&f->perturb_timer, jiffies + perturb_period); f 534 net/sched/cls_flow.c list_add_tail(&f->list, &head->filters); f 538 net/sched/cls_flow.c *arg = (unsigned long)f; f 550 net/sched/cls_flow.c del_timer_sync(&f->perturb_timer); f 551 net/sched/cls_flow.c tcf_exts_destroy(tp, &f->exts); f 552 net/sched/cls_flow.c tcf_em_tree_destroy(tp, &f->ematches); f 553 net/sched/cls_flow.c kfree(f); f 558 net/sched/cls_flow.c struct flow_filter *f = (struct flow_filter *)arg; f 561 net/sched/cls_flow.c list_del(&f->list); f 563 net/sched/cls_flow.c flow_destroy_filter(tp, f); f 582 net/sched/cls_flow.c struct flow_filter *f, *next; f 584 net/sched/cls_flow.c list_for_each_entry_safe(f, next, &head->filters, list) { f 585 net/sched/cls_flow.c list_del(&f->list); f 586 net/sched/cls_flow.c flow_destroy_filter(tp, f); f 594 net/sched/cls_flow.c struct flow_filter *f; f 596 
net/sched/cls_flow.c list_for_each_entry(f, &head->filters, list) f 597 net/sched/cls_flow.c if (f->handle == handle) f 598 net/sched/cls_flow.c return (unsigned long)f; f 610 net/sched/cls_flow.c struct flow_filter *f = (struct flow_filter *)fh; f 613 net/sched/cls_flow.c if (f == NULL) f 616 net/sched/cls_flow.c t->tcm_handle = f->handle; f 622 net/sched/cls_flow.c NLA_PUT_U32(skb, TCA_FLOW_KEYS, f->keymask); f 623 net/sched/cls_flow.c NLA_PUT_U32(skb, TCA_FLOW_MODE, f->mode); f 625 net/sched/cls_flow.c if (f->mask != ~0 || f->xor != 0) { f 626 net/sched/cls_flow.c NLA_PUT_U32(skb, TCA_FLOW_MASK, f->mask); f 627 net/sched/cls_flow.c NLA_PUT_U32(skb, TCA_FLOW_XOR, f->xor); f 629 net/sched/cls_flow.c if (f->rshift) f 630 net/sched/cls_flow.c NLA_PUT_U32(skb, TCA_FLOW_RSHIFT, f->rshift); f 631 net/sched/cls_flow.c if (f->addend) f 632 net/sched/cls_flow.c NLA_PUT_U32(skb, TCA_FLOW_ADDEND, f->addend); f 634 net/sched/cls_flow.c if (f->divisor) f 635 net/sched/cls_flow.c NLA_PUT_U32(skb, TCA_FLOW_DIVISOR, f->divisor); f 636 net/sched/cls_flow.c if (f->baseclass) f 637 net/sched/cls_flow.c NLA_PUT_U32(skb, TCA_FLOW_BASECLASS, f->baseclass); f 639 net/sched/cls_flow.c if (f->perturb_period) f 640 net/sched/cls_flow.c NLA_PUT_U32(skb, TCA_FLOW_PERTURB, f->perturb_period / HZ); f 642 net/sched/cls_flow.c if (tcf_exts_dump(skb, &f->exts, &flow_ext_map) < 0) f 645 net/sched/cls_flow.c if (f->ematches.hdr.nmatches && f 646 net/sched/cls_flow.c tcf_em_tree_dump(skb, &f->ematches, TCA_FLOW_EMATCHES) < 0) f 651 net/sched/cls_flow.c if (tcf_exts_dump_stats(skb, &f->exts, &flow_ext_map) < 0) f 664 net/sched/cls_flow.c struct flow_filter *f; f 666 net/sched/cls_flow.c list_for_each_entry(f, &head->filters, list) { f 669 net/sched/cls_flow.c if (arg->fn(tp, (unsigned long)f, arg) < 0) { f 85 net/sched/cls_fw.c struct fw_filter *f; f 91 net/sched/cls_fw.c for (f=head->ht[fw_hash(id)]; f; f=f->next) { f 92 net/sched/cls_fw.c if (f->id == id) { f 93 net/sched/cls_fw.c *res = f->res; f 95 net/sched/cls_fw.c if (!tcf_match_indev(skb, f->indev)) f 98 net/sched/cls_fw.c r = tcf_exts_exec(skb, &f->exts, res); f 120 net/sched/cls_fw.c struct fw_filter *f; f 125 net/sched/cls_fw.c for (f=head->ht[fw_hash(handle)]; f; f=f->next) { f 126 net/sched/cls_fw.c if (f->id == handle) f 127 net/sched/cls_fw.c return (unsigned long)f; f 144 net/sched/cls_fw.c tcf_unbind_filter(tp, &f->res); f 145 net/sched/cls_fw.c tcf_exts_destroy(tp, &f->exts); f 146 net/sched/cls_fw.c kfree(f); f 152 net/sched/cls_fw.c struct fw_filter *f; f 159 net/sched/cls_fw.c while ((f=head->ht[h]) != NULL) { f 160 net/sched/cls_fw.c head->ht[h] = f->next; f 161 net/sched/cls_fw.c fw_delete_filter(tp, f); f 170 net/sched/cls_fw.c struct fw_filter *f = (struct fw_filter*)arg; f 173 net/sched/cls_fw.c if (head == NULL || f == NULL) f 176 net/sched/cls_fw.c for (fp=&head->ht[fw_hash(f->id)]; *fp; fp = &(*fp)->next) { f 177 net/sched/cls_fw.c if (*fp == f) { f 179 net/sched/cls_fw.c *fp = f->next; f 181 net/sched/cls_fw.c fw_delete_filter(tp, f); f 210 net/sched/cls_fw.c f->res.classid = nla_get_u32(tb[TCA_FW_CLASSID]); f 211 net/sched/cls_fw.c tcf_bind_filter(tp, &f->res, base); f 216 net/sched/cls_fw.c err = tcf_change_indev(tp, f->indev, tb[TCA_FW_INDEV]); f 229 net/sched/cls_fw.c tcf_exts_change(tp, &f->exts, &e); f 243 net/sched/cls_fw.c struct fw_filter *f = (struct fw_filter *) *arg; f 255 net/sched/cls_fw.c if (f != NULL) { f 256 net/sched/cls_fw.c if (f->id != handle && handle) f 258 net/sched/cls_fw.c return fw_change_attrs(tp, f, tb, tca, base); 
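The cls_basic, cls_flow and cls_fw records around this point all trace the same classifier shape: a filter `f` keyed by a handle, kept either in a plain list or in a small hash of singly-linked buckets, located by walking the chain until `f->id` / `f->handle` matches, and spliced in at the bucket head on change. A minimal, self-contained C sketch of that walk follows; the `toy_*` names are hypothetical stand-ins for illustration only, not the kernel API.

/* Simplified illustration of the fw_get()/fw_change() bucket walk seen in
 * the cls_fw entries above. toy_filter and toy_head are made-up types. */
#include <stddef.h>

#define TOY_HTSIZE 256

struct toy_filter {
        struct toy_filter *next;   /* singly-linked bucket chain */
        unsigned int id;           /* handle the classifier matches on */
        unsigned int classid;      /* result handed back to the caller */
};

struct toy_head {
        struct toy_filter *ht[TOY_HTSIZE];
};

static unsigned int toy_hash(unsigned int id)
{
        return id & (TOY_HTSIZE - 1);
}

/* Walk one bucket the way fw_get() walks head->ht[fw_hash(handle)]. */
static struct toy_filter *toy_lookup(struct toy_head *head, unsigned int id)
{
        struct toy_filter *f;

        for (f = head->ht[toy_hash(id)]; f; f = f->next)
                if (f->id == id)
                        return f;
        return NULL;
}

/* Link a new filter at the bucket head, as fw_change() does. */
static void toy_insert(struct toy_head *head, struct toy_filter *f)
{
        unsigned int h = toy_hash(f->id);

        f->next = head->ht[h];
        head->ht[h] = f;
}

Head insertion keeps the change path constant-time and needs no tail pointer; lookups simply tolerate whatever order the bucket ends up in, which is exactly the trade-off visible in the cls_fw entries.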
f 279 net/sched/cls_fw.c f = kzalloc(sizeof(struct fw_filter), GFP_KERNEL); f 280 net/sched/cls_fw.c if (f == NULL) f 283 net/sched/cls_fw.c f->id = handle; f 285 net/sched/cls_fw.c err = fw_change_attrs(tp, f, tb, tca, base); f 289 net/sched/cls_fw.c f->next = head->ht[fw_hash(handle)]; f 291 net/sched/cls_fw.c head->ht[fw_hash(handle)] = f; f 294 net/sched/cls_fw.c *arg = (unsigned long)f; f 298 net/sched/cls_fw.c kfree(f); f 314 net/sched/cls_fw.c struct fw_filter *f; f 316 net/sched/cls_fw.c for (f = head->ht[h]; f; f = f->next) { f 321 net/sched/cls_fw.c if (arg->fn(tp, (unsigned long)f, arg) < 0) { f 334 net/sched/cls_fw.c struct fw_filter *f = (struct fw_filter*)fh; f 338 net/sched/cls_fw.c if (f == NULL) f 341 net/sched/cls_fw.c t->tcm_handle = f->id; f 343 net/sched/cls_fw.c if (!f->res.classid && !tcf_exts_is_available(&f->exts)) f 350 net/sched/cls_fw.c if (f->res.classid) f 351 net/sched/cls_fw.c NLA_PUT_U32(skb, TCA_FW_CLASSID, f->res.classid); f 353 net/sched/cls_fw.c if (strlen(f->indev)) f 354 net/sched/cls_fw.c NLA_PUT_STRING(skb, TCA_FW_INDEV, f->indev); f 359 net/sched/cls_fw.c if (tcf_exts_dump(skb, &f->exts, &fw_ext_map) < 0) f 364 net/sched/cls_fw.c if (tcf_exts_dump_stats(skb, &f->exts, &fw_ext_map) < 0) f 92 net/sched/cls_route.c head->fastmap[h].filter = f; f 117 net/sched/cls_route.c *res = f->res; \ f 118 net/sched/cls_route.c if (tcf_exts_is_available(&f->exts)) { \ f 119 net/sched/cls_route.c int r = tcf_exts_exec(skb, &f->exts, res); \ f 126 net/sched/cls_route.c route4_set_fastmap(head, id, iif, f); \ f 136 net/sched/cls_route.c struct route4_filter *f; f 152 net/sched/cls_route.c (f = head->fastmap[h].filter) != NULL) { f 153 net/sched/cls_route.c if (f == ROUTE4_FAILURE) f 156 net/sched/cls_route.c *res = f->res; f 164 net/sched/cls_route.c for (f = b->ht[route4_hash_from(id)]; f; f = f->next) f 165 net/sched/cls_route.c if (f->id == id) f 168 net/sched/cls_route.c for (f = b->ht[route4_hash_iif(iif)]; f; f = f->next) f 169 net/sched/cls_route.c if (f->iif == iif) f 172 net/sched/cls_route.c for (f = b->ht[route4_hash_wild()]; f; f = f->next) f 222 net/sched/cls_route.c struct route4_filter *f; f 237 net/sched/cls_route.c for (f = b->ht[h2]; f; f = f->next) f 238 net/sched/cls_route.c if (f->handle == handle) f 239 net/sched/cls_route.c return (unsigned long)f; f 256 net/sched/cls_route.c tcf_unbind_filter(tp, &f->res); f 257 net/sched/cls_route.c tcf_exts_destroy(tp, &f->exts); f 258 net/sched/cls_route.c kfree(f); f 274 net/sched/cls_route.c struct route4_filter *f; f 276 net/sched/cls_route.c while ((f = b->ht[h2]) != NULL) { f 277 net/sched/cls_route.c b->ht[h2] = f->next; f 278 net/sched/cls_route.c route4_delete_filter(tp, f); f 290 net/sched/cls_route.c struct route4_filter **fp, *f = (struct route4_filter*)arg; f 295 net/sched/cls_route.c if (!head || !f) f 298 net/sched/cls_route.c h = f->handle; f 299 net/sched/cls_route.c b = f->bkt; f 302 net/sched/cls_route.c if (*fp == f) { f 304 net/sched/cls_route.c *fp = f->next; f 307 net/sched/cls_route.c route4_reset_fastmap(tp->q, head, f->id); f 308 net/sched/cls_route.c route4_delete_filter(tp, f); f 395 net/sched/cls_route.c if (fp->handle == f->handle) f 401 net/sched/cls_route.c f->id = to; f 404 net/sched/cls_route.c f->id = to | id<<16; f 406 net/sched/cls_route.c f->iif = id; f 408 net/sched/cls_route.c f->handle = nhandle; f 409 net/sched/cls_route.c f->bkt = b; f 413 net/sched/cls_route.c f->res.classid = nla_get_u32(tb[TCA_ROUTE4_CLASSID]); f 414 net/sched/cls_route.c tcf_bind_filter(tp, 
&f->res, base); f 417 net/sched/cls_route.c tcf_exts_change(tp, &f->exts, &e); f 431 net/sched/cls_route.c struct route4_filter *f, *f1, **fp; f 446 net/sched/cls_route.c if ((f = (struct route4_filter*)*arg) != NULL) { f 447 net/sched/cls_route.c if (f->handle != handle && handle) f 450 net/sched/cls_route.c if (f->bkt) f 451 net/sched/cls_route.c old_handle = f->handle; f 453 net/sched/cls_route.c err = route4_set_parms(tp, base, f, handle, head, tb, f 472 net/sched/cls_route.c f = kzalloc(sizeof(struct route4_filter), GFP_KERNEL); f 473 net/sched/cls_route.c if (f == NULL) f 476 net/sched/cls_route.c err = route4_set_parms(tp, base, f, handle, head, tb, f 482 net/sched/cls_route.c h = from_hash(f->handle >> 16); f 483 net/sched/cls_route.c for (fp = &f->bkt->ht[h]; (f1=*fp) != NULL; fp = &f1->next) f 484 net/sched/cls_route.c if (f->handle < f1->handle) f 487 net/sched/cls_route.c f->next = f1; f 489 net/sched/cls_route.c *fp = f; f 491 net/sched/cls_route.c if (old_handle && f->handle != old_handle) { f 496 net/sched/cls_route.c if (*fp == f) { f 497 net/sched/cls_route.c *fp = f->next; f 505 net/sched/cls_route.c route4_reset_fastmap(tp->q, head, f->id); f 506 net/sched/cls_route.c *arg = (unsigned long)f; f 510 net/sched/cls_route.c kfree(f); f 530 net/sched/cls_route.c struct route4_filter *f; f 532 net/sched/cls_route.c for (f = b->ht[h1]; f; f = f->next) { f 537 net/sched/cls_route.c if (arg->fn(tp, (unsigned long)f, arg) < 0) { f 551 net/sched/cls_route.c struct route4_filter *f = (struct route4_filter*)fh; f 556 net/sched/cls_route.c if (f == NULL) f 559 net/sched/cls_route.c t->tcm_handle = f->handle; f 565 net/sched/cls_route.c if (!(f->handle&0x8000)) { f 566 net/sched/cls_route.c id = f->id&0xFF; f 569 net/sched/cls_route.c if (f->handle&0x80000000) { f 570 net/sched/cls_route.c if ((f->handle>>16) != 0xFFFF) f 571 net/sched/cls_route.c NLA_PUT_U32(skb, TCA_ROUTE4_IIF, f->iif); f 573 net/sched/cls_route.c id = f->id>>16; f 576 net/sched/cls_route.c if (f->res.classid) f 577 net/sched/cls_route.c NLA_PUT_U32(skb, TCA_ROUTE4_CLASSID, f->res.classid); f 579 net/sched/cls_route.c if (tcf_exts_dump(skb, &f->exts, &route_ext_map) < 0) f 584 net/sched/cls_route.c if (tcf_exts_dump_stats(skb, &f->exts, &route_ext_map) < 0) f 127 net/sched/cls_rsvp.h int r = tcf_exts_exec(skb, &f->exts, res); \ f 139 net/sched/cls_rsvp.h struct rsvp_filter *f; f 181 net/sched/cls_rsvp.h for (f = s->ht[h2]; f; f = f->next) { f 182 net/sched/cls_rsvp.h if (src[RSVP_DST_LEN-1] == f->src[RSVP_DST_LEN-1] && f 183 net/sched/cls_rsvp.h !(f->spi.mask & (*(u32*)(xprt+f->spi.offset)^f->spi.key)) f 185 net/sched/cls_rsvp.h && src[0] == f->src[0] f 186 net/sched/cls_rsvp.h && src[1] == f->src[1] f 187 net/sched/cls_rsvp.h && src[2] == f->src[2] f 190 net/sched/cls_rsvp.h *res = f->res; f 194 net/sched/cls_rsvp.h if (f->tunnelhdr == 0) f 197 net/sched/cls_rsvp.h tunnelid = f->res.classid; f 198 net/sched/cls_rsvp.h nhptr = (void*)(xprt + f->tunnelhdr - sizeof(*nhptr)); f 204 net/sched/cls_rsvp.h for (f = s->ht[16]; f; f = f->next) { f 205 net/sched/cls_rsvp.h *res = f->res; f 219 net/sched/cls_rsvp.h struct rsvp_filter *f; f 227 net/sched/cls_rsvp.h for (f = s->ht[h2]; f; f = f->next) { f 228 net/sched/cls_rsvp.h if (f->handle == handle) f 229 net/sched/cls_rsvp.h return (unsigned long)f; f 254 net/sched/cls_rsvp.h tcf_unbind_filter(tp, &f->res); f 255 net/sched/cls_rsvp.h tcf_exts_destroy(tp, &f->exts); f 256 net/sched/cls_rsvp.h kfree(f); f 277 net/sched/cls_rsvp.h struct rsvp_filter *f; f 279 
net/sched/cls_rsvp.h while ((f = s->ht[h2]) != NULL) { f 280 net/sched/cls_rsvp.h s->ht[h2] = f->next; f 281 net/sched/cls_rsvp.h rsvp_delete_filter(tp, f); f 292 net/sched/cls_rsvp.h struct rsvp_filter **fp, *f = (struct rsvp_filter*)arg; f 293 net/sched/cls_rsvp.h unsigned h = f->handle; f 295 net/sched/cls_rsvp.h struct rsvp_session *s = f->sess; f 299 net/sched/cls_rsvp.h if (*fp == f) { f 301 net/sched/cls_rsvp.h *fp = f->next; f 303 net/sched/cls_rsvp.h rsvp_delete_filter(tp, f); f 369 net/sched/cls_rsvp.h struct rsvp_filter *f; f 371 net/sched/cls_rsvp.h for (f = s->ht[h2]; f; f = f->next) { f 372 net/sched/cls_rsvp.h if (f->tunnelhdr == 0) f 374 net/sched/cls_rsvp.h data->tgenerator = f->res.classid; f 415 net/sched/cls_rsvp.h struct rsvp_filter *f, **fp; f 436 net/sched/cls_rsvp.h if ((f = (struct rsvp_filter*)*arg) != NULL) { f 439 net/sched/cls_rsvp.h if (f->handle != handle && handle) f 442 net/sched/cls_rsvp.h f->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID-1]); f 443 net/sched/cls_rsvp.h tcf_bind_filter(tp, &f->res, base); f 446 net/sched/cls_rsvp.h tcf_exts_change(tp, &f->exts, &e); f 458 net/sched/cls_rsvp.h f = kzalloc(sizeof(struct rsvp_filter), GFP_KERNEL); f 459 net/sched/cls_rsvp.h if (f == NULL) f 464 net/sched/cls_rsvp.h memcpy(f->src, nla_data(tb[TCA_RSVP_SRC-1]), sizeof(f->src)); f 465 net/sched/cls_rsvp.h h2 = hash_src(f->src); f 469 net/sched/cls_rsvp.h f->spi = pinfo->spi; f 470 net/sched/cls_rsvp.h f->tunnelhdr = pinfo->tunnelhdr; f 473 net/sched/cls_rsvp.h f->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID-1]); f 479 net/sched/cls_rsvp.h if ((f->handle = gen_handle(tp, h1 | (h2<<8))) == 0) f 482 net/sched/cls_rsvp.h if (f->tunnelhdr) { f 484 net/sched/cls_rsvp.h if (f->res.classid > 255) f 488 net/sched/cls_rsvp.h if (f->res.classid == 0 && f 489 net/sched/cls_rsvp.h (f->res.classid = gen_tunnel(data)) == 0) f 509 net/sched/cls_rsvp.h f->sess = s; f 510 net/sched/cls_rsvp.h if (f->tunnelhdr == 0) f 511 net/sched/cls_rsvp.h tcf_bind_filter(tp, &f->res, base); f 513 net/sched/cls_rsvp.h tcf_exts_change(tp, &f->exts, &e); f 516 net/sched/cls_rsvp.h if (((*fp)->spi.mask&f->spi.mask) != f->spi.mask) f 518 net/sched/cls_rsvp.h f->next = *fp; f 520 net/sched/cls_rsvp.h *fp = f; f 522 net/sched/cls_rsvp.h *arg = (unsigned long)f; f 551 net/sched/cls_rsvp.h kfree(f); f 570 net/sched/cls_rsvp.h struct rsvp_filter *f; f 572 net/sched/cls_rsvp.h for (f = s->ht[h1]; f; f = f->next) { f 577 net/sched/cls_rsvp.h if (arg->fn(tp, (unsigned long)f, arg) < 0) { f 591 net/sched/cls_rsvp.h struct rsvp_filter *f = (struct rsvp_filter*)fh; f 597 net/sched/cls_rsvp.h if (f == NULL) f 599 net/sched/cls_rsvp.h s = f->sess; f 601 net/sched/cls_rsvp.h t->tcm_handle = f->handle; f 609 net/sched/cls_rsvp.h pinfo.spi = f->spi; f 612 net/sched/cls_rsvp.h pinfo.tunnelhdr = f->tunnelhdr; f 615 net/sched/cls_rsvp.h if (f->res.classid) f 616 net/sched/cls_rsvp.h NLA_PUT_U32(skb, TCA_RSVP_CLASSID, f->res.classid); f 617 net/sched/cls_rsvp.h if (((f->handle>>8)&0xFF) != 16) f 618 net/sched/cls_rsvp.h NLA_PUT(skb, TCA_RSVP_SRC, sizeof(f->src), f->src); f 620 net/sched/cls_rsvp.h if (tcf_exts_dump(skb, &f->exts, &rsvp_ext_map) < 0) f 625 net/sched/cls_rsvp.h if (tcf_exts_dump_stats(skb, &f->exts, &rsvp_ext_map) < 0) f 72 net/sched/cls_tcindex.c struct tcindex_filter *f; f 78 net/sched/cls_tcindex.c for (f = p->h[key % p->hash]; f; f = f->next) f 79 net/sched/cls_tcindex.c if (f->key == key) f 80 net/sched/cls_tcindex.c return &f->result; f 91 net/sched/cls_tcindex.c struct tcindex_filter_result *f; f 
97 net/sched/cls_tcindex.c f = tcindex_lookup(p, key); f 98 net/sched/cls_tcindex.c if (!f) { f 106 net/sched/cls_tcindex.c *res = f->res; f 109 net/sched/cls_tcindex.c return tcf_exts_exec(skb, &f->exts, res); f 128 net/sched/cls_tcindex.c pr_debug("tcindex_put(tp %p,f 0x%lx)\n", tp, f); f 155 net/sched/cls_tcindex.c struct tcindex_filter *f = NULL; f 157 net/sched/cls_tcindex.c pr_debug("tcindex_delete(tp %p,arg 0x%lx),p %p,f %p\n", tp, arg, p, f); f 172 net/sched/cls_tcindex.c f = *walk; f 175 net/sched/cls_tcindex.c *walk = f->next; f 181 net/sched/cls_tcindex.c kfree(f); f 213 net/sched/cls_tcindex.c struct tcindex_filter *f = NULL; /* make gcc behave */ f 283 net/sched/cls_tcindex.c cp.h = kcalloc(cp.hash, sizeof(f), GFP_KERNEL); f 296 net/sched/cls_tcindex.c f = kzalloc(sizeof(*f), GFP_KERNEL); f 297 net/sched/cls_tcindex.c if (!f) f 318 net/sched/cls_tcindex.c f->key = handle; f 319 net/sched/cls_tcindex.c f->result = new_filter_result; f 320 net/sched/cls_tcindex.c f->next = NULL; f 323 net/sched/cls_tcindex.c *fp = f; f 367 net/sched/cls_tcindex.c struct tcindex_filter *f, *next; f 389 net/sched/cls_tcindex.c for (f = p->h[i]; f; f = next) { f 390 net/sched/cls_tcindex.c next = f->next; f 392 net/sched/cls_tcindex.c if (walker->fn(tp, (unsigned long) &f->result, f 455 net/sched/cls_tcindex.c struct tcindex_filter *f; f 460 net/sched/cls_tcindex.c for (f = p->h[i]; !t->tcm_handle && f; f 461 net/sched/cls_tcindex.c f = f->next) { f 462 net/sched/cls_tcindex.c if (&f->result == r) f 463 net/sched/cls_tcindex.c t->tcm_handle = f->key; f 682 net/sched/sch_hfsc.c u64 vt, f, cur_time; f 760 net/sched/sch_hfsc.c f = max(cl->cl_myf, cl->cl_cfmin); f 761 net/sched/sch_hfsc.c if (f != cl->cl_f) { f 762 net/sched/sch_hfsc.c cl->cl_f = f; f 772 net/sched/sch_hfsc.c u64 f; /* , myf_bound, delta; */ f 849 net/sched/sch_hfsc.c f = max(cl->cl_myf, cl->cl_cfmin); f 850 net/sched/sch_hfsc.c if (f != cl->cl_f) { f 851 net/sched/sch_hfsc.c cl->cl_f = f; f 678 net/sunrpc/rpcb_clnt.c int c, i, f, first, val; f 698 net/sunrpc/rpcb_clnt.c f = 1; f 702 net/sunrpc/rpcb_clnt.c val += (c - '0') * f; f 703 net/sunrpc/rpcb_clnt.c f *= 10; f 708 net/sunrpc/rpcb_clnt.c f = 1; f 175 net/x25/x25_proc.c struct x25_forward *f; f 179 net/x25/x25_proc.c f = list_entry(entry, struct x25_forward, node); f 184 net/x25/x25_proc.c f = NULL; f 186 net/x25/x25_proc.c return f; f 200 net/x25/x25_proc.c struct x25_forward *f; f 204 net/x25/x25_proc.c f = NULL; f 206 net/x25/x25_proc.c f = list_entry(x25_forward_list.next, f 210 net/x25/x25_proc.c f = v; f 211 net/x25/x25_proc.c if (f->node.next != &x25_forward_list) f 212 net/x25/x25_proc.c f = list_entry(f->node.next, struct x25_forward, node); f 214 net/x25/x25_proc.c f = NULL; f 216 net/x25/x25_proc.c return f; f 228 net/x25/x25_proc.c struct x25_forward *f; f 235 net/x25/x25_proc.c f = v; f 238 net/x25/x25_proc.c f->lci, f->dev1->name, f->dev2->name); f 675 net/xfrm/xfrm_user.c u32 *f; f 681 net/xfrm/xfrm_user.c f = nlmsg_data(nlh); f 682 net/xfrm/xfrm_user.c *f = flags; f 733 net/xfrm/xfrm_user.c u32 *f; f 739 net/xfrm/xfrm_user.c f = nlmsg_data(nlh); f 740 net/xfrm/xfrm_user.c *f = flags; f 242 scripts/genksyms/genksyms.c putc(symbol_type_name[list->tag][0], f); f 243 scripts/genksyms/genksyms.c putc('#', f); f 245 scripts/genksyms/genksyms.c fputs(list->string, f); f 255 scripts/genksyms/genksyms.c fputs("(nil)", f); f 272 scripts/genksyms/genksyms.c print_node(f, *b++); f 273 scripts/genksyms/genksyms.c putc(' ', f); f 61 scripts/kconfig/lxdialog/util.c dlg.dialog.fg 
= (f); \ f 2682 security/selinux/ss/services.c struct audit_field *f = &rule->fields[i]; f 2683 security/selinux/ss/services.c switch (f->type) { f 2464 security/smack/smack_lsm.c struct audit_field *f; f 2468 security/smack/smack_lsm.c f = &krule->fields[i]; f 2470 security/smack/smack_lsm.c if (f->type == AUDIT_SUBJ_USER || f->type == AUDIT_OBJ_USER) f 1783 sound/core/oss/pcm_oss.c int f = snd_pcm_oss_format_to(fmt); f 1784 sound/core/oss/pcm_oss.c if (f >= 0) f 1785 sound/core/oss/pcm_oss.c formats |= f; f 324 sound/core/oss/pcm_plugin.c int f = preferred_formats[i]; f 326 sound/core/oss/pcm_plugin.c if (!snd_mask_test(format_mask, f)) f 328 sound/core/oss/pcm_plugin.c w = snd_pcm_format_width(f); f 333 sound/core/oss/pcm_plugin.c badness += snd_pcm_format_unsigned(f) != unsignd; f 334 sound/core/oss/pcm_plugin.c badness += snd_pcm_format_big_endian(f) != big; f 336 sound/core/oss/pcm_plugin.c best_format = f; f 33 sound/core/seq/seq_fifo.c struct snd_seq_fifo *f; f 35 sound/core/seq/seq_fifo.c f = kzalloc(sizeof(*f), GFP_KERNEL); f 36 sound/core/seq/seq_fifo.c if (f == NULL) { f 41 sound/core/seq/seq_fifo.c f->pool = snd_seq_pool_new(poolsize); f 42 sound/core/seq/seq_fifo.c if (f->pool == NULL) { f 43 sound/core/seq/seq_fifo.c kfree(f); f 46 sound/core/seq/seq_fifo.c if (snd_seq_pool_init(f->pool) < 0) { f 47 sound/core/seq/seq_fifo.c snd_seq_pool_delete(&f->pool); f 48 sound/core/seq/seq_fifo.c kfree(f); f 52 sound/core/seq/seq_fifo.c spin_lock_init(&f->lock); f 53 sound/core/seq/seq_fifo.c snd_use_lock_init(&f->use_lock); f 54 sound/core/seq/seq_fifo.c init_waitqueue_head(&f->input_sleep); f 55 sound/core/seq/seq_fifo.c atomic_set(&f->overflow, 0); f 57 sound/core/seq/seq_fifo.c f->head = NULL; f 58 sound/core/seq/seq_fifo.c f->tail = NULL; f 59 sound/core/seq/seq_fifo.c f->cells = 0; f 61 sound/core/seq/seq_fifo.c return f; f 66 sound/core/seq/seq_fifo.c struct snd_seq_fifo *f; f 70 sound/core/seq/seq_fifo.c f = *fifo; f 71 sound/core/seq/seq_fifo.c if (snd_BUG_ON(!f)) f 75 sound/core/seq/seq_fifo.c snd_seq_fifo_clear(f); f 78 sound/core/seq/seq_fifo.c if (waitqueue_active(&f->input_sleep)) f 79 sound/core/seq/seq_fifo.c wake_up(&f->input_sleep); f 84 sound/core/seq/seq_fifo.c if (f->pool) { f 85 sound/core/seq/seq_fifo.c snd_seq_pool_done(f->pool); f 86 sound/core/seq/seq_fifo.c snd_seq_pool_delete(&f->pool); f 89 sound/core/seq/seq_fifo.c kfree(f); f 101 sound/core/seq/seq_fifo.c atomic_set(&f->overflow, 0); f 103 sound/core/seq/seq_fifo.c snd_use_lock_sync(&f->use_lock); f 104 sound/core/seq/seq_fifo.c spin_lock_irqsave(&f->lock, flags); f 106 sound/core/seq/seq_fifo.c while ((cell = fifo_cell_out(f)) != NULL) { f 109 sound/core/seq/seq_fifo.c spin_unlock_irqrestore(&f->lock, flags); f 121 sound/core/seq/seq_fifo.c if (snd_BUG_ON(!f)) f 124 sound/core/seq/seq_fifo.c snd_use_lock_use(&f->use_lock); f 125 sound/core/seq/seq_fifo.c err = snd_seq_event_dup(f->pool, event, &cell, 1, NULL); /* always non-blocking */ f 128 sound/core/seq/seq_fifo.c atomic_inc(&f->overflow); f 129 sound/core/seq/seq_fifo.c snd_use_lock_free(&f->use_lock); f 134 sound/core/seq/seq_fifo.c spin_lock_irqsave(&f->lock, flags); f 135 sound/core/seq/seq_fifo.c if (f->tail != NULL) f 136 sound/core/seq/seq_fifo.c f->tail->next = cell; f 137 sound/core/seq/seq_fifo.c f->tail = cell; f 138 sound/core/seq/seq_fifo.c if (f->head == NULL) f 139 sound/core/seq/seq_fifo.c f->head = cell; f 140 sound/core/seq/seq_fifo.c f->cells++; f 141 sound/core/seq/seq_fifo.c spin_unlock_irqrestore(&f->lock, flags); f 144 
sound/core/seq/seq_fifo.c if (waitqueue_active(&f->input_sleep)) f 145 sound/core/seq/seq_fifo.c wake_up(&f->input_sleep); f 147 sound/core/seq/seq_fifo.c snd_use_lock_free(&f->use_lock); f 158 sound/core/seq/seq_fifo.c if ((cell = f->head) != NULL) { f 159 sound/core/seq/seq_fifo.c f->head = cell->next; f 162 sound/core/seq/seq_fifo.c if (f->tail == cell) f 163 sound/core/seq/seq_fifo.c f->tail = NULL; f 166 sound/core/seq/seq_fifo.c f->cells--; f 180 sound/core/seq/seq_fifo.c if (snd_BUG_ON(!f)) f 185 sound/core/seq/seq_fifo.c spin_lock_irqsave(&f->lock, flags); f 186 sound/core/seq/seq_fifo.c while ((cell = fifo_cell_out(f)) == NULL) { f 189 sound/core/seq/seq_fifo.c spin_unlock_irqrestore(&f->lock, flags); f 193 sound/core/seq/seq_fifo.c add_wait_queue(&f->input_sleep, &wait); f 194 sound/core/seq/seq_fifo.c spin_unlock_irq(&f->lock); f 196 sound/core/seq/seq_fifo.c spin_lock_irq(&f->lock); f 197 sound/core/seq/seq_fifo.c remove_wait_queue(&f->input_sleep, &wait); f 199 sound/core/seq/seq_fifo.c spin_unlock_irqrestore(&f->lock, flags); f 203 sound/core/seq/seq_fifo.c spin_unlock_irqrestore(&f->lock, flags); f 216 sound/core/seq/seq_fifo.c spin_lock_irqsave(&f->lock, flags); f 217 sound/core/seq/seq_fifo.c cell->next = f->head; f 218 sound/core/seq/seq_fifo.c f->head = cell; f 219 sound/core/seq/seq_fifo.c f->cells++; f 220 sound/core/seq/seq_fifo.c spin_unlock_irqrestore(&f->lock, flags); f 229 sound/core/seq/seq_fifo.c poll_wait(file, &f->input_sleep, wait); f 230 sound/core/seq/seq_fifo.c return (f->cells > 0); f 240 sound/core/seq/seq_fifo.c if (snd_BUG_ON(!f || !f->pool)) f 252 sound/core/seq/seq_fifo.c spin_lock_irqsave(&f->lock, flags); f 254 sound/core/seq/seq_fifo.c oldpool = f->pool; f 255 sound/core/seq/seq_fifo.c oldhead = f->head; f 257 sound/core/seq/seq_fifo.c f->pool = newpool; f 258 sound/core/seq/seq_fifo.c f->head = NULL; f 259 sound/core/seq/seq_fifo.c f->tail = NULL; f 260 sound/core/seq/seq_fifo.c f->cells = 0; f 262 sound/core/seq/seq_fifo.c spin_unlock_irqrestore(&f->lock, flags); f 59 sound/core/seq/seq_prioq.c struct snd_seq_prioq *f; f 61 sound/core/seq/seq_prioq.c f = kzalloc(sizeof(*f), GFP_KERNEL); f 62 sound/core/seq/seq_prioq.c if (f == NULL) { f 67 sound/core/seq/seq_prioq.c spin_lock_init(&f->lock); f 68 sound/core/seq/seq_prioq.c f->head = NULL; f 69 sound/core/seq/seq_prioq.c f->tail = NULL; f 70 sound/core/seq/seq_prioq.c f->cells = 0; f 72 sound/core/seq/seq_prioq.c return f; f 78 sound/core/seq/seq_prioq.c struct snd_seq_prioq *f = *fifo; f 81 sound/core/seq/seq_prioq.c if (f == NULL) { f 89 sound/core/seq/seq_prioq.c if (f->cells > 0) { f 91 sound/core/seq/seq_prioq.c while (f->cells > 0) f 92 sound/core/seq/seq_prioq.c snd_seq_cell_free(snd_seq_prioq_cell_out(f)); f 95 sound/core/seq/seq_prioq.c kfree(f); f 156 sound/core/seq/seq_prioq.c if (snd_BUG_ON(!f || !cell)) f 162 sound/core/seq/seq_prioq.c spin_lock_irqsave(&f->lock, flags); f 167 sound/core/seq/seq_prioq.c if (f->tail && !prior) { f 168 sound/core/seq/seq_prioq.c if (compare_timestamp(&cell->event, &f->tail->event)) { f 170 sound/core/seq/seq_prioq.c f->tail->next = cell; f 171 sound/core/seq/seq_prioq.c f->tail = cell; f 173 sound/core/seq/seq_prioq.c f->cells++; f 174 sound/core/seq/seq_prioq.c spin_unlock_irqrestore(&f->lock, flags); f 182 sound/core/seq/seq_prioq.c cur = f->head; /* cursor */ f 199 sound/core/seq/seq_prioq.c spin_unlock_irqrestore(&f->lock, flags); f 210 sound/core/seq/seq_prioq.c if (f->head == cur) /* this is the first cell, set head to it */ f 211 
sound/core/seq/seq_prioq.c f->head = cell; f 213 sound/core/seq/seq_prioq.c f->tail = cell; f 214 sound/core/seq/seq_prioq.c f->cells++; f 215 sound/core/seq/seq_prioq.c spin_unlock_irqrestore(&f->lock, flags); f 225 sound/core/seq/seq_prioq.c if (f == NULL) { f 229 sound/core/seq/seq_prioq.c spin_lock_irqsave(&f->lock, flags); f 231 sound/core/seq/seq_prioq.c cell = f->head; f 233 sound/core/seq/seq_prioq.c f->head = cell->next; f 236 sound/core/seq/seq_prioq.c if (f->tail == cell) f 237 sound/core/seq/seq_prioq.c f->tail = NULL; f 240 sound/core/seq/seq_prioq.c f->cells--; f 243 sound/core/seq/seq_prioq.c spin_unlock_irqrestore(&f->lock, flags); f 250 sound/core/seq/seq_prioq.c if (f == NULL) { f 254 sound/core/seq/seq_prioq.c return f->cells; f 261 sound/core/seq/seq_prioq.c if (f == NULL) { f 265 sound/core/seq/seq_prioq.c return f->head; f 300 sound/core/seq/seq_prioq.c spin_lock_irqsave(&f->lock, flags); f 301 sound/core/seq/seq_prioq.c cell = f->head; f 306 sound/core/seq/seq_prioq.c if (cell == f->head) { f 307 sound/core/seq/seq_prioq.c f->head = cell->next; f 311 sound/core/seq/seq_prioq.c if (cell == f->tail) f 312 sound/core/seq/seq_prioq.c f->tail = cell->next; f 313 sound/core/seq/seq_prioq.c f->cells--; f 334 sound/core/seq/seq_prioq.c spin_unlock_irqrestore(&f->lock, flags); f 409 sound/core/seq/seq_prioq.c spin_lock_irqsave(&f->lock, flags); f 410 sound/core/seq/seq_prioq.c cell = f->head; f 418 sound/core/seq/seq_prioq.c if (cell == f->head) { f 419 sound/core/seq/seq_prioq.c f->head = cell->next; f 424 sound/core/seq/seq_prioq.c if (cell == f->tail) f 425 sound/core/seq/seq_prioq.c f->tail = cell->next; f 426 sound/core/seq/seq_prioq.c f->cells--; f 442 sound/core/seq/seq_prioq.c spin_unlock_irqrestore(&f->lock, flags); f 704 sound/oss/ac97_codec.c u16 f; f 756 sound/oss/ac97_codec.c f = codec->codec_read(codec, AC97_EXTENDED_STATUS); f 757 sound/oss/ac97_codec.c if((codec->codec_ops == &null_ops) && (f & 4)) f 336 sound/oss/dmabuf.c unsigned long flags,f ; f 367 sound/oss/dmabuf.c f=claim_dma_lock(); f 370 sound/oss/dmabuf.c release_dma_lock(f); f 641 sound/oss/dmabuf.c unsigned long f; f 648 sound/oss/dmabuf.c f=claim_dma_lock(); f 677 sound/oss/dmabuf.c release_dma_lock(f); f 1037 sound/oss/dmabuf.c unsigned long f; f 1039 sound/oss/dmabuf.c f=claim_dma_lock(); f 1047 sound/oss/dmabuf.c release_dma_lock(f); f 1139 sound/oss/dmabuf.c unsigned long f; f 1141 sound/oss/dmabuf.c f=claim_dma_lock(); f 1148 sound/oss/dmabuf.c release_dma_lock(f); f 91 sound/oss/msnd.c f->data = NULL; f 96 sound/oss/msnd.c vfree(f->data); f 97 sound/oss/msnd.c f->data = NULL; f 102 sound/oss/msnd.c msnd_fifo_free(f); f 103 sound/oss/msnd.c f->data = (char *)vmalloc(n); f 104 sound/oss/msnd.c f->n = n; f 105 sound/oss/msnd.c f->tail = 0; f 106 sound/oss/msnd.c f->head = 0; f 107 sound/oss/msnd.c f->len = 0; f 109 sound/oss/msnd.c if (!f->data) f 117 sound/oss/msnd.c f->len = f->tail = f->head = 0; f 124 sound/oss/msnd.c while ((count < len) && (f->len != f->n)) { f 128 sound/oss/msnd.c if (f->head <= f->tail) { f 130 sound/oss/msnd.c if (nwritten > f->n - f->tail) f 131 sound/oss/msnd.c nwritten = f->n - f->tail; f 134 sound/oss/msnd.c nwritten = f->head - f->tail; f 139 sound/oss/msnd.c memcpy_fromio(f->data + f->tail, buf, nwritten); f 143 sound/oss/msnd.c f->len += nwritten; f 144 sound/oss/msnd.c f->tail += nwritten; f 145 sound/oss/msnd.c f->tail %= f->n; f 155 sound/oss/msnd.c while ((count < len) && (f->len != f->n)) { f 159 sound/oss/msnd.c if (f->head <= f->tail) { f 161 
sound/oss/msnd.c if (nwritten > f->n - f->tail) f 162 sound/oss/msnd.c nwritten = f->n - f->tail; f 165 sound/oss/msnd.c nwritten = f->head - f->tail; f 170 sound/oss/msnd.c memcpy(f->data + f->tail, buf, nwritten); f 174 sound/oss/msnd.c f->len += nwritten; f 175 sound/oss/msnd.c f->tail += nwritten; f 176 sound/oss/msnd.c f->tail %= f->n; f 186 sound/oss/msnd.c while ((count < len) && (f->len > 0)) { f 190 sound/oss/msnd.c if (f->tail <= f->head) { f 192 sound/oss/msnd.c if (nread > f->n - f->head) f 193 sound/oss/msnd.c nread = f->n - f->head; f 196 sound/oss/msnd.c nread = f->tail - f->head; f 201 sound/oss/msnd.c memcpy_toio(buf, f->data + f->head, nread); f 205 sound/oss/msnd.c f->len -= nread; f 206 sound/oss/msnd.c f->head += nread; f 207 sound/oss/msnd.c f->head %= f->n; f 217 sound/oss/msnd.c while ((count < len) && (f->len > 0)) { f 221 sound/oss/msnd.c if (f->tail <= f->head) { f 223 sound/oss/msnd.c if (nread > f->n - f->head) f 224 sound/oss/msnd.c nread = f->n - f->head; f 227 sound/oss/msnd.c nread = f->tail - f->head; f 232 sound/oss/msnd.c memcpy(buf, f->data + f->head, nread); f 236 sound/oss/msnd.c f->len -= nread; f 237 sound/oss/msnd.c f->head += nread; f 238 sound/oss/msnd.c f->head %= f->n; f 679 sound/oss/opl3.c int f, octave; f 688 sound/oss/opl3.c f = freq; f 692 sound/oss/opl3.c if (f == 0) f 694 sound/oss/opl3.c else if (f < 261) f 696 sound/oss/opl3.c while (f < 261) f 699 sound/oss/opl3.c f <<= 1; f 702 sound/oss/opl3.c else if (f > 493) f 704 sound/oss/opl3.c while (f > 493) f 707 sound/oss/opl3.c f >>= 1; f 191 sound/oss/swarm_cs4297a.c #define SERDMA_NEXTBUF(d,f) (((d)->f+1) % (d)->ringsz) f 846 sound/pci/au88x0/au88x0_core.c temp = (temp & 0xfffbffff) | ((f & 1) << 0x12); f 849 sound/pci/au88x0/au88x0_core.c temp = (temp & 0xf7ffffff) | ((f & 1) << 0x1b); f 850 sound/pci/au88x0/au88x0_core.c temp = (temp & 0xefffffff) | ((f & 1) << 0x1c); f 853 sound/pci/au88x0/au88x0_core.c temp = (temp & 0xfeffffff) | ((f & 1) << 0x18); f 854 sound/pci/au88x0/au88x0_core.c temp = (temp & 0xfdffffff) | ((f & 1) << 0x19); f 860 sound/pci/au88x0/au88x0_core.c temp = ((f & 1) << 0x12) | (temp & 0xfffbffef); f 864 sound/pci/au88x0/au88x0_core.c ((f & 1) << 0x1b) | (temp & 0xe7ffffef) | FIFO_BITS; f 868 sound/pci/au88x0/au88x0_core.c ((f & 1) << 0x18) | (temp & 0xfcffffef) | FIFO_BITS; f 938 sound/pci/au88x0/au88x0_core.c temp = (temp & 0xfffbffff) | ((f & 1) << 0x12); f 941 sound/pci/au88x0/au88x0_core.c temp = (temp & 0xf7ffffff) | ((f & 1) << 0x1b); f 942 sound/pci/au88x0/au88x0_core.c temp = (temp & 0xefffffff) | ((f & 1) << 0x1c); f 945 sound/pci/au88x0/au88x0_core.c temp = (temp & 0xfeffffff) | ((f & 1) << 0x18); f 946 sound/pci/au88x0/au88x0_core.c temp = (temp & 0xfdffffff) | ((f & 1) << 0x19); f 952 sound/pci/au88x0/au88x0_core.c temp = ((f & 1) << 0x12) | (temp & 0xfffbffef); f 956 sound/pci/au88x0/au88x0_core.c ((f & 1) << 0x1b) | (temp & 0xe7ffffef) | FIFO_BITS; f 960 sound/pci/au88x0/au88x0_core.c ((f & 1) << 0x18) | (temp & 0xfcffffef) | FIFO_BITS; f 79 sound/pci/echoaudio/echoaudio.c struct snd_mask *f = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT); f 88 sound/pci/echoaudio/echoaudio.c return snd_mask_refine(f, &fmt); f 94 sound/pci/echoaudio/echoaudio.c return snd_mask_refine(f, &fmt); f 107 sound/pci/echoaudio/echoaudio.c struct snd_mask *f = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT); f 113 sound/pci/echoaudio/echoaudio.c if (f->bits[0] == SNDRV_PCM_FMTBIT_S32_BE) { f 124 sound/pci/echoaudio/echoaudio.c if (f->bits[0] == SNDRV_PCM_FMTBIT_U8) { f 
141 sound/pci/echoaudio/echoaudio.c struct snd_mask *f = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT); f 166 sound/pci/echoaudio/echoaudio.c return snd_mask_refine(f, &fmt); f 176 sound/pci/echoaudio/echoaudio.c struct snd_mask *f = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT); f 182 sound/pci/echoaudio/echoaudio.c fmask = f->bits[0] + ((u64)f->bits[1] << 32); f 256 sound/pci/riptide/riptide.c #define SEND_SETF(p,b,c,d,e,f,g) sendcmd(p,PARM,SETF|WORD1(b)|BYTE3(c),d|BYTE1(e)|BYTE2(f)|BYTE3(g),RET(0)) /* set sample format at mixer */ f 263 sound/pci/riptide/riptide.c #define SEND_LSEL(p,b,c,d,e,f,g,h) sendcmd(p,PARM,LSEL|BYTE1(b)|BYTE2(c)|BYTE3(d),BYTE0(e)|BYTE1(f)|BYTE2(g)|BYTE3(h),RET(0)) /* select paths for internal connections */ f 274 sound/pci/riptide/riptide.c #define SEND_TXAC(p,b,c,d,e,f) sendcmd(p,PARM,TXAC|BYTE1(b)|WORD2(c),WORD0(d)|BYTE2(e)|BYTE3(f),RET(0)) f 1520 sound/pci/riptide/riptide.c unsigned int i, j, size, pages, f, pt, period; f 1525 sound/pci/riptide/riptide.c f = PAGE_SIZE; f 1526 sound/pci/riptide/riptide.c while ((size + (f >> 1) - 1) <= (f << 7) && (f << 1) > period) f 1527 sound/pci/riptide/riptide.c f = f >> 1; f 1528 sound/pci/riptide/riptide.c pages = (size + f - 1) / f; f 1533 sound/pci/riptide/riptide.c size, pages, f, period); f 1548 sound/pci/riptide/riptide.c pt = (pt + f) % PAGE_SIZE; f 1551 sound/pci/riptide/riptide.c c->dwSegLen = cpu_to_le32(f); f 1556 sound/pci/riptide/riptide.c size -= f; f 108 sound/ppc/beep.c int i, j, f; f 152 sound/ppc/beep.c f = ncycles * 65536 / nsamples; f 157 sound/ppc/beep.c j = (j + f) & 0xffff; f 2000 sound/sparc/dbri.c struct snd_mask *f = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT); f 2006 sound/sparc/dbri.c return snd_mask_refine(f, &fmt); f 2016 sound/sparc/dbri.c struct snd_mask *f = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT); f 2020 sound/sparc/dbri.c if (!(f->bits[0] & SNDRV_PCM_FMTBIT_S16_BE)) { f 444 sound/usb/usbaudio.c unsigned int f; f 449 sound/usb/usbaudio.c f = combine_triple((u8*)urb->transfer_buffer) << 2; f 450 sound/usb/usbaudio.c if (f >= subs->freqn - subs->freqn / 8 && f <= subs->freqmax) { f 452 sound/usb/usbaudio.c subs->freqm = f; f 470 sound/usb/usbaudio.c unsigned int f; f 475 sound/usb/usbaudio.c f = combine_quad((u8*)urb->transfer_buffer) & 0x0fffffff; f 476 sound/usb/usbaudio.c if (f >= subs->freqn - subs->freqn / 8 && f <= subs->freqmax) { f 478 sound/usb/usbaudio.c subs->freqm = f; f 496 sound/usb/usbaudio.c unsigned int f; f 501 sound/usb/usbaudio.c f = combine_quad((u8*)urb->transfer_buffer) & 0x0fffffff; f 502 sound/usb/usbaudio.c f >>= subs->datainterval; f 503 sound/usb/usbaudio.c if (f >= subs->freqn - subs->freqn / 8 && f <= subs->freqmax) { f 505 sound/usb/usbaudio.c subs->freqm = f; f 1775 sound/usb/usbaudio.c struct audioformat *f; f 1776 sound/usb/usbaudio.c f = list_entry(p, struct audioformat, list); f 1778 sound/usb/usbaudio.c if (f->channels > 32) f 1781 sound/usb/usbaudio.c if (f->rates & SNDRV_PCM_RATE_CONTINUOUS) { f 1782 sound/usb/usbaudio.c if (rate_min && f->rate_min != rate_min) f 1784 sound/usb/usbaudio.c if (rate_max && f->rate_max != rate_max) f 1786 sound/usb/usbaudio.c rate_min = f->rate_min; f 1787 sound/usb/usbaudio.c rate_max = f->rate_max; f 1790 sound/usb/usbaudio.c if (rates[f->format] & SNDRV_PCM_RATE_CONTINUOUS) { f 1791 sound/usb/usbaudio.c if (f->rates != rates[f->format]) f 1794 sound/usb/usbaudio.c if (f->rates & SNDRV_PCM_RATE_CONTINUOUS) { f 1795 sound/usb/usbaudio.c if (rates[f->format] && rates[f->format] != f->rates) f 1798 
sound/usb/usbaudio.c channels[f->format] |= (1 << f->channels); f 1799 sound/usb/usbaudio.c rates[f->format] |= f->rates; f 1801 sound/usb/usbaudio.c if (f->rates & SNDRV_PCM_RATE_KNOT) f 1819 sound/usb/usbaudio.c struct audioformat *f; f 1820 sound/usb/usbaudio.c f = list_entry(p, struct audioformat, list); f 1821 sound/usb/usbaudio.c if (f->rates & SNDRV_PCM_RATE_CONTINUOUS) f 1824 sound/usb/usbaudio.c if (f->rates & (1 << i)) f 1825 sound/usb/usbaudio.c channels[i] |= (1 << f->channels);
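The usbaudio records that close the listing accumulate per-format capability masks: for every struct audioformat the code ORs a channel bit (1 << f->channels) and the rate bits into per-format tables, skipping entries it cannot encode and treating SNDRV_PCM_RATE_CONTINUOUS ranges separately. A self-contained sketch of that accumulation follows; the toy_format type and fixed-size arrays are assumptions standing in for the ALSA structures, and the channel guard is tightened to 31 so the shift stays within a 32-bit mask.

/* Illustrative only: mirrors channels[f->format] |= (1 << f->channels)
 * and rates[f->format] |= f->rates from the usbaudio.c entries above. */
#include <stdio.h>

#define TOY_NFORMATS 8

struct toy_format {
        unsigned int format;    /* index into the per-format tables */
        unsigned int channels;  /* channel count; kept <= 31 here */
        unsigned int rates;     /* bitmap of discrete sample rates */
};

static void toy_accumulate(const struct toy_format *list, int n,
                           unsigned int channels[TOY_NFORMATS],
                           unsigned int rates[TOY_NFORMATS])
{
        int i;

        for (i = 0; i < n; i++) {
                const struct toy_format *f = &list[i];

                if (f->format >= TOY_NFORMATS || f->channels > 31)
                        continue;       /* skip entries we cannot encode */
                channels[f->format] |= 1u << f->channels;
                rates[f->format] |= f->rates;
        }
}

int main(void)
{
        const struct toy_format fmts[] = {
                { .format = 0, .channels = 2, .rates = 0x3 },
                { .format = 0, .channels = 6, .rates = 0x4 },
        };
        unsigned int channels[TOY_NFORMATS] = { 0 };
        unsigned int rates[TOY_NFORMATS] = { 0 };

        toy_accumulate(fmts, 2, channels, rates);
        printf("format 0: channel mask 0x%x, rate mask 0x%x\n",
               channels[0], rates[0]);
        return 0;
}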