k 164 arch/x86/kernel/cpu/cpufreq/e_powersaver.c int k, step, voltage;
k 275 arch/x86/kernel/cpu/cpufreq/e_powersaver.c k = 0;
k 279 arch/x86/kernel/cpu/cpufreq/e_powersaver.c voltage = (k * step) / 256 + min_voltage;
k 280 arch/x86/kernel/cpu/cpufreq/e_powersaver.c f_table[k].frequency = fsb * i;
k 281 arch/x86/kernel/cpu/cpufreq/e_powersaver.c f_table[k].index = (i << 8) | voltage;
k 282 arch/x86/kernel/cpu/cpufreq/e_powersaver.c k++;
k 284 arch/x86/kernel/cpu/cpufreq/e_powersaver.c f_table[k].frequency = CPUFREQ_TABLE_END;
k 429 arch/x86/kernel/cpu/cpufreq/longhaul.c unsigned int i, j, k = 0;
k 490 arch/x86/kernel/cpu/cpufreq/longhaul.c longhaul_table[k].frequency = calc_speed(ratio);
k 491 arch/x86/kernel/cpu/cpufreq/longhaul.c longhaul_table[k].index = j;
k 492 arch/x86/kernel/cpu/cpufreq/longhaul.c k++;
k 494 arch/x86/kernel/cpu/cpufreq/longhaul.c if (k <= 1) {
k 499 arch/x86/kernel/cpu/cpufreq/longhaul.c for (j = 0; j < k - 1; j++) {
k 503 arch/x86/kernel/cpu/cpufreq/longhaul.c for (i = j + 1; i < k; i++) {
k 520 arch/x86/kernel/cpu/cpufreq/longhaul.c longhaul_table[k].frequency = CPUFREQ_TABLE_END;
k 523 arch/x86/kernel/cpu/cpufreq/longhaul.c for (j = 0; j < k; j++) {
k 488 arch/x86/kernel/cpu/cpufreq/powernow-k7.c unsigned int k;
k 490 arch/x86/kernel/cpu/cpufreq/powernow-k7.c for (k=0; k<number_scales; k++)
k 476 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c unsigned int j, k, first_cpu, tmp;
k 551 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c for_each_cpu_mask_nr(k, *online_policy_cpus) {
k 552 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c freqs.cpu = k;
k 574 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c for_each_cpu_mask_nr(k, *online_policy_cpus) {
k 575 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c freqs.cpu = k;
k 402 arch/x86/kernel/cpu/intel_cacheinfo.c unsigned char k = 0;
k 405 arch/x86/kernel/cpu/intel_cacheinfo.c while (cache_table[k].descriptor != 0)
k 407 arch/x86/kernel/cpu/intel_cacheinfo.c if (cache_table[k].descriptor == des) {
k 408 arch/x86/kernel/cpu/intel_cacheinfo.c if (only_trace && cache_table[k].cache_type != LVL_TRACE)
k 410 arch/x86/kernel/cpu/intel_cacheinfo.c switch (cache_table[k].cache_type) {
k 412 arch/x86/kernel/cpu/intel_cacheinfo.c l1i += cache_table[k].size;
k 415 arch/x86/kernel/cpu/intel_cacheinfo.c l1d += cache_table[k].size;
k 418 arch/x86/kernel/cpu/intel_cacheinfo.c l2 += cache_table[k].size;
k 421 arch/x86/kernel/cpu/intel_cacheinfo.c l3 += cache_table[k].size;
k 424 arch/x86/kernel/cpu/intel_cacheinfo.c trace += cache_table[k].size;
k 431 arch/x86/kernel/cpu/intel_cacheinfo.c k++;
k 664 arch/x86/kernel/cpu/intel_cacheinfo.c #define to_object(k) container_of(k, struct _index_kobject, kobj)
k 358 arch/x86/kernel/cpu/mcheck/mce_amd_64.c #define to_block(k) container_of(k, struct threshold_block, kobj)
k 47 arch/x86/kernel/dumpstack_64.c unsigned k;
k 53 arch/x86/kernel/dumpstack_64.c for (k = 0; k < N_EXCEPTION_STACKS; k++) {
k 54 arch/x86/kernel/dumpstack_64.c unsigned long end = per_cpu(orig_ist, cpu).ist[k];
k 72 arch/x86/kernel/dumpstack_64.c if (*usedp & (1U << k))
k 74 arch/x86/kernel/dumpstack_64.c *usedp |= 1U << k;
k 75 arch/x86/kernel/dumpstack_64.c *idp = ids[k];
k 84 arch/x86/kernel/dumpstack_64.c if (k == DEBUG_STACK - 1 && stack >= end - DEBUG_STKSZ) {
k 29 arch/x86/kernel/early_printk.c int i, k, j;
k 34 arch/x86/kernel/early_printk.c for (k = 1, j = 0; k < max_ypos; k++, j++) {
k 36 arch/x86/kernel/early_printk.c writew(readw(VGABASE+2*(max_xpos*k+i)),
k 421 arch/x86/kernel/ptrace.c unsigned long *k = kbuf;
k 423 arch/x86/kernel/ptrace.c *k++ = getreg(target, pos);
k 424 arch/x86/kernel/ptrace.c count -= sizeof(*k);
k 425 arch/x86/kernel/ptrace.c pos += sizeof(*k);
k 447 arch/x86/kernel/ptrace.c const unsigned long *k = kbuf;
k 449 arch/x86/kernel/ptrace.c ret = putreg(target, pos, *k++);
k 450 arch/x86/kernel/ptrace.c count -= sizeof(*k);
k 451 arch/x86/kernel/ptrace.c pos += sizeof(*k);
k 1281 arch/x86/kernel/ptrace.c compat_ulong_t *k = kbuf;
k 1283 arch/x86/kernel/ptrace.c getreg32(target, pos, k++);
k 1284 arch/x86/kernel/ptrace.c count -= sizeof(*k);
k 1285 arch/x86/kernel/ptrace.c pos += sizeof(*k);
k 1309 arch/x86/kernel/ptrace.c const compat_ulong_t *k = kbuf;
k 1311 arch/x86/kernel/ptrace.c ret = putreg32(target, pos, *k++);
k 1312 arch/x86/kernel/ptrace.c count -= sizeof(*k);
k 1313 arch/x86/kernel/ptrace.c pos += sizeof(*k);
k 2391 arch/x86/kvm/mmu.c int i, j, k;
k 2408 arch/x86/kvm/mmu.c for (k = 0; k < RMAP_EXT; ++k)
k 2409 arch/x86/kvm/mmu.c if (d->shadow_ptes[k])
k 119 arch/x86/mm/pageattr-test.c int i, k;
k 149 arch/x86/mm/pageattr-test.c for (k = 0; k < len[i]; k++) {
k 150 arch/x86/mm/pageattr-test.c pte = lookup_address(addr[i] + k*PAGE_SIZE, &level);
k 156 arch/x86/mm/pageattr-test.c if (k == 0) {
k 161 arch/x86/mm/pageattr-test.c len[i] = k;
k 165 arch/x86/mm/pageattr-test.c if (test_bit(pfn + k, bm)) {
k 166 arch/x86/mm/pageattr-test.c len[i] = k;
k 169 arch/x86/mm/pageattr-test.c __set_bit(pfn + k, bm);
k 171 arch/x86/mm/pageattr-test.c if (!addr[i] || !pte || !k) {
k 43 arch/x86/oprofile/op_model_amd.c #define CTRL_SET_KERN(val, k) (val |= ((k & 1) << 17))
k 39 arch/x86/oprofile/op_model_ppro.c #define CTRL_SET_KERN(val, k) (val |= ((k & 1) << 17))
k 1570 block/cfq-iosched.c void *k;
k 1592 block/cfq-iosched.c k = cic->key;
k 1593 block/cfq-iosched.c if (unlikely(!k)) {
k 333 crypto/aes_generic.c crypto_ft_tab[3][byte(bi[(n + 3) & 3], 3)] ^ *(k + n); \
k 337 crypto/aes_generic.c f_rn(bo, bi, 0, k); \
k 338 crypto/aes_generic.c f_rn(bo, bi, 1, k); \
k 339 crypto/aes_generic.c f_rn(bo, bi, 2, k); \
k 340 crypto/aes_generic.c f_rn(bo, bi, 3, k); \
k 341 crypto/aes_generic.c k += 4; \
k 348 crypto/aes_generic.c crypto_fl_tab[3][byte(bi[(n + 3) & 3], 3)] ^ *(k + n); \
k 352 crypto/aes_generic.c f_rl(bo, bi, 0, k); \
k 353 crypto/aes_generic.c f_rl(bo, bi, 1, k); \
k 354 crypto/aes_generic.c f_rl(bo, bi, 2, k); \
k 355 crypto/aes_generic.c f_rl(bo, bi, 3, k); \
k 405 crypto/aes_generic.c crypto_it_tab[3][byte(bi[(n + 1) & 3], 3)] ^ *(k + n); \
k 409 crypto/aes_generic.c i_rn(bo, bi, 0, k); \
k 410 crypto/aes_generic.c i_rn(bo, bi, 1, k); \
k 411 crypto/aes_generic.c i_rn(bo, bi, 2, k); \
k 412 crypto/aes_generic.c i_rn(bo, bi, 3, k); \
k 413 crypto/aes_generic.c k += 4; \
k 420 crypto/aes_generic.c crypto_il_tab[3][byte(bi[(n + 1) & 3], 3)] ^ *(k + n); \
k 424 crypto/aes_generic.c i_rl(bo, bi, 0, k); \
k 425 crypto/aes_generic.c i_rl(bo, bi, 1, k); \
k 426 crypto/aes_generic.c i_rl(bo, bi, 2, k); \
k 427 crypto/aes_generic.c i_rl(bo, bi, 3, k); \
k 31 crypto/arc4.c int i, j = 0, k = 0;
k 42 crypto/arc4.c j = (j + in_key[k] + a) & 0xff;
k 45 crypto/arc4.c if(++k >= key_len)
k 46 crypto/arc4.c k = 0;
k 711 crypto/cast5.c k[0] = s5[zi(8)] ^ s6[zi(9)] ^ s7[zi(7)] ^ sb8[zi(6)] ^ s5[zi(2)];
k 712 crypto/cast5.c k[1] = s5[zi(10)] ^ s6[zi(11)] ^ s7[zi(5)] ^ sb8[zi(4)] ^
k 714 crypto/cast5.c k[2] = s5[zi(12)] ^ s6[zi(13)] ^ s7[zi(3)] ^ sb8[zi(2)] ^
k 716 crypto/cast5.c k[3] = s5[zi(14)] ^ s6[zi(15)] ^ s7[zi(1)] ^ sb8[zi(0)] ^
k 727 crypto/cast5.c k[4] = s5[xi(3)] ^ s6[xi(2)] ^ s7[xi(12)] ^ sb8[xi(13)] ^
k 729 crypto/cast5.c k[5] = s5[xi(1)] ^ s6[xi(0)] ^ s7[xi(14)] ^ sb8[xi(15)] ^
k 731 crypto/cast5.c k[6] = s5[xi(7)] ^ s6[xi(6)] ^ s7[xi(8)] ^ sb8[xi(9)] ^ s7[xi(3)];
k 732 crypto/cast5.c k[7] = s5[xi(5)] ^ s6[xi(4)] ^ s7[xi(10)] ^ sb8[xi(11)] ^
k 743 crypto/cast5.c k[8] = s5[zi(3)] ^ s6[zi(2)] ^ s7[zi(12)] ^ sb8[zi(13)] ^
k 745 crypto/cast5.c k[9] = s5[zi(1)] ^ s6[zi(0)] ^ s7[zi(14)] ^ sb8[zi(15)] ^
k 747 crypto/cast5.c k[10] = s5[zi(7)] ^ s6[zi(6)] ^ s7[zi(8)] ^ sb8[zi(9)] ^ s7[zi(2)];
k 748 crypto/cast5.c k[11] = s5[zi(5)] ^ s6[zi(4)] ^ s7[zi(10)] ^ sb8[zi(11)] ^
k 759 crypto/cast5.c k[12] = s5[xi(8)] ^ s6[xi(9)] ^ s7[xi(7)] ^ sb8[xi(6)] ^ s5[xi(3)];
k 760 crypto/cast5.c k[13] = s5[xi(10)] ^ s6[xi(11)] ^ s7[xi(5)] ^ sb8[xi(4)] ^
k 762 crypto/cast5.c k[14] = s5[xi(12)] ^ s6[xi(13)] ^ s7[xi(3)] ^ sb8[xi(2)] ^
k 764 crypto/cast5.c k[15] = s5[xi(14)] ^ s6[xi(15)] ^ s7[xi(1)] ^ sb8[xi(0)] ^
k 778 crypto/cast5.c u32 k[16];
k 792 crypto/cast5.c key_schedule(x, z, k);
k 794 crypto/cast5.c c->Km[i] = k[i];
k 795 crypto/cast5.c key_schedule(x, z, k);
k 797 crypto/cast5.c c->Kr[i] = k[i] & 0x1f;
k 637 crypto/des_generic.c d = k[4]; d &= 0x0e; d <<= 4; d |= k[0] & 0x1e; d = pc1[d];
k 638 crypto/des_generic.c c = k[5]; c &= 0x0e; c <<= 4; c |= k[1] & 0x1e; c = pc1[c];
k 639 crypto/des_generic.c b = k[6]; b &= 0x0e; b <<= 4; b |= k[2] & 0x1e; b = pc1[b];
k 640 crypto/des_generic.c a = k[7]; a &= 0x0e; a <<= 4; a |= k[3] & 0x1e; a = pc1[a];
k 665 crypto/des_generic.c d = k[0]; d &= 0xe0; d >>= 4; d |= k[4] & 0xf0; d = pc1[d + 1];
k 666 crypto/des_generic.c c = k[1]; c &= 0xe0; c >>= 4; c |= k[5] & 0xf0; c = pc1[c + 1];
k 667 crypto/des_generic.c b = k[2]; b &= 0xe0; b >>= 4; b |= k[6] & 0xf0; b = pc1[b + 1];
k 668 crypto/des_generic.c a = k[3]; a &= 0xe0; a >>= 4; a |= k[7] & 0xf0; a = pc1[a + 1];
k 720 crypto/des_generic.c d = k[4]; d &= 0x0e; d <<= 4; d |= k[0] & 0x1e; d = pc1[d];
k 721 crypto/des_generic.c c = k[5]; c &= 0x0e; c <<= 4; c |= k[1] & 0x1e; c = pc1[c];
k 722 crypto/des_generic.c b = k[6]; b &= 0x0e; b <<= 4; b |= k[2] & 0x1e; b = pc1[b];
k 723 crypto/des_generic.c a = k[7]; a &= 0x0e; a <<= 4; a |= k[3] & 0x1e; a = pc1[a];
k 745 crypto/des_generic.c d = k[0]; d &= 0xe0; d >>= 4; d |= k[4] & 0xf0; d = pc1[d + 1];
k 746 crypto/des_generic.c c = k[1]; c &= 0xe0; c >>= 4; c |= k[5] & 0xf0; c = pc1[c + 1];
k 747 crypto/des_generic.c b = k[2]; b &= 0xe0; b >>= 4; b |= k[6] & 0xf0; b = pc1[b + 1];
k 748 crypto/des_generic.c a = k[3]; a &= 0xe0; a >>= 4; a |= k[7] & 0xf0; a = pc1[a + 1];
k 68 crypto/fcrypt.c k = (k >> n) | ((k & ((1 << n) - 1)) << (56 - n)); \
k 311 crypto/fcrypt.c u64 k; /* k holds all 56 non-parity bits */
k 314 crypto/fcrypt.c k = (*key++) >> 1;
k 315 crypto/fcrypt.c k <<= 7;
k 316 crypto/fcrypt.c k |= (*key++) >> 1;
k 317 crypto/fcrypt.c k <<= 7;
k 318 crypto/fcrypt.c k |= (*key++) >> 1;
k 319 crypto/fcrypt.c k <<= 7;
k 320 crypto/fcrypt.c k |= (*key++) >> 1;
k 321 crypto/fcrypt.c k <<= 7;
k 322 crypto/fcrypt.c k |= (*key++) >> 1;
k 323 crypto/fcrypt.c k <<= 7;
k 324 crypto/fcrypt.c k |= (*key++) >> 1;
k 325 crypto/fcrypt.c k <<= 7;
k 326 crypto/fcrypt.c k |= (*key++) >> 1;
k 327 crypto/fcrypt.c k <<= 7;
k 328 crypto/fcrypt.c k |= (*key) >> 1;
k 331 crypto/fcrypt.c ctx->sched[0x0] = cpu_to_be32(k); ror56_64(k, 11);
k 332 crypto/fcrypt.c ctx->sched[0x1] = cpu_to_be32(k); ror56_64(k, 11);
k 333 crypto/fcrypt.c ctx->sched[0x2] = cpu_to_be32(k); ror56_64(k, 11);
k 334 crypto/fcrypt.c ctx->sched[0x3] = cpu_to_be32(k); ror56_64(k, 11);
k 335 crypto/fcrypt.c ctx->sched[0x4] = cpu_to_be32(k); ror56_64(k, 11);
k 336 crypto/fcrypt.c ctx->sched[0x5] = cpu_to_be32(k); ror56_64(k, 11);
k 337 crypto/fcrypt.c ctx->sched[0x6] = cpu_to_be32(k); ror56_64(k, 11);
k 338 crypto/fcrypt.c ctx->sched[0x7] = cpu_to_be32(k); ror56_64(k, 11);
k 339 crypto/fcrypt.c ctx->sched[0x8] = cpu_to_be32(k); ror56_64(k, 11);
k 340 crypto/fcrypt.c ctx->sched[0x9] = cpu_to_be32(k); ror56_64(k, 11);
k 341 crypto/fcrypt.c ctx->sched[0xa] = cpu_to_be32(k); ror56_64(k, 11);
k 342 crypto/fcrypt.c ctx->sched[0xb] = cpu_to_be32(k); ror56_64(k, 11);
k 343 crypto/fcrypt.c ctx->sched[0xc] = cpu_to_be32(k); ror56_64(k, 11);
k 344 crypto/fcrypt.c ctx->sched[0xd] = cpu_to_be32(k); ror56_64(k, 11);
k 345 crypto/fcrypt.c ctx->sched[0xe] = cpu_to_be32(k); ror56_64(k, 11);
k 346 crypto/fcrypt.c ctx->sched[0xf] = cpu_to_be32(k);
k 269 crypto/gf128mul.c int i, j, k;
k 290 crypto/gf128mul.c for (k = 1; k < j; ++k)
k 291 crypto/gf128mul.c be128_xor(&t->t[i]->t[j + k],
k 292 crypto/gf128mul.c &t->t[i]->t[j], &t->t[i]->t[k]);
k 311 crypto/gf128mul.c int i, j, k;
k 332 crypto/gf128mul.c for (k = 1; k < j; ++k)
k 333 crypto/gf128mul.c be128_xor(&t->t[i]->t[j + k],
k 334 crypto/gf128mul.c &t->t[i]->t[j], &t->t[i]->t[k]);
k 405 crypto/gf128mul.c int j, k;
k 416 crypto/gf128mul.c for (k = 1; k < j; ++k)
k 417 crypto/gf128mul.c be128_xor(&t->t[j + k], &t->t[j], &t->t[k]);
k 427 crypto/gf128mul.c int j, k;
k 438 crypto/gf128mul.c for (k = 1; k < j; ++k)
k 439 crypto/gf128mul.c be128_xor(&t->t[j + k], &t->t[j], &t->t[k]);
k 62 crypto/md4.c #define ROUND1(a,b,c,d,k,s) (a = lshift(a + F(b,c,d) + k, s))
k 63 crypto/md4.c #define ROUND2(a,b,c,d,k,s) (a = lshift(a + G(b,c,d) + k + (u32)0x5A827999,s))
k 64 crypto/md4.c #define ROUND3(a,b,c,d,k,s) (a = lshift(a + H(b,c,d) + k + (u32)0x6ED9EBA1,s))
k 47 crypto/rmd128.c (a) += f((b), (c), (d)) + le32_to_cpup(&(x)) + (k); \
k 50 crypto/rmd160.c (a) += f((b), (c), (d)) + le32_to_cpup(&(x)) + (k); \
k 47 crypto/rmd256.c (a) += f((b), (c), (d)) + le32_to_cpup(&(x)) + (k); \
k 50 crypto/rmd320.c (a) += f((b), (c), (d)) + le32_to_cpup(&(x)) + (k); \
k 114 crypto/salsa20_generic.c ctx->input[1] = U8TO32_LITTLE(k + 0);
k 115 crypto/salsa20_generic.c ctx->input[2] = U8TO32_LITTLE(k + 4);
k 116 crypto/salsa20_generic.c ctx->input[3] = U8TO32_LITTLE(k + 8);
k 117 crypto/salsa20_generic.c ctx->input[4] = U8TO32_LITTLE(k + 12);
k 119 crypto/salsa20_generic.c k += 16;
k 124 crypto/salsa20_generic.c ctx->input[11] = U8TO32_LITTLE(k + 0);
k 125 crypto/salsa20_generic.c ctx->input[12] = U8TO32_LITTLE(k + 4);
k 126 crypto/salsa20_generic.c ctx->input[13] = U8TO32_LITTLE(k + 8);
k 127 crypto/salsa20_generic.c ctx->input[14] = U8TO32_LITTLE(k + 12);
k 37 crypto/serpent.c b ^= d; b ^= c; b ^= a; b ^= PHI ^ i; b = rol32(b,11); k[j] = b;
k 40 crypto/serpent.c x0=k[i]; x1=k[i+1]; x2=k[i+2]; x3=k[i+3];
k 43 crypto/serpent.c k[i]=x0; k[i+1]=x1; k[i+2]=x2; k[i+3]=x3;
k 46 crypto/serpent.c x3 ^= k[4*(i)+3]; x2 ^= k[4*(i)+2]; \
k 47 crypto/serpent.c x1 ^= k[4*(i)+1]; x0 ^= k[4*(i)+0];
k 56 crypto/serpent.c x0 ^= x3; x2 ^= x4; x3 ^= k[4*i+3]; \
k 57 crypto/serpent.c x1 ^= k[4*i+1]; x0=rol32(x0,5); x2=rol32(x2,22);\
k 58 crypto/serpent.c x0 ^= k[4*i+0]; x2 ^= k[4*i+2];
k 61 crypto/serpent.c x0 ^= k[4*i+0]; x1 ^= k[4*i+1]; x2 ^= k[4*i+2]; \
k 62 crypto/serpent.c x3 ^= k[4*i+3]; x0=ror32(x0,5); x2=ror32(x2,22);\
k 222 crypto/serpent.c u32 *k = ctx->expkey;
k 223 crypto/serpent.c u8 *k8 = (u8 *)k;
k 238 crypto/serpent.c r0 = le32_to_cpu(k[3]);
k 239 crypto/serpent.c r1 = le32_to_cpu(k[4]);
k 240 crypto/serpent.c r2 = le32_to_cpu(k[5]);
k 241 crypto/serpent.c r3 = le32_to_cpu(k[6]);
k 242 crypto/serpent.c r4 = le32_to_cpu(k[7]);
k 244 crypto/serpent.c keyiter(le32_to_cpu(k[0]),r0,r4,r2,0,0);
k 245 crypto/serpent.c keyiter(le32_to_cpu(k[1]),r1,r0,r3,1,1);
k 246 crypto/serpent.c keyiter(le32_to_cpu(k[2]),r2,r1,r4,2,2);
k 247 crypto/serpent.c keyiter(le32_to_cpu(k[3]),r3,r2,r0,3,3);
k 248 crypto/serpent.c keyiter(le32_to_cpu(k[4]),r4,r3,r1,4,4);
k 249 crypto/serpent.c keyiter(le32_to_cpu(k[5]),r0,r4,r2,5,5);
k 250 crypto/serpent.c keyiter(le32_to_cpu(k[6]),r1,r0,r3,6,6);
k 251 crypto/serpent.c keyiter(le32_to_cpu(k[7]),r2,r1,r4,7,7);
k 253 crypto/serpent.c keyiter(k[ 0],r3,r2,r0, 8, 8); keyiter(k[ 1],r4,r3,r1, 9, 9);
k 254 crypto/serpent.c keyiter(k[ 2],r0,r4,r2, 10, 10); keyiter(k[ 3],r1,r0,r3, 11, 11);
k 255 crypto/serpent.c keyiter(k[ 4],r2,r1,r4, 12, 12); keyiter(k[ 5],r3,r2,r0, 13, 13);
k 256 crypto/serpent.c keyiter(k[ 6],r4,r3,r1, 14, 14); keyiter(k[ 7],r0,r4,r2, 15, 15);
k 257 crypto/serpent.c keyiter(k[ 8],r1,r0,r3, 16, 16); keyiter(k[ 9],r2,r1,r4, 17, 17);
k 258 crypto/serpent.c keyiter(k[ 10],r3,r2,r0, 18, 18); keyiter(k[ 11],r4,r3,r1, 19, 19);
k 259 crypto/serpent.c keyiter(k[ 12],r0,r4,r2, 20, 20); keyiter(k[ 13],r1,r0,r3, 21, 21);
k 260 crypto/serpent.c keyiter(k[ 14],r2,r1,r4, 22, 22); keyiter(k[ 15],r3,r2,r0, 23, 23);
k 261 crypto/serpent.c keyiter(k[ 16],r4,r3,r1, 24, 24); keyiter(k[ 17],r0,r4,r2, 25, 25);
k 262 crypto/serpent.c keyiter(k[ 18],r1,r0,r3, 26, 26); keyiter(k[ 19],r2,r1,r4, 27, 27);
k 263 crypto/serpent.c keyiter(k[ 20],r3,r2,r0, 28, 28); keyiter(k[ 21],r4,r3,r1, 29, 29);
k 264 crypto/serpent.c keyiter(k[ 22],r0,r4,r2, 30, 30); keyiter(k[ 23],r1,r0,r3, 31, 31);
k 266 crypto/serpent.c k += 50;
k 268 crypto/serpent.c keyiter(k[-26],r2,r1,r4, 32,-18); keyiter(k[-25],r3,r2,r0, 33,-17);
k 269 crypto/serpent.c keyiter(k[-24],r4,r3,r1, 34,-16); keyiter(k[-23],r0,r4,r2, 35,-15);
k 270 crypto/serpent.c keyiter(k[-22],r1,r0,r3, 36,-14); keyiter(k[-21],r2,r1,r4, 37,-13);
k 271 crypto/serpent.c keyiter(k[-20],r3,r2,r0, 38,-12); keyiter(k[-19],r4,r3,r1, 39,-11);
k 272 crypto/serpent.c keyiter(k[-18],r0,r4,r2, 40,-10); keyiter(k[-17],r1,r0,r3, 41, -9);
k 273 crypto/serpent.c keyiter(k[-16],r2,r1,r4, 42, -8); keyiter(k[-15],r3,r2,r0, 43, -7);
k 274 crypto/serpent.c keyiter(k[-14],r4,r3,r1, 44, -6); keyiter(k[-13],r0,r4,r2, 45, -5);
k 275 crypto/serpent.c keyiter(k[-12],r1,r0,r3, 46, -4); keyiter(k[-11],r2,r1,r4, 47, -3);
k 276 crypto/serpent.c keyiter(k[-10],r3,r2,r0, 48, -2); keyiter(k[ -9],r4,r3,r1, 49, -1);
k 277 crypto/serpent.c keyiter(k[ -8],r0,r4,r2, 50, 0); keyiter(k[ -7],r1,r0,r3, 51, 1);
k 278 crypto/serpent.c keyiter(k[ -6],r2,r1,r4, 52, 2); keyiter(k[ -5],r3,r2,r0, 53, 3);
k 279 crypto/serpent.c keyiter(k[ -4],r4,r3,r1, 54, 4); keyiter(k[ -3],r0,r4,r2, 55, 5);
k 280 crypto/serpent.c keyiter(k[ -2],r1,r0,r3, 56, 6); keyiter(k[ -1],r2,r1,r4, 57, 7);
k 281 crypto/serpent.c keyiter(k[ 0],r3,r2,r0, 58, 8); keyiter(k[ 1],r4,r3,r1, 59, 9);
k 282 crypto/serpent.c keyiter(k[ 2],r0,r4,r2, 60, 10); keyiter(k[ 3],r1,r0,r3, 61, 11);
k 283 crypto/serpent.c keyiter(k[ 4],r2,r1,r4, 62, 12); keyiter(k[ 5],r3,r2,r0, 63, 13);
k 284 crypto/serpent.c keyiter(k[ 6],r4,r3,r1, 64, 14); keyiter(k[ 7],r0,r4,r2, 65, 15);
k 285 crypto/serpent.c keyiter(k[ 8],r1,r0,r3, 66, 16); keyiter(k[ 9],r2,r1,r4, 67, 17);
k 286 crypto/serpent.c keyiter(k[ 10],r3,r2,r0, 68, 18); keyiter(k[ 11],r4,r3,r1, 69, 19);
k 287 crypto/serpent.c keyiter(k[ 12],r0,r4,r2, 70, 20); keyiter(k[ 13],r1,r0,r3, 71, 21);
k 288 crypto/serpent.c keyiter(k[ 14],r2,r1,r4, 72, 22); keyiter(k[ 15],r3,r2,r0, 73, 23);
k 289 crypto/serpent.c keyiter(k[ 16],r4,r3,r1, 74, 24); keyiter(k[ 17],r0,r4,r2, 75, 25);
k 290 crypto/serpent.c keyiter(k[ 18],r1,r0,r3, 76, 26); keyiter(k[ 19],r2,r1,r4, 77, 27);
k 291 crypto/serpent.c keyiter(k[ 20],r3,r2,r0, 78, 28); keyiter(k[ 21],r4,r3,r1, 79, 29);
k 292 crypto/serpent.c keyiter(k[ 22],r0,r4,r2, 80, 30); keyiter(k[ 23],r1,r0,r3, 81, 31);
k 294 crypto/serpent.c k += 50;
k 296 crypto/serpent.c keyiter(k[-26],r2,r1,r4, 82,-18); keyiter(k[-25],r3,r2,r0, 83,-17);
k 297 crypto/serpent.c keyiter(k[-24],r4,r3,r1, 84,-16); keyiter(k[-23],r0,r4,r2, 85,-15);
k 298 crypto/serpent.c keyiter(k[-22],r1,r0,r3, 86,-14); keyiter(k[-21],r2,r1,r4, 87,-13);
k 299 crypto/serpent.c keyiter(k[-20],r3,r2,r0, 88,-12); keyiter(k[-19],r4,r3,r1, 89,-11);
k 300 crypto/serpent.c keyiter(k[-18],r0,r4,r2, 90,-10); keyiter(k[-17],r1,r0,r3, 91, -9);
k 301 crypto/serpent.c keyiter(k[-16],r2,r1,r4, 92, -8); keyiter(k[-15],r3,r2,r0, 93, -7);
k 302 crypto/serpent.c keyiter(k[-14],r4,r3,r1, 94, -6); keyiter(k[-13],r0,r4,r2, 95, -5);
k 303 crypto/serpent.c keyiter(k[-12],r1,r0,r3, 96, -4); keyiter(k[-11],r2,r1,r4, 97, -3);
k 304 crypto/serpent.c keyiter(k[-10],r3,r2,r0, 98, -2); keyiter(k[ -9],r4,r3,r1, 99, -1);
k 305 crypto/serpent.c keyiter(k[ -8],r0,r4,r2,100, 0); keyiter(k[ -7],r1,r0,r3,101, 1);
k 306 crypto/serpent.c keyiter(k[ -6],r2,r1,r4,102, 2); keyiter(k[ -5],r3,r2,r0,103, 3);
k 307 crypto/serpent.c keyiter(k[ -4],r4,r3,r1,104, 4); keyiter(k[ -3],r0,r4,r2,105, 5);
k 308 crypto/serpent.c keyiter(k[ -2],r1,r0,r3,106, 6); keyiter(k[ -1],r2,r1,r4,107, 7);
k 309 crypto/serpent.c keyiter(k[ 0],r3,r2,r0,108, 8); keyiter(k[ 1],r4,r3,r1,109, 9);
k 310 crypto/serpent.c keyiter(k[ 2],r0,r4,r2,110, 10); keyiter(k[ 3],r1,r0,r3,111, 11);
k 311 crypto/serpent.c keyiter(k[ 4],r2,r1,r4,112, 12); keyiter(k[ 5],r3,r2,r0,113, 13);
k 312 crypto/serpent.c keyiter(k[ 6],r4,r3,r1,114, 14); keyiter(k[ 7],r0,r4,r2,115, 15);
k 313 crypto/serpent.c keyiter(k[ 8],r1,r0,r3,116, 16); keyiter(k[ 9],r2,r1,r4,117, 17);
k 314 crypto/serpent.c keyiter(k[ 10],r3,r2,r0,118, 18); keyiter(k[ 11],r4,r3,r1,119, 19);
k 315 crypto/serpent.c keyiter(k[ 12],r0,r4,r2,120, 20); keyiter(k[ 13],r1,r0,r3,121, 21);
k 316 crypto/serpent.c keyiter(k[ 14],r2,r1,r4,122, 22); keyiter(k[ 15],r3,r2,r0,123, 23);
k 317 crypto/serpent.c keyiter(k[ 16],r4,r3,r1,124, 24); keyiter(k[ 17],r0,r4,r2,125, 25);
k 318 crypto/serpent.c keyiter(k[ 18],r1,r0,r3,126, 26); keyiter(k[ 19],r2,r1,r4,127, 27);
k 319 crypto/serpent.c keyiter(k[ 20],r3,r2,r0,128, 28); keyiter(k[ 21],r4,r3,r1,129, 29);
k 320 crypto/serpent.c keyiter(k[ 22],r0,r4,r2,130, 30); keyiter(k[ 23],r1,r0,r3,131, 31);
k 338 crypto/serpent.c k -= 50;
k 350 crypto/serpent.c k -= 50;
k 367 crypto/serpent.c *k = ctx->expkey;
k 426 crypto/serpent.c *k = ((struct serpent_ctx *)ctx)->expkey;
k 120 crypto/testmgr.c unsigned int i, j, k, temp;
k 198 crypto/testmgr.c for (k = 0; k < template[i].np; k++) {
k 199 crypto/testmgr.c sg_set_buf(&sg[k],
k 200 crypto/testmgr.c memcpy(xbuf[IDX[k] >> PAGE_SHIFT] +
k 201 crypto/testmgr.c offset_in_page(IDX[k]),
k 203 crypto/testmgr.c template[i].tap[k]),
k 204 crypto/testmgr.c template[i].tap[k]);
k 205 crypto/testmgr.c temp += template[i].tap[k];
k 267 crypto/testmgr.c unsigned int i, j, k, n, temp;
k 413 crypto/testmgr.c for (k = 0, temp = 0; k < template[i].np; k++) {
k 414 crypto/testmgr.c if (WARN_ON(offset_in_page(IDX[k]) +
k 415 crypto/testmgr.c template[i].tap[k] > PAGE_SIZE))
k 418 crypto/testmgr.c q = xbuf[IDX[k] >> PAGE_SHIFT] +
k 419 crypto/testmgr.c offset_in_page(IDX[k]);
k 422 crypto/testmgr.c template[i].tap[k]);
k 424 crypto/testmgr.c n = template[i].tap[k];
k 425 crypto/testmgr.c if (k == template[i].np - 1 && enc)
k 430 crypto/testmgr.c sg_set_buf(&sg[k], q, template[i].tap[k]);
k 431 crypto/testmgr.c temp += template[i].tap[k];
k 443 crypto/testmgr.c if (WARN_ON(sg[k - 1].offset +
k 444 crypto/testmgr.c sg[k - 1].length + authsize >
k 450 crypto/testmgr.c sg[k - 1].length += authsize;
k 454 crypto/testmgr.c for (k = 0, temp = 0; k < template[i].anp; k++) {
k 455 crypto/testmgr.c sg_set_buf(&asg[k],
k 456 crypto/testmgr.c memcpy(axbuf[IDX[k] >> PAGE_SHIFT] +
k 457 crypto/testmgr.c offset_in_page(IDX[k]),
k 459 crypto/testmgr.c template[i].atap[k]),
k 460 crypto/testmgr.c template[i].atap[k]);
k 461 crypto/testmgr.c temp += template[i].atap[k];
k 494 crypto/testmgr.c for (k = 0, temp = 0; k < template[i].np; k++) {
k 495 crypto/testmgr.c q = xbuf[IDX[k] >> PAGE_SHIFT] +
k 496 crypto/testmgr.c offset_in_page(IDX[k]);
k 498 crypto/testmgr.c n = template[i].tap[k];
k 499 crypto/testmgr.c if (k == template[i].np - 1)
k 505 crypto/testmgr.c "%u for %s\n", j, e, k, algo);
k 511 crypto/testmgr.c if (k == template[i].np - 1 && !enc) {
k 526 crypto/testmgr.c "%s: %u bytes:\n", j, e, k,
k 532 crypto/testmgr.c temp += template[i].tap[k];
k 548 crypto/testmgr.c unsigned int i, j, k;
k 583 crypto/testmgr.c for (k = 0; k < template[i].ilen;
k 584 crypto/testmgr.c k += crypto_cipher_blocksize(tfm)) {
k 586 crypto/testmgr.c crypto_cipher_encrypt_one(tfm, data + k,
k 587 crypto/testmgr.c data + k);
k 589 crypto/testmgr.c crypto_cipher_decrypt_one(tfm, data + k,
k 590 crypto/testmgr.c data + k);
k 614 crypto/testmgr.c unsigned int i, j, k, n, temp;
k 738 crypto/testmgr.c for (k = 0; k < template[i].np; k++) {
k 739 crypto/testmgr.c if (WARN_ON(offset_in_page(IDX[k]) +
k 740 crypto/testmgr.c template[i].tap[k] > PAGE_SIZE))
k 743 crypto/testmgr.c q = xbuf[IDX[k] >> PAGE_SHIFT] +
k 744 crypto/testmgr.c offset_in_page(IDX[k]);
k 747 crypto/testmgr.c template[i].tap[k]);
k 749 crypto/testmgr.c if (offset_in_page(q) + template[i].tap[k] <
k 751 crypto/testmgr.c q[template[i].tap[k]] = 0;
k 753 crypto/testmgr.c sg_set_buf(&sg[k], q, template[i].tap[k]);
k 755 crypto/testmgr.c temp += template[i].tap[k];
k 786 crypto/testmgr.c for (k = 0; k < template[i].np; k++) {
k 787 crypto/testmgr.c q = xbuf[IDX[k] >> PAGE_SHIFT] +
k 788 crypto/testmgr.c offset_in_page(IDX[k]);
k 791 crypto/testmgr.c template[i].tap[k])) {
k 794 crypto/testmgr.c "%u for %s\n", j, e, k, algo);
k 795 crypto/testmgr.c hexdump(q, template[i].tap[k]);
k 799 crypto/testmgr.c q += template[i].tap[k];
k 807 crypto/testmgr.c k, algo, n);
k 811 crypto/testmgr.c temp += template[i].tap[k];
k 69 crypto/twofish.c x += y; y += x + ctx->k[2 * (n) + 1]; \
k 70 crypto/twofish.c (c) ^= x + ctx->k[2 * (n)]; \
k 77 crypto/twofish.c (d) ^= y + ctx->k[2 * (n) + 1]; \
k 80 crypto/twofish.c (c) ^= (x + ctx->k[2 * (n)])
k 550 crypto/twofish_common.c x = CALC_K_2 (k, l, k, l, 0); \
k 563 crypto/twofish_common.c x = CALC_K192_2 (l, l, k, k, 0); \
k 576 crypto/twofish_common.c x = CALC_K256_2 (k, l, 0); \
k 589 crypto/twofish_common.c int i, j, k;
k 654 crypto/twofish_common.c for ( i = j = 0, k = 1; i < 256; i++, j += 2, k += 2 ) {
k 655 crypto/twofish_common.c CALC_SB256_2( i, calc_sb_tbl[j], calc_sb_tbl[k] );
k 672 crypto/twofish_common.c CALC_K256 (k, i, q0[i+8], q1[i+8], q0[i+9], q1[i+9]);
k 676 crypto/twofish_common.c for ( i = j = 0, k = 1; i < 256; i++, j += 2, k += 2 ) {
k 677 crypto/twofish_common.c CALC_SB192_2( i, calc_sb_tbl[j], calc_sb_tbl[k] );
k 685 crypto/twofish_common.c CALC_K192 (k, i, q0[i+8], q1[i+8], q0[i+9], q1[i+9]);
k 689 crypto/twofish_common.c for ( i = j = 0, k = 1; i < 256; i++, j += 2, k += 2 ) {
k 690 crypto/twofish_common.c CALC_SB_2( i, calc_sb_tbl[j], calc_sb_tbl[k] );
k 698 crypto/twofish_common.c CALC_K (k, i, q0[i+8], q1[i+8], q0[i+9], q1[i+9]);
k 438 fs/binfmt_elf.c unsigned long k, map_addr;
k 472 fs/binfmt_elf.c k = load_addr + eppnt->p_vaddr;
k 473 fs/binfmt_elf.c if (BAD_ADDR(k) ||
k 476 fs/binfmt_elf.c TASK_SIZE - eppnt->p_memsz < k) {
k 485 fs/binfmt_elf.c k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
k 486 fs/binfmt_elf.c if (k > elf_bss)
k 487 fs/binfmt_elf.c elf_bss = k;
k 493 fs/binfmt_elf.c k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
k 494 fs/binfmt_elf.c if (k > last_bss)
k 495 fs/binfmt_elf.c last_bss = k;
k 777 fs/binfmt_elf.c unsigned long k, vaddr;
k 853 fs/binfmt_elf.c k = elf_ppnt->p_vaddr;
k 854 fs/binfmt_elf.c if (k < start_code)
k 855 fs/binfmt_elf.c start_code = k;
k 856 fs/binfmt_elf.c if (start_data < k)
k 857 fs/binfmt_elf.c start_data = k;
k 864 fs/binfmt_elf.c if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
k 866 fs/binfmt_elf.c TASK_SIZE - elf_ppnt->p_memsz < k) {
k 873 fs/binfmt_elf.c k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;
k 875 fs/binfmt_elf.c if (k > elf_bss)
k 876 fs/binfmt_elf.c elf_bss = k;
k 877 fs/binfmt_elf.c if ((elf_ppnt->p_flags & PF_X) && end_code < k)
k 878 fs/binfmt_elf.c end_code = k;
k 879 fs/binfmt_elf.c if (end_data < k)
k 880 fs/binfmt_elf.c end_data = k;
k 881 fs/binfmt_elf.c k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
k 882 fs/binfmt_elf.c if (k > elf_brk)
k 883 fs/binfmt_elf.c elf_brk = k;
k 53 fs/cifs/md4.c #define ROUND1(a,b,c,d,k,s) (*a) = lshift((*a) + F(*b,*c,*d) + X[k], s)
k 54 fs/cifs/md4.c #define ROUND2(a,b,c,d,k,s) (*a) = lshift((*a) + G(*b,*c,*d) + X[k] + (__u32)0x5A827999,s)
k 55 fs/cifs/md4.c #define ROUND3(a,b,c,d,k,s) (*a) = lshift((*a) + H(*b,*c,*d) + X[k] + (__u32)0x6ED9EBA1,s)
k 194 fs/cifs/smbdes.c int i, j, k;
k 265 fs/cifs/smbdes.c for (k = 0; k < 6; k++)
k 266 fs/cifs/smbdes.c b[j][k] = erk[j * 6 + k];
k 275 fs/cifs/smbdes.c for (k = 0; k < 4; k++)
k 276 fs/cifs/smbdes.c b[j][k] =
k 277 fs/cifs/smbdes.c (sbox[j][m][n] & (1 << (3 - k))) ? 1 : 0;
k 281 fs/cifs/smbdes.c for (k = 0; k < 4; k++)
k 282 fs/cifs/smbdes.c cb[j * 4 + k] = b[j][k];
k 187 fs/configfs/dir.c error = configfs_make_dirent(p->d_fsdata, d, k, mode,
k 147 fs/dlm/lockspace.c struct dlm_ls *ls = container_of(k, struct dlm_ls, ls_kobj);
k 335 fs/ext2/inode.c if (k > 0) {
k 877 fs/ext2/inode.c int k, err;
k 880 fs/ext2/inode.c for (k = depth; k > 1 && !offsets[k-1]; k--)
k 882 fs/ext2/inode.c partial = ext2_get_branch(inode, k, offsets, chain, &err);
k 884 fs/ext2/inode.c partial = chain + k-1;
k 902 fs/ext2/inode.c if (p == chain + k - 1 && p > chain) {
k 489 fs/ext3/inode.c if (k > 0) {
k 1980 fs/ext3/inode.c int k, err;
k 1984 fs/ext3/inode.c for (k = depth; k > 1 && !offsets[k-1]; k--)
k 1986 fs/ext3/inode.c partial = ext3_get_branch(inode, k, offsets, chain, &err);
k 1989 fs/ext3/inode.c partial = chain + k-1;
k 2005 fs/ext3/inode.c if (p == chain + k - 1 && p > chain) {
k 349 fs/ext4/extents.c int k, l = path->p_depth;
k 352 fs/ext4/extents.c for (k = 0; k <= l; k++, path++) {
k 438 fs/ext4/extents.c int k;
k 441 fs/ext4/extents.c for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) {
k 442 fs/ext4/extents.c if (k != 0 &&
k 445 fs/ext4/extents.c "first=0x%p\n", k,
k 451 fs/ext4/extents.c BUG_ON(k && le32_to_cpu(ix->ei_block)
k 508 fs/ext4/extents.c int k;
k 511 fs/ext4/extents.c for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) {
k 512 fs/ext4/extents.c BUG_ON(k && le32_to_cpu(ex->ee_block)
k 685 fs/ext4/extents.c int i = at, k, m, a;
k 802 fs/ext4/extents.c k = depth - at - 1;
k 803 fs/ext4/extents.c BUG_ON(k < 0);
k 804 fs/ext4/extents.c if (k)
k 805 fs/ext4/extents.c ext_debug("create %d intermediate indices\n", k);
k 809 fs/ext4/extents.c while (k--) {
k 1286 fs/ext4/extents.c int k, err = 0;
k 1306 fs/ext4/extents.c k = depth - 1;
k 1308 fs/ext4/extents.c err = ext4_ext_get_access(handle, inode, path + k);
k 1311 fs/ext4/extents.c path[k].p_idx->ei_block = border;
k 1312 fs/ext4/extents.c err = ext4_ext_dirty(handle, inode, path + k);
k 1316 fs/ext4/extents.c while (k--) {
k 1318 fs/ext4/extents.c if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr))
k 1320 fs/ext4/extents.c err = ext4_ext_get_access(handle, inode, path + k);
k 1323 fs/ext4/extents.c path[k].p_idx->ei_block = border;
k 1324 fs/ext4/extents.c err = ext4_ext_dirty(handle, inode, path + k);
k 517 fs/ext4/inode.c if (k > 0) {
k 3317 fs/ext4/inode.c int k, err;
k 3321 fs/ext4/inode.c for (k = depth; k > 1 && !offsets[k-1]; k--)
k 3323 fs/ext4/inode.c partial = ext4_get_branch(inode, k, offsets, chain, &err);
k 3326 fs/ext4/inode.c partial = chain + k-1;
k 3342 fs/ext4/inode.c if (p == chain + k - 1 && p > chain) {
k 528 fs/ext4/mballoc.c int k;
k 571 fs/ext4/mballoc.c k = (i * (1 << order)) + j;
k 573 fs/ext4/mballoc.c !mb_test_bit(k, EXT4_MB_BITMAP(e4b)));
k 596 fs/ext4/mballoc.c k = i >> j;
k 597 fs/ext4/mballoc.c MB_CHECK_ASSERT(k < max2);
k 598 fs/ext4/mballoc.c MB_CHECK_ASSERT(mb_test_bit(k, buddy2));
k 610 fs/ext4/mballoc.c ext4_get_group_no_and_offset(sb, pa->pa_pstart, &groupnr, &k);
k 613 fs/ext4/mballoc.c MB_CHECK_ASSERT(mb_test_bit(k + i, buddy));
k 1519 fs/ext4/mballoc.c int k;
k 1530 fs/ext4/mballoc.c k = mb_find_next_zero_bit(buddy, max, 0);
k 1531 fs/ext4/mballoc.c BUG_ON(k >= max);
k 1536 fs/ext4/mballoc.c ac->ac_b_ex.fe_start = k << i;
k 133 fs/fat/dir.c int k;
k 146 fs/fat/dir.c for (k = 4; k > 0; k--) {
k 148 fs/fat/dir.c op[k] = nc > 9 ? nc + ('a' - 10)
k 197 fs/fuse/file.c u32 *k = fc->scramble_key;
k 205 fs/fuse/file.c v0 += ((v1 << 4 ^ v1 >> 5) + v1) ^ (sum + k[sum & 3]);
k 207 fs/fuse/file.c v1 += ((v0 << 4 ^ v0 >> 5) + v0) ^ (sum + k[sum>>11 & 3]);
k 115 fs/hpfs/alloc.c unsigned k = bmp[i-1];
k 116 fs/hpfs/alloc.c while (k & 0x80000000) {
k 117 fs/hpfs/alloc.c q--; k <<= 1;
k 334 fs/hpfs/alloc.c unsigned k;
k 336 fs/hpfs/alloc.c for (k = bmp[j]; k; k >>= 1) if (k & 1) if (!--n) {
k 355 fs/hpfs/alloc.c unsigned k;
k 357 fs/hpfs/alloc.c for (k = 0xf; k; k <<= 4)
k 358 fs/hpfs/alloc.c if ((bmp[j] & k) == k) {
k 1368 fs/jfs/jfs_dmap.c int rc, ti, i, k, m, n, agperlev;
k 1462 fs/jfs/jfs_dmap.c for (k = bmp->db_agheigth; k > 0; k--) {
k 2795 fs/jfs/jfs_dmap.c int lp, pp, k;
k 2814 fs/jfs/jfs_dmap.c for (k = 0; k < le32_to_cpu(tp->dmt_height); k++) {
k 2869 fs/jfs/jfs_dmap.c int ti, n = 0, k, x = 0;
k 2881 fs/jfs/jfs_dmap.c for (k = le32_to_cpu(tp->dmt_height), ti = 1;
k 2882 fs/jfs/jfs_dmap.c k > 0; k--, ti = ((ti + n) << 2) + 1) {
k 3286 fs/jfs/jfs_dmap.c int i, i0 = true, j, j0 = true, k, n;
k 3335 fs/jfs/jfs_dmap.c k = 1 << (l2agsize - oldl2agsize);
k 3341 fs/jfs/jfs_dmap.c for (j = 0; j < k && i < agno; j++, i++) {
k 3355 fs/jfs/jfs_dmap.c bmp->db_maxag = bmp->db_maxag / k;
k 3374 fs/jfs/jfs_dmap.c k = blkno >> L2MAXL1SIZE;
k 3375 fs/jfs/jfs_dmap.c l2leaf = l2dcp->stree + CTLLEAFIND + k;
k 3381 fs/jfs/jfs_dmap.c for (; k < LPERCTL; k++, p += nbperpage) {
k 3522 fs/jfs/jfs_dmap.c if (k > 0)
k 651 fs/jfs/jfs_extent.c u64 m, k;
k 659 fs/jfs/jfs_extent.c k = (u64) 1 << i;
k 660 fs/jfs/jfs_extent.c k = ((k - 1) & nb) ? k : k >> 1;
k 662 fs/jfs/jfs_extent.c return (k);
k 241 fs/jfs/jfs_txnmgr.c int k, size;
k 289 fs/jfs/jfs_txnmgr.c for (k = 1; k < nTxBlock - 1; k++) {
k 290 fs/jfs/jfs_txnmgr.c TxBlock[k].next = k + 1;
k 291 fs/jfs/jfs_txnmgr.c init_waitqueue_head(&TxBlock[k].gcwait);
k 292 fs/jfs/jfs_txnmgr.c init_waitqueue_head(&TxBlock[k].waitor);
k 294 fs/jfs/jfs_txnmgr.c TxBlock[k].next = 0;
k 295 fs/jfs/jfs_txnmgr.c init_waitqueue_head(&TxBlock[k].gcwait);
k 296 fs/jfs/jfs_txnmgr.c init_waitqueue_head(&TxBlock[k].waitor);
k 317 fs/jfs/jfs_txnmgr.c for (k = 1; k < nTxLock - 1; k++)
k 318 fs/jfs/jfs_txnmgr.c TxLock[k].next = k + 1;
k 319 fs/jfs/jfs_txnmgr.c TxLock[k].next = 0;
k 913 fs/jfs/jfs_txnmgr.c lid_t lid, next, llid, k;
k 967 fs/jfs/jfs_txnmgr.c k = linelock->next;
k 969 fs/jfs/jfs_txnmgr.c llid = k;
k 1149 fs/jfs/jfs_txnmgr.c int k, n;
k 1205 fs/jfs/jfs_txnmgr.c for (k = 0; k < cd.nip; k++) {
k 1206 fs/jfs/jfs_txnmgr.c top = (cd.iplist[k])->i_ino;
k 1207 fs/jfs/jfs_txnmgr.c for (n = k + 1; n < cd.nip; n++) {
k 1211 fs/jfs/jfs_txnmgr.c cd.iplist[n] = cd.iplist[k];
k 1212 fs/jfs/jfs_txnmgr.c cd.iplist[k] = ip;
k 1216 fs/jfs/jfs_txnmgr.c ip = cd.iplist[k];
k 1352 fs/jfs/jfs_txnmgr.c for (k = 0; k < cd.nip; k++) {
k 1353 fs/jfs/jfs_txnmgr.c ip = cd.iplist[k];
k 2304 fs/jfs/jfs_txnmgr.c int k, nlock;
k 2351 fs/jfs/jfs_txnmgr.c for (k = 0; k < nlock; k++, maplock++) {
k 57 fs/minix/bitmap.c int k = sb->s_blocksize_bits + 3;
k 65 fs/minix/bitmap.c bit = zone & ((1<<k) - 1);
k 66 fs/minix/bitmap.c zone >>= k;
k 196 fs/minix/bitmap.c int k = sb->s_blocksize_bits + 3;
k 204 fs/minix/bitmap.c bit = ino & ((1<<k) - 1);
k 205 fs/minix/bitmap.c ino >>= k;
k 223 fs/minix/itree_common.c int k, err;
k 226 fs/minix/itree_common.c for (k = depth; k > 1 && !offsets[k-1]; k--)
k 228 fs/minix/itree_common.c partial = get_branch(inode, k, offsets, chain, &err);
k 232 fs/minix/itree_common.c partial = chain + k-1;
k 239 fs/minix/itree_common.c if (p == chain + k - 1 && p > chain) {
k 352 fs/minix/itree_common.c int k = sb->s_blocksize_bits - 10;
k 354 fs/minix/itree_common.c blocks = (size + sb->s_blocksize - 1) >> (BLOCK_SIZE_BITS + k);
k 1115 fs/ncpfs/ncplib_kernel.c int k;
k 1117 fs/ncpfs/ncplib_kernel.c k = utf8_mbtowc(&ec, iname, iname_end - iname);
k 1118 fs/ncpfs/ncplib_kernel.c if (k < 0)
k 1120 fs/ncpfs/ncplib_kernel.c iname += k;
k 1123 fs/ncpfs/ncplib_kernel.c int k;
k 1129 fs/ncpfs/ncplib_kernel.c for (k = 1; k < 5; k++) {
k 1132 fs/ncpfs/ncplib_kernel.c nc = iname[k] - '0';
k 1215 fs/ncpfs/ncplib_kernel.c int k;
k 1217 fs/ncpfs/ncplib_kernel.c k = utf8_wctomb(iname, ec, iname_end - iname);
k 1218 fs/ncpfs/ncplib_kernel.c if (k < 0) {
k 1222 fs/ncpfs/ncplib_kernel.c iname += k;
k 1227 fs/ncpfs/ncplib_kernel.c int k;
k 1234 fs/ncpfs/ncplib_kernel.c for (k = 4; k > 0; k--) {
k 1241 fs/ncpfs/ncplib_kernel.c iname[k] = v;
k 142 fs/ncpfs/ncplib_kernel.h #define ncp_io2vol(S,m,i,n,k,U) ncp__io2vol(S,m,i,n,k,U)
k 143 fs/ncpfs/ncplib_kernel.h #define ncp_vol2io(S,m,i,n,k,U) ncp__vol2io(S,m,i,n,k,U)
k 155 fs/ncpfs/ncplib_kernel.h #define ncp_io2vol(S,m,i,n,k,U) ncp__io2vol(m,i,n,k,U)
k 156 fs/ncpfs/ncplib_kernel.h #define ncp_vol2io(S,m,i,n,k,U) ncp__vol2io(m,i,n,k,U)
k 233 fs/proc/array.c struct k_sigaction *k;
k 236 fs/proc/array.c k = p->sighand->action;
k 237 fs/proc/array.c for (i = 1; i <= _NSIG; ++i, ++k) {
k 238 fs/proc/array.c if (k->sa.sa_handler == SIG_IGN)
k 240 fs/proc/array.c else if (k->sa.sa_handler != SIG_DFL)
k 52 fs/reiserfs/hashes.c u32 k[] = { 0x9464a485, 0x542e1a94, 0x3e846bff, 0xb75bcfc3 };
k 54 fs/reiserfs/hashes.c u32 h0 = k[0], h1 = k[1];
k 763 fs/reiserfs/ibalance.c int insert_num, n, k;
k 795 fs/reiserfs/ibalance.c k = 0;
k 832 fs/reiserfs/ibalance.c k = tb->lnum[h] - child_pos - 1;
k 839 fs/reiserfs/ibalance.c n + child_pos + 1, k,
k 842 fs/reiserfs/ibalance.c replace_lkey(tb, h, insert_key + k);
k 847 fs/reiserfs/ibalance.c MAX_CHILD_SIZE(insert_ptr[k]) -
k 848 fs/reiserfs/ibalance.c B_FREE_SPACE(insert_ptr[k]));
k 849 fs/reiserfs/ibalance.c put_dc_block_number(dc, insert_ptr[k]->b_blocknr);
k 853 fs/reiserfs/ibalance.c k++;
k 854 fs/reiserfs/ibalance.c insert_key += k;
k 855 fs/reiserfs/ibalance.c insert_ptr += k;
k 856 fs/reiserfs/ibalance.c insert_num -= k;
k 895 fs/reiserfs/ibalance.c k = tb->rnum[h] - n + child_pos - 1;
k 902 fs/reiserfs/ibalance.c 0, k, insert_key + 1,
k 905 fs/reiserfs/ibalance.c replace_rkey(tb, h, insert_key + insert_num - k - 1);
k 911 fs/reiserfs/ibalance.c [insert_num - k - 1]) -
k 913 fs/reiserfs/ibalance.c [insert_num - k - 1]));
k 915 fs/reiserfs/ibalance.c insert_ptr[insert_num - k -
k 920 fs/reiserfs/ibalance.c insert_num -= (k + 1);
k 1038 fs/reiserfs/ibalance.c k = snum - n + child_pos - 1;
k 1040 fs/reiserfs/ibalance.c internal_insert_childs(&dest_bi, /*S_new, */ 0, k,
k 1044 fs/reiserfs/ibalance.c memcpy(&new_insert_key, insert_key + insert_num - k - 1,
k 1051 fs/reiserfs/ibalance.c (insert_ptr[insert_num - k - 1]) -
k 1053 fs/reiserfs/ibalance.c [insert_num - k - 1])));
k 1055 fs/reiserfs/ibalance.c insert_ptr[insert_num - k -
k 1060 fs/reiserfs/ibalance.c insert_num -= (k + 1);
k 507 fs/reiserfs/item_ops.c int k, l;
k 510 fs/reiserfs/item_ops.c for (k = 0; k < dir_u->entry_count; k++)
k 511 fs/reiserfs/item_ops.c l += dir_u->entry_sizes[k];
k 162 fs/reiserfs/prints.c char *k = fmt;
k 166 fs/reiserfs/prints.c while ((k = strchr(k, '%')) != NULL) {
k 167 fs/reiserfs/prints.c if (k[1] == 'k' || k[1] == 'K' || k[1] == 'h' || k[1] == 't' ||
k 168 fs/reiserfs/prints.c k[1] == 'z' || k[1] == 'b' || k[1] == 'y' || k[1] == 'a') {
k 169 fs/reiserfs/prints.c *what = k[1];
k 173 fs/reiserfs/prints.c k++;
k 175 fs/reiserfs/prints.c return k;
k 194 fs/reiserfs/prints.c char *k;
k 200 fs/reiserfs/prints.c while ((k = is_there_reiserfs_struct(fmt1, &what, &skip)) != NULL) {
k 201 fs/reiserfs/prints.c *k = 0;
k 243 fs/reiserfs/prints.c fmt1 = k + 2;
k 445 fs/sysfs/file.c struct sysfs_dirent *sd = k->sd;
k 283 fs/sysv/itree.c int k, err;
k 286 fs/sysv/itree.c for (k = depth; k > 1 && !offsets[k-1]; k--)
k 290 fs/sysv/itree.c partial = get_branch(inode, k, offsets, chain, &err);
k 292 fs/sysv/itree.c partial = chain + k-1;
k 309 fs/sysv/itree.c if (p == chain + k - 1 && p > chain) {
k 108 fs/ubifs/key.h union ubifs_key *key = k;
k 112 fs/ubifs/key.h memset(k + 8, 0, UBIFS_MAX_KEY_LEN - 8);
k 186 fs/ubifs/key.h union ubifs_key *key = k;
k 193 fs/ubifs/key.h memset(k + 8, 0, UBIFS_MAX_KEY_LEN - 8);
k 254 fs/ubifs/key.h union ubifs_key *key = k;
k 261 fs/ubifs/key.h memset(k + 8, 0, UBIFS_MAX_KEY_LEN - 8);
k 303 fs/ubifs/key.h union ubifs_key *key = k;
k 309 fs/ubifs/key.h memset(k + 8, 0, UBIFS_MAX_KEY_LEN - 8);
k 346 fs/ubifs/key.h const union ubifs_key *key = k;
k 358 fs/ubifs/key.h const union ubifs_key *key = k;
k 370 fs/ubifs/key.h const union ubifs_key *key = k;
k 393 fs/ubifs/key.h const union ubifs_key *key = k;
k 417 fs/ubifs/key.h const union ubifs_key *key = k;
k 287 fs/ubifs/lpt.c const int k = 32 - nrbits;
k 305 fs/ubifs/lpt.c val <<= k;
k 306 fs/ubifs/lpt.c val >>= k;
k 2296 fs/ubifs/tnc.c int i, n, k, err = 0;
k 2334 fs/ubifs/tnc.c for (i = n + 1, k = 0; i < znode->child_cnt; i++, k++) {
k 2347 fs/ubifs/tnc.c if (k) {
k 2348 fs/ubifs/tnc.c for (i = n + 1 + k; i < znode->child_cnt; i++)
k 2349 fs/ubifs/tnc.c znode->zbranch[i - k] = znode->zbranch[i];
k 2350 fs/ubifs/tnc.c znode->child_cnt -= k;
k 159 fs/udf/partition.c int i, j, k, l;
k 184 fs/udf/partition.c for (k = 0; k < reallocationTableLen; k++) {
k 185 fs/udf/partition.c struct sparingEntry *entry = &st->mapEntry[k];
k 223 fs/udf/partition.c for (l = k; l < reallocationTableLen; l++) {
k 239 fs/udf/partition.c memmove(&st->mapEntry[k + 1],
k 240 fs/udf/partition.c &st->mapEntry[k],
k 241 fs/udf/partition.c (l - k) *
k 243 fs/udf/partition.c st->mapEntry[k] = mapEntry;
k 252 fs/udf/partition.c st->mapEntry[k].mappedLocation) +
k 585 fs/ufs/balloc.c for (k = count; k < uspi->s_fpb; k++) \
k 586 fs/ufs/balloc.c if (fs32_to_cpu(sb, ucg->cg_frsum[k])) \
k 597 fs/ufs/balloc.c unsigned oldcg, i, j, k, allocsize;
k 469 fs/vfat/namei.c int i, k, fill;
k 497 fs/vfat/namei.c for (k = 1; k < 5; k++) {
k 498 fs/vfat/namei.c nc = ip[k];
k 191 fs/xfs/xfs_alloc.c xfs_extlen_t k;
k 201 fs/xfs/xfs_alloc.c k = rlen % args->prod;
k 202 fs/xfs/xfs_alloc.c if (k == args->mod)
k 204 fs/xfs/xfs_alloc.c if (k > args->mod) {
k 205 fs/xfs/xfs_alloc.c if ((int)(rlen = rlen - k - args->mod) < (int)args->minlen)
k 208 fs/xfs/xfs_alloc.c if ((int)(rlen = rlen - args->prod - (args->mod - k)) <
k 246 fs/xfs/xfs_bmap_btree.c o = be64_to_cpu(k->br_startoff);
k 290 fs/xfs/xfs_bmap_btree.c xfs_bmbt_trace_argik(__func__, c, i, k, __LINE__)
k 90 fs/xfs/xfs_bmap_btree.h #define NULLSTARTBLOCK(k) nullstartblock(k)
k 93 fs/xfs/xfs_bmap_btree.h ASSERT(k < (1 << STARTBLOCKVALBITS));
k 94 fs/xfs/xfs_bmap_btree.h return STARTBLOCKMASK | (k);
k 47 fs/xfs/xfs_inum.h #define XFS_INO_MASK(k) (__uint32_t)((1ULL << (k)) - 1)
k 3391 fs/xfs/xfs_log.c int len, i, j, k, op_len;
k 3435 fs/xfs/xfs_log.c k = idx % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
k 3437 fs/xfs/xfs_log.c xhdr[j].hic_xheader.xh_cycle_data[k]);
k 3458 fs/xfs/xfs_log.c k = idx % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
k 3459 fs/xfs/xfs_log.c op_len = be32_to_cpu(xhdr[j].hic_xheader.xh_cycle_data[k]);
k 3360 fs/xfs/xfs_log_recover.c int i, j, k;
k 3382 fs/xfs/xfs_log_recover.c k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
k 3383 fs/xfs/xfs_log_recover.c xhdr[j].hic_xheader.xh_cycle_data[k] = *(__be32 *)dp;
k 3436 fs/xfs/xfs_log_recover.c int i, j, k;
k 3449 fs/xfs/xfs_log_recover.c k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
k 3450 fs/xfs/xfs_log_recover.c *(__be32 *)dp = xhdr[j].hic_xheader.xh_cycle_data[k];
k 26 include/asm-cris/arch-v32/hwregs/asm/ata_defs_asm.h #define REG_STATE_X_( k, shift ) (k << shift)
k 26 include/asm-cris/arch-v32/hwregs/asm/bif_core_defs_asm.h #define REG_STATE_X_( k, shift ) (k << shift)
k 26 include/asm-cris/arch-v32/hwregs/asm/bif_dma_defs_asm.h #define REG_STATE_X_( k, shift ) (k << shift)
k 26 include/asm-cris/arch-v32/hwregs/asm/bif_slave_defs_asm.h #define REG_STATE_X_( k, shift ) (k << shift)
k 26 include/asm-cris/arch-v32/hwregs/asm/config_defs_asm.h #define REG_STATE_X_( k, shift ) (k << shift)
k 26 include/asm-cris/arch-v32/hwregs/asm/cris_defs_asm.h #define REG_STATE_X_( k, shift ) (k << shift)
k 26 include/asm-cris/arch-v32/hwregs/asm/dma_defs_asm.h #define REG_STATE_X_( k, shift ) (k << shift)
k 26 include/asm-cris/arch-v32/hwregs/asm/eth_defs_asm.h #define REG_STATE_X_( k, shift ) (k << shift)
k 26 include/asm-cris/arch-v32/hwregs/asm/gio_defs_asm.h #define REG_STATE_X_( k, shift ) (k << shift)
k 26 include/asm-cris/arch-v32/hwregs/asm/intr_vect_defs_asm.h #define REG_STATE_X_( k, shift ) (k << shift)
k 26 include/asm-cris/arch-v32/hwregs/asm/irq_nmi_defs_asm.h #define REG_STATE_X_( k, shift ) (k << shift)
k 26 include/asm-cris/arch-v32/hwregs/asm/marb_defs_asm.h #define REG_STATE_X_( k, shift ) (k << shift)
k 333 include/asm-cris/arch-v32/hwregs/asm/marb_defs_asm.h #define REG_STATE_X_( k, shift ) (k << shift)
k 26 include/asm-cris/arch-v32/hwregs/asm/mmu_defs_asm.h #define REG_STATE_X_( k, shift ) (k << shift)
k 26 include/asm-cris/arch-v32/hwregs/asm/pinmux_defs_asm.h #define REG_STATE_X_( k, shift ) (k << shift)
k 26 include/asm-cris/arch-v32/hwregs/asm/rt_trace_defs_asm.h #define REG_STATE_X_( k, shift ) (k << shift)
k 26 include/asm-cris/arch-v32/hwregs/asm/ser_defs_asm.h #define REG_STATE_X_( k, shift ) (k << shift)
k 26 include/asm-cris/arch-v32/hwregs/asm/sser_defs_asm.h #define REG_STATE_X_( k, shift ) (k << shift)
k 26 include/asm-cris/arch-v32/hwregs/asm/strcop_defs_asm.h #define REG_STATE_X_( k, shift ) (k << shift)
k 26 include/asm-cris/arch-v32/hwregs/asm/strmux_defs_asm.h #define REG_STATE_X_( k, shift ) (k << shift)
k 26 include/asm-cris/arch-v32/hwregs/asm/timer_defs_asm.h #define REG_STATE_X_( k, shift ) (k << shift)
k 26 include/asm-cris/arch-v32/hwregs/iop/asm/iop_crc_par_defs_asm.h #define REG_STATE_X_( k, shift ) (k << shift)
k 26 include/asm-cris/arch-v32/hwregs/iop/asm/iop_dmc_in_defs_asm.h #define REG_STATE_X_( k, shift ) (k << shift)
k 26 include/asm-cris/arch-v32/hwregs/iop/asm/iop_dmc_out_defs_asm.h #define REG_STATE_X_( k, shift ) (k << shift)
k 26 include/asm-cris/arch-v32/hwregs/iop/asm/iop_fifo_in_defs_asm.h #define REG_STATE_X_( k, shift ) (k << shift)
k 26 include/asm-cris/arch-v32/hwregs/iop/asm/iop_fifo_in_extra_defs_asm.h #define REG_STATE_X_( k, shift ) (k << shift)
k 26 include/asm-cris/arch-v32/hwregs/iop/asm/iop_fifo_out_defs_asm.h #define REG_STATE_X_( k, shift ) (k << shift)
k 26 include/asm-cris/arch-v32/hwregs/iop/asm/iop_fifo_out_extra_defs_asm.h #define REG_STATE_X_( k, shift ) (k << shift)
k 26 include/asm-cris/arch-v32/hwregs/iop/asm/iop_mpu_defs_asm.h #define REG_STATE_X_( k, shift ) (k << shift)
k 26 include/asm-cris/arch-v32/hwregs/iop/asm/iop_sap_in_defs_asm.h #define REG_STATE_X_( k, shift ) (k << shift)
k 26 include/asm-cris/arch-v32/hwregs/iop/asm/iop_sap_out_defs_asm.h #define REG_STATE_X_( k, shift ) (k << shift)
k 26 include/asm-cris/arch-v32/hwregs/iop/asm/iop_scrc_in_defs_asm.h #define REG_STATE_X_( k, shift ) (k << shift)
k 26 include/asm-cris/arch-v32/hwregs/iop/asm/iop_scrc_out_defs_asm.h #define REG_STATE_X_( k, shift ) (k << shift)
k 26 include/asm-cris/arch-v32/hwregs/iop/asm/iop_spu_defs_asm.h #define REG_STATE_X_( k, shift ) (k << shift)
k 26 include/asm-cris/arch-v32/hwregs/iop/asm/iop_sw_cfg_defs_asm.h #define REG_STATE_X_( k, shift ) (k << shift)
k 26 include/asm-cris/arch-v32/hwregs/iop/asm/iop_sw_cpu_defs_asm.h #define REG_STATE_X_( k, shift ) (k << shift)
k 26 include/asm-cris/arch-v32/hwregs/iop/asm/iop_sw_mpu_defs_asm.h #define REG_STATE_X_( k, shift ) (k << shift)
k 26 include/asm-cris/arch-v32/hwregs/iop/asm/iop_sw_spu_defs_asm.h #define REG_STATE_X_( k, shift ) (k << shift)
k 26 include/asm-cris/arch-v32/hwregs/iop/asm/iop_timer_grp_defs_asm.h #define REG_STATE_X_( k, shift ) (k << shift)
k 26 include/asm-cris/arch-v32/hwregs/iop/asm/iop_trigger_grp_defs_asm.h #define REG_STATE_X_( k, shift ) (k << shift)
k 26 include/asm-cris/arch-v32/hwregs/iop/asm/iop_version_defs_asm.h #define REG_STATE_X_( k, shift ) (k << shift)
k 23 include/asm-cris/arch-v32/mach-a3/hwregs/asm/clkgen_defs_asm.h #define REG_STATE_X_( k, shift ) (k << shift)
k 23 include/asm-cris/arch-v32/mach-a3/hwregs/asm/ddr2_defs_asm.h #define REG_STATE_X_( k, shift ) (k << shift)
k 23 include/asm-cris/arch-v32/mach-a3/hwregs/asm/gio_defs_asm.h #define REG_STATE_X_( k, shift ) (k << shift)
k 23 include/asm-cris/arch-v32/mach-a3/hwregs/asm/pinmux_defs_asm.h #define REG_STATE_X_( k, shift ) (k << shift)
k 23 include/asm-cris/arch-v32/mach-a3/hwregs/asm/pio_defs_asm.h #define REG_STATE_X_( k, shift ) (k << shift)
k 23 include/asm-cris/arch-v32/mach-a3/hwregs/asm/timer_defs_asm.h #define REG_STATE_X_( k, shift ) (k << shift)
k 23 include/asm-cris/arch-v32/mach-a3/hwregs/iop/asm/iop_sap_in_defs_asm.h #define REG_STATE_X_( k, shift ) (k << shift)
k 23 include/asm-cris/arch-v32/mach-a3/hwregs/iop/asm/iop_sap_out_defs_asm.h #define REG_STATE_X_( k, shift ) (k << shift)
k 23 include/asm-cris/arch-v32/mach-a3/hwregs/iop/asm/iop_sw_cfg_defs_asm.h #define REG_STATE_X_( k, shift ) (k << shift)
k 23 include/asm-cris/arch-v32/mach-a3/hwregs/iop/asm/iop_sw_cpu_defs_asm.h #define REG_STATE_X_( k, shift ) (k << shift)
k 23 include/asm-cris/arch-v32/mach-a3/hwregs/iop/asm/iop_sw_mpu_defs_asm.h #define REG_STATE_X_( k, shift ) (k << shift)
k 23 include/asm-cris/arch-v32/mach-a3/hwregs/iop/asm/iop_sw_spu_defs_asm.h #define REG_STATE_X_( k, shift ) (k << shift)
k 23 include/asm-cris/arch-v32/mach-a3/hwregs/iop/asm/iop_version_defs_asm.h #define REG_STATE_X_( k, shift ) (k << shift)
k 26 include/asm-cris/arch-v32/mach-fs/hwregs/asm/bif_core_defs_asm.h #define REG_STATE_X_( k, shift ) (k << shift)
k 26 include/asm-cris/arch-v32/mach-fs/hwregs/asm/config_defs_asm.h #define REG_STATE_X_( k, shift ) (k << shift)
k 26 include/asm-cris/arch-v32/mach-fs/hwregs/asm/gio_defs_asm.h #define REG_STATE_X_( k, shift ) (k << shift)
k 26 include/asm-cris/arch-v32/mach-fs/hwregs/asm/pinmux_defs_asm.h #define REG_STATE_X_( k, shift ) (k << shift)
k 26 include/asm-cris/arch-v32/mach-fs/hwregs/asm/timer_defs_asm.h #define REG_STATE_X_( k, shift ) (k << shift)
k 84 include/asm-cris/termios.h #define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios2))
k 85 include/asm-cris/termios.h #define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios2))
k 86 include/asm-cris/termios.h #define user_termios_to_kernel_termios_1(k, u) copy_from_user(k, u, sizeof(struct termios))
k 87 include/asm-cris/termios.h #define kernel_termios_to_user_termios_1(u, k) copy_to_user(u, k, sizeof(struct termios))
k 65 include/asm-generic/termios.h #define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios))
k 69 include/asm-generic/termios.h #define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios))
k 72 include/asm-generic/termios.h #define user_termios_to_kernel_termios_1(k, u) copy_from_user(k, u, sizeof(struct termios))
k 73 include/asm-generic/termios.h #define kernel_termios_to_user_termios_1(u, k) copy_to_user(u, k, sizeof(struct termios))
k 84 include/asm-m32r/termios.h #define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios2))
k 85 include/asm-m32r/termios.h #define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios2))
k 86 include/asm-m32r/termios.h #define user_termios_to_kernel_termios_1(k, u) copy_from_user(k, u, sizeof(struct termios))
k 87 include/asm-m32r/termios.h #define kernel_termios_to_user_termios_1(u, k) copy_to_user(u, k, sizeof(struct termios))
k 85 include/asm-m68k/termios.h #define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios2))
k 86 include/asm-m68k/termios.h #define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios2))
k 87 include/asm-m68k/termios.h #define user_termios_to_kernel_termios_1(k, u) copy_from_user(k, u, sizeof(struct termios))
k 88 include/asm-m68k/termios.h #define kernel_termios_to_user_termios_1(u, k) copy_to_user(u, k, sizeof(struct termios))
k 84 include/asm-mn10300/termios.h copy_from_user(k, u, sizeof(struct termios2))
k 86 include/asm-mn10300/termios.h copy_to_user(u, k, sizeof(struct termios2))
k 88 include/asm-mn10300/termios.h copy_from_user(k, u, sizeof(struct termios))
k 90 include/asm-mn10300/termios.h copy_to_user(u, k, sizeof(struct termios))
k 83 include/asm-parisc/termios.h #define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios2))
k 84 include/asm-parisc/termios.h #define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios2))
k 85 include/asm-parisc/termios.h #define user_termios_to_kernel_termios_1(k, u) copy_from_user(k, u, sizeof(struct termios))
k 86 include/asm-parisc/termios.h #define kernel_termios_to_user_termios_1(u, k) copy_to_user(u, k, sizeof(struct termios))
k 90 include/asm-x86/termios.h return copy_from_user(k, u, sizeof(struct termios2));
k 96 include/asm-x86/termios.h return copy_to_user(u, k, sizeof(struct termios2));
k 102 include/asm-x86/termios.h return copy_from_user(k, u, sizeof(struct termios));
k 108 include/asm-x86/termios.h return copy_to_user(u, k, sizeof(struct termios));
k 98 include/asm-xtensa/termios.h #define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios2))
k 99 include/asm-xtensa/termios.h #define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios2))
k 100 include/asm-xtensa/termios.h #define user_termios_to_kernel_termios_1(k, u) copy_from_user(k, u, sizeof(struct termios))
k 101 include/asm-xtensa/termios.h #define kernel_termios_to_user_termios_1(u, k) copy_to_user(u, k, sizeof(struct termios))
k 17 include/crypto/twofish.h u32 s[4][256], w[8], k[32];
k 31 include/linux/filter.h __u32 k; /* Generic multiuse field */
k 104 include/linux/filter.h #define BPF_STMT(code, k) { (unsigned short)(code), 0, 0, k }
k 107 include/linux/filter.h #define BPF_JUMP(code, k, jt, jf) { (unsigned short)(code), jt, jf, k }
k 18 include/linux/genhd.h #define kobj_to_dev(k) container_of((k), struct device, kobj)
k 47 include/linux/jhash.h const u8 *k = key;
k 54 include/linux/jhash.h a += (k[0] +((u32)k[1]<<8) +((u32)k[2]<<16) +((u32)k[3]<<24));
k 55 include/linux/jhash.h b += (k[4] +((u32)k[5]<<8) +((u32)k[6]<<16) +((u32)k[7]<<24));
k 56 include/linux/jhash.h c += (k[8] +((u32)k[9]<<8) +((u32)k[10]<<16)+((u32)k[11]<<24));
k 60 include/linux/jhash.h k += 12;
k 66 include/linux/jhash.h case 11: c += ((u32)k[10]<<24);
k 67 include/linux/jhash.h case 10: c += ((u32)k[9]<<16);
k 68 include/linux/jhash.h case 9 : c += ((u32)k[8]<<8);
k 69 include/linux/jhash.h case 8 : b += ((u32)k[7]<<24);
k 70 include/linux/jhash.h case 7 : b += ((u32)k[6]<<16);
k 71 include/linux/jhash.h case 6 : b += ((u32)k[5]<<8);
k 72 include/linux/jhash.h case 5 : b += k[4];
k 73 include/linux/jhash.h case 4 : a += ((u32)k[3]<<24);
k 74 include/linux/jhash.h case 3 : a += ((u32)k[2]<<16);
k 75 include/linux/jhash.h case 2 : a += ((u32)k[1]<<8);
k 76 include/linux/jhash.h case 1 : a += k[0];
k 96 include/linux/jhash.h a += k[0];
k 97 include/linux/jhash.h b += k[1];
k 98 include/linux/jhash.h c += k[2];
k 100 include/linux/jhash.h k += 3; len -= 3;
k 106 include/linux/jhash.h case 2 : b += k[1];
k 107 include/linux/jhash.h case 1 : a += k[0];
k 172 include/linux/kobject.h return k ? to_kset(kobject_get(&k->kobj)) : NULL;
k 177 include/linux/kobject.h kobject_put(&k->kobj);
k 210 include/linux/mempolicy.h if (k > policy_zone && k != ZONE_MOVABLE)
k 211 include/linux/mempolicy.h policy_zone = k;
k 267 include/linux/slab.h return kmem_cache_alloc(k, flags | __GFP_ZERO);
k 55 include/linux/workqueue.h .lockdep_map = STATIC_LOCKDEP_MAP_INIT(n, k),
k 84 include/linux/zutil.h int k;
k 89 include/linux/zutil.h k = len < NMAX ? len : NMAX;
k 90 include/linux/zutil.h len -= k;
k 91 include/linux/zutil.h while (k >= 16) {
k 94 include/linux/zutil.h k -= 16;
k 96 include/linux/zutil.h if (k != 0) do {
k 99 include/linux/zutil.h } while (--k);
k 54 include/net/llc_conn.h u8 k; /* tx window size; max = 127 */
k 541 include/net/xfrm.h int (*migrate)(struct xfrm_selector *sel, u8 dir, u8 type, struct xfrm_migrate *m, int num_bundles, struct xfrm_kmaddress *k);
k 455 ipc/mqueue.c int k;
k 457 ipc/mqueue.c k = info->attr.mq_curmsgs - 1;
k 458 ipc/mqueue.c while (k >= 0 && info->messages[k]->m_type >= ptr->m_type) {
k 459 ipc/mqueue.c info->messages[k + 1] = info->messages[k];
k 460 ipc/mqueue.c k--;
k 464 ipc/mqueue.c info->messages[k + 1] = ptr;
k 261 kernel/cgroup.c struct css_set *cg = container_of(k, struct css_set, ref);
k 281 kernel/cgroup.c __release_css_set(k, 0);
k 286 kernel/cgroup.c __release_css_set(k, 1);
k 426 kernel/compat.c unsigned long *k;
k 433 kernel/compat.c k = cpus_addr(*new_mask);
k 434 kernel/compat.c return compat_get_bitmap(k, user_mask_ptr, len * 8);
k 456 kernel/compat.c unsigned long *k;
k 469 kernel/compat.c k = cpus_addr(mask);
k 470 kernel/compat.c ret = compat_put_bitmap(user_mask_ptr, k, min_length * 8);
k 584 kernel/cpuset.c int i, j, k; /* indices for partition finding loops */
k 660 kernel/cpuset.c for (k = 0; k < csn; k++) {
k 661 kernel/cpuset.c struct cpuset *c = csa[k];
k 39 kernel/kthread.c struct task_struct *k;
k 58 kernel/kthread.c return (kthread_stop_info.k == current);
k 174 kernel/kthread.c if (k->state != TASK_UNINTERRUPTIBLE) {
k 179 kernel/kthread.c wait_task_inactive(k, 0);
k 180 kernel/kthread.c set_task_cpu(k, cpu);
k 181 kernel/kthread.c k->cpus_allowed = cpumask_of_cpu(cpu);
k 182 kernel/kthread.c k->rt.nr_cpus_allowed = 1;
k 183 kernel/kthread.c k->flags |= PF_THREAD_BOUND;
k 207 kernel/kthread.c get_task_struct(k);
k 214 kernel/kthread.c kthread_stop_info.k = k;
k 215 kernel/kthread.c wake_up_process(k);
k 216 kernel/kthread.c put_task_struct(k);
k 220 kernel/kthread.c kthread_stop_info.k = NULL;
k 245 kernel/power/swap.c unsigned int k;
k 265 kernel/power/swap.c handle->k = 0;
k 281 kernel/power/swap.c handle->cur->entries[handle->k++] = offset;
k 282 kernel/power/swap.c if (handle->k >= MAP_PAGE_ENTRIES) {
k 295 kernel/power/swap.c handle->k = 0;
k 461 kernel/power/swap.c handle->k = 0;
k 473 kernel/power/swap.c offset = handle->cur->entries[handle->k];
k 479 kernel/power/swap.c if (++handle->k >= MAP_PAGE_ENTRIES) {
k 481 kernel/power/swap.c handle->k = 0;
k 181 kernel/power/swsusp.c int k;
k 189 kernel/power/swsusp.c k = nr_pages * (PAGE_SIZE / 1024);
k 190 kernel/power/swsusp.c kps = (k * 100) / centisecs;
k 192 kernel/power/swsusp.c msg, k,
k 207 kernel/printk.c unsigned long long k;
k 213 kernel/printk.c k = (unsigned long long)printk_delay_msec * boot_delay;
k 216 kernel/printk.c while (k) {
k 217 kernel/printk.c k--;
k 2304 kernel/signal.c struct k_sigaction *k;
k 2310 kernel/signal.c k = &t->sighand->action[sig-1];
k 2314 kernel/signal.c *oact = *k;
k 2319 kernel/signal.c *k = *act;
k 43 lib/bitmap.c int k, lim = bits/BITS_PER_LONG;
k 44 lib/bitmap.c for (k = 0; k < lim; ++k)
k 45 lib/bitmap.c if (bitmap[k])
k 49 lib/bitmap.c if (bitmap[k] & BITMAP_LAST_WORD_MASK(bits))
k 58 lib/bitmap.c int k, lim = bits/BITS_PER_LONG;
k 59 lib/bitmap.c for (k = 0; k < lim; ++k)
k 60 lib/bitmap.c if (~bitmap[k])
k 64 lib/bitmap.c if (~bitmap[k] & BITMAP_LAST_WORD_MASK(bits))
k 74 lib/bitmap.c int k, lim = bits/BITS_PER_LONG;
k 75 lib/bitmap.c for (k = 0; k < lim; ++k)
k 76 lib/bitmap.c if (bitmap1[k] != bitmap2[k])
k 80 lib/bitmap.c if ((bitmap1[k] ^ bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits))
k 89 lib/bitmap.c int k, lim = bits/BITS_PER_LONG;
k 90 lib/bitmap.c for (k = 0; k < lim; ++k)
k 91 lib/bitmap.c dst[k] = ~src[k];
k 94 lib/bitmap.c dst[k] = ~src[k] & BITMAP_LAST_WORD_MASK(bits);
k 112 lib/bitmap.c int k, lim = BITS_TO_LONGS(bits), left = bits % BITS_PER_LONG;
k 115 lib/bitmap.c for (k = 0; off + k < lim; ++k) {
k 122 lib/bitmap.c if (!rem || off + k + 1 >= lim)
k 125 lib/bitmap.c upper = src[off + k + 1];
k 126 lib/bitmap.c if (off + k + 1 == lim - 1 && left)
k 129 lib/bitmap.c lower = src[off + k];
k 130 lib/bitmap.c if (left && off + k == lim - 1)
k 132 lib/bitmap.c dst[k] = upper << (BITS_PER_LONG - rem) | lower >> rem;
k 133 lib/bitmap.c if (left && k == lim - 1)
k 134 lib/bitmap.c dst[k] &= mask;
k 157 lib/bitmap.c int k, lim = BITS_TO_LONGS(bits), left = bits % BITS_PER_LONG;
k 159 lib/bitmap.c for (k = lim - off - 1; k >= 0; --k) {
k 166 lib/bitmap.c if (rem && k > 0)
k 167 lib/bitmap.c lower = src[k - 1];
k 170 lib/bitmap.c upper = src[k];
k 171 lib/bitmap.c if (left && k == lim - 1)
k 173 lib/bitmap.c dst[k + off] = lower >> (BITS_PER_LONG - rem) | upper << rem;
k 174 lib/bitmap.c if (left && k + off == lim - 1)
k 175 lib/bitmap.c dst[k + off] &= (1UL << left) - 1;
k 185 lib/bitmap.c int k;
k 188 lib/bitmap.c for (k = 0; k < nr; k++)
k 189 lib/bitmap.c dst[k] = bitmap1[k] & bitmap2[k];
k 196 lib/bitmap.c int k;
k 199 lib/bitmap.c for (k = 0; k < nr; k++)
k 200 lib/bitmap.c dst[k] = bitmap1[k] | bitmap2[k];
k 207 lib/bitmap.c int k;
k 210 lib/bitmap.c for (k = 0; k < nr; k++)
k 211 lib/bitmap.c dst[k] = bitmap1[k] ^ bitmap2[k];
k 218 lib/bitmap.c int k;
k 221 lib/bitmap.c for (k = 0; k < nr; k++)
k 222 lib/bitmap.c dst[k] = bitmap1[k] & ~bitmap2[k];
k 229 lib/bitmap.c int k, lim = bits/BITS_PER_LONG;
k 230 lib/bitmap.c for (k = 0; k < lim; ++k)
k 231 lib/bitmap.c if (bitmap1[k] & bitmap2[k])
k 235 lib/bitmap.c if ((bitmap1[k] & bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits))
k 244 lib/bitmap.c int k, lim = bits/BITS_PER_LONG;
k 245 lib/bitmap.c for (k = 0; k < lim; ++k)
k 246 lib/bitmap.c if (bitmap1[k] & ~bitmap2[k])
k 250 lib/bitmap.c if ((bitmap1[k] & ~bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits))
k 258 lib/bitmap.c int k, w = 0, lim = bits/BITS_PER_LONG;
k 260 lib/bitmap.c for (k = 0; k < lim; k++)
k 261 lib/bitmap.c w += hweight_long(bitmap[k]);
k 264 lib/bitmap.c w += hweight_long(bitmap[k] & BITMAP_LAST_WORD_MASK(bits));
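
All of the lib/bitmap.c helpers above share one shape: 'k' iterates the lim = bits/BITS_PER_LONG full words, and a partial final word is handled separately under BITMAP_LAST_WORD_MASK(bits). A self-contained population-count example of that shape, assuming the older kernel's definition of the last-word mask and using the GCC builtin where the kernel uses hweight_long().

    #include <limits.h>

    #define BITS_PER_LONG (CHAR_BIT * sizeof(long))
    #define LAST_WORD_MASK(bits) \
            (((bits) % BITS_PER_LONG) ? \
             (1UL << ((bits) % BITS_PER_LONG)) - 1 : ~0UL)

    /* count set bits in a bitmap of 'bits' bits (BITS_TO_LONGS(bits) words) */
    int bitmap_weight_sketch(const unsigned long *bitmap, unsigned int bits)
    {
            unsigned int k, lim = bits / BITS_PER_LONG;
            int w = 0;

            for (k = 0; k < lim; k++)               /* full words */
                    w += __builtin_popcountl(bitmap[k]);

            if (bits % BITS_PER_LONG)               /* partial last word */
                    w += __builtin_popcountl(bitmap[k] & LAST_WORD_MASK(bits));
            return w;
    }
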
k 230 lib/inflate.c #define NEEDBITS(n) {while(k<(n)){b|=((ulg)NEXTBYTE())<<k;k+=8;}}
k 231 lib/inflate.c #define DUMPBITS(n) {b>>=(n);k-=(n);}
k 339 lib/inflate.c register int k; /* number of bits in current code */
k 393 lib/inflate.c k = j; /* minimum code length */
k 450 lib/inflate.c for (; k <= g; k++)
k 453 lib/inflate.c a = c[k];
k 459 lib/inflate.c while (k > w + l)
k 467 lib/inflate.c if ((f = 1 << (j = k - w)) > a + 1) /* try a k-w bit table */
k 471 lib/inflate.c xp = c + k;
k 514 lib/inflate.c r.b = (uch)(k - w);
k 531 lib/inflate.c f = 1 << (k - w);
k 536 lib/inflate.c for (j = 1 << (k - 1); i & j; j >>= 1)
k 600 lib/inflate.c register unsigned k; /* number of bits in bit buffer */
k 605 lib/inflate.c k = bk;
k 689 lib/inflate.c bk = k;
k 706 lib/inflate.c register unsigned k; /* number of bits in bit buffer */
k 712 lib/inflate.c k = bk;
k 717 lib/inflate.c n = k & 7;
k 748 lib/inflate.c bk = k;
k 842 lib/inflate.c register unsigned k; /* number of bits in bit buffer */
k 858 lib/inflate.c k = bk;
k 968 lib/inflate.c bk = k;
k 1037 lib/inflate.c register unsigned k; /* number of bits in bit buffer */
k 1043 lib/inflate.c k = bk;
k 1060 lib/inflate.c bk = k;
k 1150 lib/inflate.c int k; /* byte being shifted into crc apparatus */
k 1165 lib/inflate.c for (k = i | 256; k != 1; k >>= 1)
k 1168 lib/inflate.c if (k & 1)
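
The inflate.c NEEDBITS()/DUMPBITS() macros quoted above keep a small bit buffer: 'b' holds pending bits and 'k' counts how many of them are valid; bytes are shifted in LSB-first on demand and shifted out once consumed. A sketch of the same idea as plain functions over an in-memory buffer; the struct and function names are hypothetical and error handling is omitted.

    #include <stdint.h>
    #include <stddef.h>

    struct bitreader {
            const uint8_t *src;
            size_t pos, len;
            uint32_t b;     /* pending bits, LSB first */
            unsigned k;     /* number of valid bits in b */
    };

    /* ensure at least n bits are buffered (n <= 24), like NEEDBITS(n) */
    static void need_bits(struct bitreader *br, unsigned n)
    {
            while (br->k < n) {
                    uint32_t byte = br->pos < br->len ? br->src[br->pos++] : 0;

                    br->b |= byte << br->k;
                    br->k += 8;
            }
    }

    /* read and consume the low n bits, ending with a DUMPBITS(n) step */
    uint32_t get_bits(struct bitreader *br, unsigned n)
    {
            uint32_t v;

            need_bits(br, n);
            v = br->b & ((1U << n) - 1);
            br->b >>= n;
            br->k -= n;
            return v;
    }
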
k 87 lib/klist.c INIT_LIST_HEAD(&k->k_list);
k 88 lib/klist.c spin_lock_init(&k->k_lock);
k 89 lib/klist.c k->get = get;
k 90 lib/klist.c k->put = put;
k 96 lib/klist.c spin_lock(&k->k_lock);
k 97 lib/klist.c list_add(&n->n_node, &k->k_list);
k 98 lib/klist.c spin_unlock(&k->k_lock);
k 103 lib/klist.c spin_lock(&k->k_lock);
k 104 lib/klist.c list_add_tail(&n->n_node, &k->k_list);
k 105 lib/klist.c spin_unlock(&k->k_lock);
k 113 lib/klist.c knode_set_klist(n, k);
k 114 lib/klist.c if (k->get)
k 115 lib/klist.c k->get(n);
k 125 lib/klist.c klist_node_init(k, n);
k 126 lib/klist.c add_head(k, n);
k 137 lib/klist.c klist_node_init(k, n);
k 138 lib/klist.c add_tail(k, n);
k 149 lib/klist.c struct klist *k = knode_klist(pos);
k 151 lib/klist.c klist_node_init(k, n);
k 152 lib/klist.c spin_lock(&k->k_lock);
k 154 lib/klist.c spin_unlock(&k->k_lock);
k 165 lib/klist.c struct klist *k = knode_klist(pos);
k 167 lib/klist.c klist_node_init(k, n);
k 168 lib/klist.c spin_lock(&k->k_lock);
k 170 lib/klist.c spin_unlock(&k->k_lock);
k 191 lib/klist.c struct klist *k = knode_klist(n);
k 192 lib/klist.c void (*put)(struct klist_node *) = k->put;
k 194 lib/klist.c spin_lock(&k->k_lock);
k 199 lib/klist.c spin_unlock(&k->k_lock);
k 247 lib/klist.c i->i_klist = k;
k 263 lib/klist.c klist_iter_init_node(k, i, NULL);
k 665 lib/kobject.c kobject_init_internal(&k->kobj);
k 666 lib/kobject.c INIT_LIST_HEAD(&k->list);
k 667 lib/kobject.c spin_lock_init(&k->list_lock);
k 708 lib/kobject.c if (!k)
k 711 lib/kobject.c kset_init(k);
k 712 lib/kobject.c err = kobject_add_internal(&k->kobj);
k 715 lib/kobject.c kobject_uevent(&k->kobj, KOBJ_ADD);
k 725 lib/kobject.c if (!k)
k 727 lib/kobject.c kobject_put(&k->kobj);
k 741 lib/kobject.c struct kobject *k;
k 745 lib/kobject.c list_for_each_entry(k, &kset->list, entry) {
k 746 lib/kobject.c if (kobject_name(k) && !strcmp(kobject_name(k), name)) {
k 747 lib/kobject.c ret = kobject_get(k);
k 6 lib/reciprocal_div.c u64 val = (1LL << 32) + (k - 1);
k 7 lib/reciprocal_div.c do_div(val, k);
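
lib/reciprocal_div.c precomputes R = ceil(2^32 / k) so that a later division by k can be done with a multiply and a shift instead of a divide. Both halves in stand-alone form (the kernel splits them between reciprocal_value() and the reciprocal_divide() inline); the result is only guaranteed exact for the operand ranges the callers arrange, and k must be non-zero.

    #include <stdint.h>

    /* R = ceil(2^32 / k); k must be non-zero */
    uint32_t reciprocal_value_sketch(uint32_t k)
    {
            uint64_t val = (1ULL << 32) + (k - 1);

            return (uint32_t)(val / k);
    }

    /* approximate a / k as a fixed-point multiply by the precomputed R */
    uint32_t reciprocal_divide_sketch(uint32_t a, uint32_t r)
    {
            return (uint32_t)(((uint64_t)a * r) >> 32);
    }
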
k 21 lib/reed_solomon/decode_rs.c int i, j, r, k, pad;
k 181 lib/reed_solomon/decode_rs.c for (i = 1, k = iprim - 1; i <= nn; i++, k = rs_modnn(rs, k + iprim)) {
k 193 lib/reed_solomon/decode_rs.c loc[count] = k;
k 81 lib/ts_kmp.c unsigned int k, q;
k 84 lib/ts_kmp.c for (k = 0, q = 1; q < len; q++) {
k 85 lib/ts_kmp.c while (k > 0 && (icase ? toupper(pattern[k]) : pattern[k])
k 87 lib/ts_kmp.c k = prefix_tbl[k-1];
k 88 lib/ts_kmp.c if ((icase ? toupper(pattern[k]) : pattern[k])
k 90 lib/ts_kmp.c k++;
k 91 lib/ts_kmp.c prefix_tbl[q] = k;
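
The lib/ts_kmp.c lines are the textbook Knuth-Morris-Pratt prefix (failure) table: prefix_tbl[q] is the length of the longest proper prefix of pattern[0..q] that is also a suffix of it, and 'k' tracks the current candidate length. Ignoring the case-folding, the construction is exactly this (stand-alone sketch, names hypothetical).

    #include <stddef.h>

    /* prefix_tbl must have room for len entries */
    void kmp_prefix_table(const unsigned char *pattern, size_t len,
                          unsigned int *prefix_tbl)
    {
            unsigned int k, q;

            if (!len)
                    return;
            prefix_tbl[0] = 0;
            for (k = 0, q = 1; q < len; q++) {
                    /* fall back while the next character does not extend
                     * the currently matched prefix */
                    while (k > 0 && pattern[k] != pattern[q])
                            k = prefix_tbl[k - 1];
                    if (pattern[k] == pattern[q])
                            k++;
                    prefix_tbl[q] = k;
            }
    }
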
k 377 lib/zlib_deflate/deftree.c int v = s->heap[k];
k 378 lib/zlib_deflate/deftree.c int j = k << 1; /* left son of k */
k 389 lib/zlib_deflate/deftree.c s->heap[k] = s->heap[j]; k = j;
k 394 lib/zlib_deflate/deftree.c s->heap[k] = v;
k 125 mm/mempolicy.c int nd, k;
k 128 mm/mempolicy.c k = policy_zone;
k 133 mm/mempolicy.c for (k = 0; k <= policy_zone; k++) {
k 134 mm/mempolicy.c z = &NODE_DATA(nd)->node_zones[k];
k 1003 mm/mempolicy.c unsigned long k;
k 1025 mm/mempolicy.c for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
k 1027 mm/mempolicy.c if (get_user(t, nmask + k))
k 1029 mm/mempolicy.c if (k == nlongs - 1) {
k 361 net/ax25/af_ax25.c unsigned int k;
k 373 net/ax25/af_ax25.c for (k = 0; k < digi.ndigi; k++)
k 374 net/ax25/af_ax25.c digi.calls[k] = ax25_ctl.digi_addr[k];
k 1891 net/ax25/af_ax25.c int k;
k 1906 net/ax25/af_ax25.c for (k=0; (ax25->digipeat != NULL) && (k < ax25->digipeat->ndigi); k++) {
k 1908 net/ax25/af_ax25.c ax2asc(buf, &ax25->digipeat->calls[k]),
k 1909 net/ax25/af_ax25.c ax25->digipeat->repeated[k]? "*":"");
k 396 net/ax25/ax25_route.c int k;
k 398 net/ax25/ax25_route.c for (k = 0; k < digipeat->ndigi; k++) {
k 399 net/ax25/ax25_route.c if (ax25cmp(addr, &digipeat->calls[k]) == 0)
k 403 net/ax25/ax25_route.c digipeat->ndigi = k;
k 190 net/ax25/sysctl_net_ax25.c int n, k;
k 219 net/ax25/sysctl_net_ax25.c for (k = 0; k < AX25_MAX_VALUES; k++)
k 220 net/ax25/sysctl_net_ax25.c child[k].data = &ax25_dev->values[k];
k 816 net/bridge/netfilter/ebtables.c unsigned int i, j, k, udc_cnt;
k 846 net/bridge/netfilter/ebtables.c k = 0; /* holds the total nr. of entries, should equal
k 851 net/bridge/netfilter/ebtables.c &i, &j, &k, &udc_cnt);
k 861 net/bridge/netfilter/ebtables.c if (k != newinfo->nentries) {
k 45 net/core/filter.c if (k >= SKF_NET_OFF)
k 46 net/core/filter.c ptr = skb_network_header(skb) + k - SKF_NET_OFF;
k 47 net/core/filter.c else if (k >= SKF_LL_OFF)
k 48 net/core/filter.c ptr = skb_mac_header(skb) + k - SKF_LL_OFF;
k 58 net/core/filter.c if (k >= 0)
k 59 net/core/filter.c return skb_header_pointer(skb, k, size, buffer);
k 61 net/core/filter.c if (k >= SKF_AD_OFF)
k 63 net/core/filter.c return __load_pointer(skb, k);
k 120 net/core/filter.c int k;
k 134 net/core/filter.c A += fentry->k;
k 140 net/core/filter.c A -= fentry->k;
k 146 net/core/filter.c A *= fentry->k;
k 154 net/core/filter.c A /= fentry->k;
k 160 net/core/filter.c A &= fentry->k;
k 166 net/core/filter.c A |= fentry->k;
k 172 net/core/filter.c A <<= fentry->k;
k 178 net/core/filter.c A >>= fentry->k;
k 184 net/core/filter.c pc += fentry->k;
k 187 net/core/filter.c pc += (A > fentry->k) ? fentry->jt : fentry->jf;
k 190 net/core/filter.c pc += (A >= fentry->k) ? fentry->jt : fentry->jf;
k 193 net/core/filter.c pc += (A == fentry->k) ? fentry->jt : fentry->jf;
k 196 net/core/filter.c pc += (A & fentry->k) ? fentry->jt : fentry->jf;
k 211 net/core/filter.c k = fentry->k;
k 213 net/core/filter.c ptr = load_pointer(skb, k, 4, &tmp);
k 220 net/core/filter.c k = fentry->k;
k 222 net/core/filter.c ptr = load_pointer(skb, k, 2, &tmp);
k 229 net/core/filter.c k = fentry->k;
k 231 net/core/filter.c ptr = load_pointer(skb, k, 1, &tmp);
k 244 net/core/filter.c k = X + fentry->k;
k 247 net/core/filter.c k = X + fentry->k;
k 250 net/core/filter.c k = X + fentry->k;
k 253 net/core/filter.c ptr = load_pointer(skb, fentry->k, 1, &tmp);
k 260 net/core/filter.c A = fentry->k;
k 263 net/core/filter.c X = fentry->k;
k 266 net/core/filter.c A = mem[fentry->k];
k 269 net/core/filter.c X = mem[fentry->k];
k 278 net/core/filter.c return fentry->k;
k 282 net/core/filter.c mem[fentry->k] = A;
k 285 net/core/filter.c mem[fentry->k] = X;
k 296 net/core/filter.c switch (k-SKF_AD_OFF) {
k 396 net/core/filter.c if (ftest->k == 0)
k 405 net/core/filter.c if (ftest->k >= BPF_MEMWORDS)
k 415 net/core/filter.c if (ftest->k >= (unsigned)(flen-pc-1))
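
Throughout net/core/filter.c, 'k' is the 32-bit immediate carried by every classic BPF instruction: it is added to or compared against the accumulator, used as a packet offset for loads, or returned as the verdict, and the checker lines bounds-check it up front. A cut-down interpreter over a few such opcodes; the opcode values and struct layout are illustrative, not the real BPF encoding, and the validation the kernel performs is omitted.

    #include <stdint.h>
    #include <stddef.h>

    enum { OP_LD_IMM, OP_ADD_K, OP_AND_K, OP_JGT_K, OP_RET_K };

    struct insn {
            uint8_t  code;
            uint8_t  jt, jf;        /* jump offsets for conditionals */
            uint32_t k;             /* per-instruction immediate */
    };

    uint32_t run_filter_sketch(const struct insn *prog, size_t len)
    {
            uint32_t A = 0;
            size_t pc;

            for (pc = 0; pc < len; pc++) {
                    const struct insn *f = &prog[pc];

                    switch (f->code) {
                    case OP_LD_IMM: A = f->k;                          break;
                    case OP_ADD_K:  A += f->k;                         break;
                    case OP_AND_K:  A &= f->k;                         break;
                    case OP_JGT_K:  pc += (A > f->k) ? f->jt : f->jf;  break;
                    case OP_RET_K:  return f->k;
                    }
            }
            return 0;
    }
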
k 101 net/core/flow.c int k = 0;
k 104 net/core/flow.c while ((fle = *flp) != NULL && k < shrink_to) {
k 105 net/core/flow.c k++;
k 132 net/core/flow.c u32 *k = (u32 *) key;
k 134 net/core/flow.c return (jhash2(k, (sizeof(*key) / sizeof(u32)), flow_hash_rnd(cpu)) &
k 1134 net/core/skbuff.c int i, k, eat = (skb->tail + delta) - skb->end;
k 1218 net/core/skbuff.c k = 0;
k 1224 net/core/skbuff.c skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
k 1226 net/core/skbuff.c skb_shinfo(skb)->frags[k].page_offset += eat;
k 1227 net/core/skbuff.c skb_shinfo(skb)->frags[k].size -= eat;
k 1230 net/core/skbuff.c k++;
k 1233 net/core/skbuff.c skb_shinfo(skb)->nr_frags = k;
k 1955 net/core/skbuff.c int i, k = 0;
k 1967 net/core/skbuff.c skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i];
k 1984 net/core/skbuff.c k++;
k 1989 net/core/skbuff.c skb_shinfo(skb1)->nr_frags = k;
k 2295 net/core/skbuff.c int k;
k 2337 net/core/skbuff.c k = 0;
k 2354 net/core/skbuff.c k++;
k 2367 net/core/skbuff.c skb_shinfo(nskb)->nr_frags = k;
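
In the skbuff.c fragment loops, 'k' is the write index of an in-place compaction: fragments wholly covered by the byte count being removed are dropped, the first partially covered one is trimmed, and the survivors are packed to the front before nr_frags is rewritten. The same pattern over a generic (offset, length) segment array; names are hypothetical.

    #include <stddef.h>

    struct seg {
            size_t off;
            size_t len;
    };

    /* trim 'eat' bytes from the front of segs[0..n-1]; returns new count */
    size_t trim_front(struct seg *segs, size_t n, size_t eat)
    {
            size_t i, k = 0;

            for (i = 0; i < n; i++) {
                    if (segs[i].len <= eat) {
                            eat -= segs[i].len;     /* drop whole segment */
                            continue;
                    }
                    segs[k] = segs[i];
                    if (eat) {
                            segs[k].off += eat;     /* trim first survivor */
                            segs[k].len -= eat;
                            eat = 0;
                    }
                    k++;
            }
            return k;
    }
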
k 68 net/dccp/ccids/lib/loss_interval.c int i, k = tfrc_lh_length(lh) - 1; /* k is as in rfc3448bis, 5.4 */
k 70 net/dccp/ccids/lib/loss_interval.c if (k <= 0)
k 73 net/dccp/ccids/lib/loss_interval.c for (i = 0; i <= k; i++) {
k 76 net/dccp/ccids/lib/loss_interval.c if (i < k) {
k 97 net/decnet/dn_table.c dn_fib_key_t k;
k 98 net/decnet/dn_table.c k.datum = dst & DZ_MASK(dz);
k 99 net/decnet/dn_table.c return k;
k 774 net/decnet/dn_table.c dn_fib_key_t k = dz_key(flp->fld_dst, dz);
k 776 net/decnet/dn_table.c for(f = dz_chain(k, dz); f; f = f->fn_next) {
k 777 net/decnet/dn_table.c if (!dn_key_eq(k, f->fn_key)) {
k 778 net/decnet/dn_table.c if (dn_key_leq(k, f->fn_key))
k 1290 net/ipv4/arp.c int k, j;
k 1302 net/ipv4/arp.c for (k = 0, j = 0; k < HBUFFERLEN - 3 && j < dev->addr_len; j++) {
k 1303 net/ipv4/arp.c hbuffer[k++] = hex_asc_hi(n->ha[j]);
k 1304 net/ipv4/arp.c hbuffer[k++] = hex_asc_lo(n->ha[j]);
k 1305 net/ipv4/arp.c hbuffer[k++] = ':';
k 1307 net/ipv4/arp.c hbuffer[--k] = 0;
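
The arp.c fragment formats a hardware address as colon-separated hex by hand: 'k' is the output index, three characters per input byte, and the trailing ':' is overwritten by the terminator. A stand-alone equivalent, assuming the caller sizes the buffer; format_hwaddr is a hypothetical name.

    #include <stddef.h>

    static const char hex_digits[] = "0123456789abcdef";

    /* buf must hold at least addr_len * 3 + 1 bytes */
    void format_hwaddr(char *buf, const unsigned char *addr, size_t addr_len)
    {
            size_t j, k = 0;

            for (j = 0; j < addr_len; j++) {
                    buf[k++] = hex_digits[addr[j] >> 4];
                    buf[k++] = hex_digits[addr[j] & 0x0f];
                    buf[k++] = ':';
            }
            buf[k ? --k : 0] = '\0';        /* drop the trailing ':' */
    }
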
k 257 net/ipv4/fib_hash.c __be32 k = fz_key(flp->fl4_dst, fz);
k 259 net/ipv4/fib_hash.c head = &fz->fz_hash[fn_hash(k, fz)];
k 261 net/ipv4/fib_hash.c if (f->fn_key != k)
k 210 net/ipv4/fib_trie.c return (l == 0) ? 0 : k >> (KEYLENGTH-l) << (KEYLENGTH-l);
k 472 net/ipv4/netfilter/ipt_CLUSTERIP.c int j,k;
k 474 net/ipv4/netfilter/ipt_CLUSTERIP.c for (k=0, j=0; k < HBUFFERLEN-3 && j < ETH_ALEN; j++) {
k 475 net/ipv4/netfilter/ipt_CLUSTERIP.c hbuffer[k++] = hex_asc_hi(payload->src_hw[j]);
k 476 net/ipv4/netfilter/ipt_CLUSTERIP.c hbuffer[k++] = hex_asc_lo(payload->src_hw[j]);
k 477 net/ipv4/netfilter/ipt_CLUSTERIP.c hbuffer[k++]=':';
k 479 net/ipv4/netfilter/ipt_CLUSTERIP.c hbuffer[--k]='\0';
k 918 net/ipv4/route.c int i, k;
k 920 net/ipv4/route.c for (i = rt_hash_mask, k = rover; i >= 0; i--) {
k 923 net/ipv4/route.c k = (k + 1) & rt_hash_mask;
k 924 net/ipv4/route.c rthp = &rt_hash_table[k].chain;
k 925 net/ipv4/route.c spin_lock_bh(rt_hash_lock_addr(k));
k 937 net/ipv4/route.c spin_unlock_bh(rt_hash_lock_addr(k));
k 941 net/ipv4/route.c rover = k;
k 1198 net/ipv4/route.c int i, k;
k 1226 net/ipv4/route.c for (k = 0; k < 2; k++) {
k 1227 net/ipv4/route.c unsigned hash = rt_hash(daddr, skeys[i], ikeys[k],
k 1238 net/ipv4/route.c rth->fl.oif != ikeys[k] ||
k 1481 net/ipv4/route.c int i, k;
k 1492 net/ipv4/route.c for (k = 0; k < 2; k++) {
k 1494 net/ipv4/route.c unsigned hash = rt_hash(daddr, skeys[i], ikeys[k],
k 1506 net/ipv4/route.c rth->fl.oif != ikeys[k] ||
k 857 net/ipv4/tcp_output.c int i, k, eat;
k 860 net/ipv4/tcp_output.c k = 0;
k 866 net/ipv4/tcp_output.c skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
k 868 net/ipv4/tcp_output.c skb_shinfo(skb)->frags[k].page_offset += eat;
k 869 net/ipv4/tcp_output.c skb_shinfo(skb)->frags[k].size -= eat;
k 872 net/ipv4/tcp_output.c k++;
k 875 net/ipv4/tcp_output.c skb_shinfo(skb)->nr_frags = k;
k 2471 net/key/af_key.c struct xfrm_kmaddress k;
k 2490 net/key/af_key.c k.reserved = kma->sadb_x_kmaddress_reserved;
k 2493 net/key/af_key.c &k.local, &k.remote, &k.family);
k 2545 net/key/af_key.c kma ? &k : NULL);
k 2811 net/key/af_key.c int i, k, sz = 0;
k 2821 net/key/af_key.c for (k = 1; ; k++) {
k 2822 net/key/af_key.c struct xfrm_algo_desc *aalg = xfrm_aalg_get_byidx(k);
k 2868 net/key/af_key.c int i, k;
k 2884 net/key/af_key.c for (k = 1; ; k++) {
k 2886 net/key/af_key.c struct xfrm_algo_desc *aalg = xfrm_aalg_get_byidx(k);
k 3342 net/key/af_key.c int family = k->family;
k 3353 net/key/af_key.c kma->sadb_x_kmaddress_reserved = k->reserved;
k 3356 net/key/af_key.c if (!pfkey_sockaddr_fill(&k->local, 0, (struct sockaddr *)sa, family) ||
k 3357 net/key/af_key.c !pfkey_sockaddr_fill(&k->remote, 0, (struct sockaddr *)(sa+socklen), family))
k 3413 net/key/af_key.c if (k != NULL) {
k 3416 net/key/af_key.c pfkey_sockaddr_pair_size(k->family));
k 3456 net/key/af_key.c if (k != NULL && (set_sadb_kmaddress(skb, k) < 0))
k 1023 net/llc/af_llc.c llc->k = opt;
k 1080 net/llc/af_llc.c val = llc->k; break;
k 1071 net/llc/llc_c_ac.c if (llc->k - unacked_pdu < 1)
k 1072 net/llc/llc_c_ac.c llc->k = 1;
k 1074 net/llc/llc_c_ac.c llc->k -= unacked_pdu;
k 1090 net/llc/llc_c_ac.c llc->k += 1;
k 1091 net/llc/llc_c_ac.c if (llc->k > (u8) ~LLC_2_SEQ_NBR_MODULO)
k 1092 net/llc/llc_c_ac.c llc->k = (u8) ~LLC_2_SEQ_NBR_MODULO;
k 623 net/llc/llc_c_ev.c return !(skb_queue_len(&llc_sk(sk)->pdu_unack_q) + 1 == llc_sk(sk)->k);
k 637 net/llc/llc_c_ev.c return skb_queue_len(&llc_sk(sk)->pdu_unack_q) + 1 == llc_sk(sk)->k;
k 851 net/llc/llc_conn.c llc->k = 2; /* tx win size, will adjust dynam */
k 179 net/llc/llc_proc.c llc->retry_count, llc->k, llc->rw, llc->p_flag, llc->f_flag,
k 1836 net/packet/af_packet.c int k;
k 1838 net/packet/af_packet.c for (k = 0; k < po->frames_per_block; k++) {
k 417 net/rxrpc/ar-internal.h struct rxkad_key k;
k 141 net/rxrpc/ar-key.c memcpy(&upayload->k, tsec, sizeof(*tsec));
k 142 net/rxrpc/ar-key.c memcpy(&upayload->k.ticket, (void *)tsec + sizeof(*tsec),
k 69 net/rxrpc/rxkad.c conn->security_ix = payload->k.security_index;
k 78 net/rxrpc/rxkad.c if (crypto_blkcipher_setkey(ci, payload->k.session_key,
k 79 net/rxrpc/rxkad.c sizeof(payload->k.session_key)) < 0)
k 127 net/rxrpc/rxkad.c memcpy(&iv, payload->k.session_key, sizeof(iv));
k 223 net/rxrpc/rxkad.c memcpy(&iv, payload->k.session_key, sizeof(iv));
k 435 net/rxrpc/rxkad.c memcpy(&iv, payload->k.session_key, sizeof(iv));
k 800 net/rxrpc/rxkad.c resp.kvno = htonl(payload->k.kvno);
k 801 net/rxrpc/rxkad.c resp.ticket_len = htonl(payload->k.ticket_len);
k 805 net/rxrpc/rxkad.c rxkad_encrypt_response(conn, &resp, &payload->k);
k 806 net/rxrpc/rxkad.c return rxkad_send_response(conn, &sp->hdr, &resp, &payload->k);
k 386 net/sched/cls_rsvp.h int i, k;
k 388 net/sched/cls_rsvp.h for (k=0; k<2; k++) {
k 1888 net/xfrm/xfrm_policy.c for (; k < sp->len; k++) {
k 1889 net/xfrm/xfrm_policy.c if (sp->xvec[k]->props.mode != XFRM_MODE_TRANSPORT) {
k 1890 net/xfrm/xfrm_policy.c *idxp = k;
k 1989 net/xfrm/xfrm_policy.c int i, k;
k 2019 net/xfrm/xfrm_policy.c for (i = xfrm_nr-1, k = 0; i >= 0; i--) {
k 2020 net/xfrm/xfrm_policy.c k = xfrm_policy_ok(tpp[i], sp, k, family);
k 2021 net/xfrm/xfrm_policy.c if (k < 0) {
k 2022 net/xfrm/xfrm_policy.c if (k < -1)
k 2024 net/xfrm/xfrm_policy.c xerr_idx = -(2+k);
k 2030 net/xfrm/xfrm_policy.c if (secpath_has_nontransport(sp, k, &xerr_idx)) {
k 2727 net/xfrm/xfrm_policy.c km_migrate(sel, dir, type, m, num_migrate, k);
k 1827 net/xfrm/xfrm_state.c ret = km->migrate(sel, dir, type, m, num_migrate, k);
k 1720 net/xfrm/xfrm_user.c if (k != NULL) {
k 1724 net/xfrm/xfrm_user.c memcpy(&k->local, &uk->local, sizeof(k->local));
k 1725 net/xfrm/xfrm_user.c memcpy(&k->remote, &uk->remote, sizeof(k->remote));
k 1726 net/xfrm/xfrm_user.c k->family = uk->family;
k 1727 net/xfrm/xfrm_user.c k->reserved = uk->reserved;
k 1816 net/xfrm/xfrm_user.c uk.family = k->family;
k 1817 net/xfrm/xfrm_user.c uk.reserved = k->reserved;
k 1818 net/xfrm/xfrm_user.c memcpy(&uk.local, &k->local, sizeof(uk.local));
k 1819 net/xfrm/xfrm_user.c memcpy(&uk.remote, &k->local, sizeof(uk.remote));
k 1851 net/xfrm/xfrm_user.c if (k != NULL && (copy_to_user_kmaddress(k, skb) < 0))
k 1874 net/xfrm/xfrm_user.c skb = nlmsg_new(xfrm_migrate_msgsize(num_migrate, !!k), GFP_ATOMIC);
k 1879 net/xfrm/xfrm_user.c if (build_migrate(skb, m, num_migrate, k, sel, dir, type) < 0)
k 246 scripts/kallsyms.c unsigned int i, k, off;
k 304 scripts/kallsyms.c for (k = 0; k < table[i].len; k++)
k 305 scripts/kallsyms.c printf(", 0x%02x", table[i].sym[k]);
k 67 scripts/mod/sumversion.c #define ROUND1(a,b,c,d,k,s) (a = lshift(a + F(b,c,d) + k, s))
k 68 scripts/mod/sumversion.c #define ROUND2(a,b,c,d,k,s) (a = lshift(a + G(b,c,d) + k + (uint32_t)0x5A827999,s))
k 69 scripts/mod/sumversion.c #define ROUND3(a,b,c,d,k,s) (a = lshift(a + H(b,c,d) + k + (uint32_t)0x6ED9EBA1,s))
k 306 scripts/pnmtologo.c unsigned int i, j, k;
k 312 scripts/pnmtologo.c for (k = 0; k < 16; k++)
k 313 scripts/pnmtologo.c if (is_equal(logo_data[i][j], clut_vga16[k]))
k 315 scripts/pnmtologo.c if (k == 16)
k 327 scripts/pnmtologo.c for (k = 0; k < 16; k++)
k 328 scripts/pnmtologo.c if (is_equal(logo_data[i][j], clut_vga16[k]))
k 330 scripts/pnmtologo.c val = k<<4;
k 332 scripts/pnmtologo.c for (k = 0; k < 16; k++)
k 333 scripts/pnmtologo.c if (is_equal(logo_data[i][j], clut_vga16[k]))
k 335 scripts/pnmtologo.c val |= k;
k 346 scripts/pnmtologo.c unsigned int i, j, k;
k 351 scripts/pnmtologo.c for (k = 0; k < logo_clutsize; k++)
k 352 scripts/pnmtologo.c if (is_equal(logo_data[i][j], logo_clut[k]))
k 354 scripts/pnmtologo.c if (k == logo_clutsize) {
k 369 scripts/pnmtologo.c for (k = 0; k < logo_clutsize; k++)
k 370 scripts/pnmtologo.c if (is_equal(logo_data[i][j], logo_clut[k]))
k 372 scripts/pnmtologo.c write_hex(k+32);
k 5421 security/selinux/hooks.c k->security = ksec;
k 5428 security/selinux/hooks.c struct key_security_struct *ksec = k->security;
k 5430 security/selinux/hooks.c k->security = NULL;
k 457 security/selinux/ss/avtab.c return avtab_insert(a, k, d);
k 273 security/selinux/ss/conditional.c if (k->specified & AVTAB_TYPE) {
k 274 security/selinux/ss/conditional.c if (avtab_search(&p->te_avtab, k)) {
k 287 security/selinux/ss/conditional.c node_ptr = avtab_search_node(&p->te_cond_avtab, k);
k 289 security/selinux/ss/conditional.c if (avtab_search_node_next(node_ptr, k->specified)) {
k 306 security/selinux/ss/conditional.c if (avtab_search(&p->te_cond_avtab, k)) {
k 313 security/selinux/ss/conditional.c node_ptr = avtab_insert_nonunique(&p->te_cond_avtab, k, d);
k 1957 security/selinux/ss/policydb.c int k;
k 1962 security/selinux/ss/policydb.c for (k = 0; k < 4; k++)
k 1963 security/selinux/ss/policydb.c c->u.node6.addr[k] = nodebuf[k];
k 1964 security/selinux/ss/policydb.c for (k = 0; k < 4; k++)
k 1965 security/selinux/ss/policydb.c c->u.node6.mask[k] = nodebuf[k+4];
k 2445 security/selinux/ss/services.c char *name = k, **classes = args;
k 2483 security/selinux/ss/services.c char *name = k, **perms = args;
k 520 sound/core/pcm_lib.c c->min = muldiv32(a->min, b->min, k, &r);
k 522 sound/core/pcm_lib.c c->max = muldiv32(a->max, b->max, k, &r);
k 551 sound/core/pcm_lib.c c->min = muldiv32(a->min, k, b->max, &r);
k 554 sound/core/pcm_lib.c c->max = muldiv32(a->max, k, b->min, &r);
k 585 sound/core/pcm_lib.c unsigned int k;
k 590 sound/core/pcm_lib.c for (k = 0; k < rats_count; ++k) {
k 591 sound/core/pcm_lib.c unsigned int num = rats[k].num;
k 598 sound/core/pcm_lib.c if (den < rats[k].den_min)
k 600 sound/core/pcm_lib.c if (den > rats[k].den_max)
k 601 sound/core/pcm_lib.c den = rats[k].den_max;
k 604 sound/core/pcm_lib.c r = (den - rats[k].den_min) % rats[k].den_step;
k 624 sound/core/pcm_lib.c for (k = 0; k < rats_count; ++k) {
k 625 sound/core/pcm_lib.c unsigned int num = rats[k].num;
k 634 sound/core/pcm_lib.c if (den > rats[k].den_max)
k 636 sound/core/pcm_lib.c if (den < rats[k].den_min)
k 637 sound/core/pcm_lib.c den = rats[k].den_min;
k 640 sound/core/pcm_lib.c r = (den - rats[k].den_min) % rats[k].den_step;
k 642 sound/core/pcm_lib.c den += rats[k].den_step - r;
k 689 sound/core/pcm_lib.c unsigned int k;
k 694 sound/core/pcm_lib.c for (k = 0; k < rats_count; ++k) {
k 696 sound/core/pcm_lib.c unsigned int den = rats[k].den;
k 700 sound/core/pcm_lib.c if (num > rats[k].num_max)
k 702 sound/core/pcm_lib.c if (num < rats[k].num_min)
k 703 sound/core/pcm_lib.c num = rats[k].num_max;
k 706 sound/core/pcm_lib.c r = (num - rats[k].num_min) % rats[k].num_step;
k 708 sound/core/pcm_lib.c num += rats[k].num_step - r;
k 726 sound/core/pcm_lib.c for (k = 0; k < rats_count; ++k) {
k 728 sound/core/pcm_lib.c unsigned int den = rats[k].den;
k 732 sound/core/pcm_lib.c if (num < rats[k].num_min)
k 734 sound/core/pcm_lib.c if (num > rats[k].num_max)
k 735 sound/core/pcm_lib.c num = rats[k].num_max;
k 738 sound/core/pcm_lib.c r = (num - rats[k].num_min) % rats[k].num_step;
k 785 sound/core/pcm_lib.c unsigned int k;
k 792 sound/core/pcm_lib.c for (k = 0; k < count; k++) {
k 793 sound/core/pcm_lib.c if (mask && !(mask & (1 << k)))
k 795 sound/core/pcm_lib.c if (i->min == list[k] && !i->openmin)
k 797 sound/core/pcm_lib.c if (i->min < list[k]) {
k 798 sound/core/pcm_lib.c i->min = list[k];
k 807 sound/core/pcm_lib.c for (k = count; k-- > 0;) {
k 808 sound/core/pcm_lib.c if (mask && !(mask & (1 << k)))
k 810 sound/core/pcm_lib.c if (i->max == list[k] && !i->openmax)
k 812 sound/core/pcm_lib.c if (i->max > list[k]) {
k 813 sound/core/pcm_lib.c i->max = list[k];
k 872 sound/core/pcm_lib.c unsigned int k;
k 894 sound/core/pcm_lib.c k = 0;
k 896 sound/core/pcm_lib.c if (snd_BUG_ON(k >= ARRAY_SIZE(c->deps)))
k 898 sound/core/pcm_lib.c c->deps[k++] = dep;
k 1193 sound/core/pcm_lib.c unsigned int k;
k 1195 sound/core/pcm_lib.c for (k = SNDRV_PCM_HW_PARAM_FIRST_MASK; k <= SNDRV_PCM_HW_PARAM_LAST_MASK; k++)
k 1196 sound/core/pcm_lib.c _snd_pcm_hw_param_any(params, k);
k 1197 sound/core/pcm_lib.c for (k = SNDRV_PCM_HW_PARAM_FIRST_INTERVAL; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++)
k 1198 sound/core/pcm_lib.c _snd_pcm_hw_param_any(params, k);
k 163 sound/core/pcm_native.c unsigned int k;
k 182 sound/core/pcm_native.c for (k = SNDRV_PCM_HW_PARAM_FIRST_MASK; k <= SNDRV_PCM_HW_PARAM_LAST_MASK; k++) {
k 183 sound/core/pcm_native.c m = hw_param_mask(params, k);
k 186 sound/core/pcm_native.c if (!(params->rmask & (1 << k)))
k 189 sound/core/pcm_native.c printk("%s = ", snd_pcm_hw_param_names[k]);
k 192 sound/core/pcm_native.c changed = snd_mask_refine(m, constrs_mask(constrs, k));
k 197 sound/core/pcm_native.c params->cmask |= 1 << k;
k 202 sound/core/pcm_native.c for (k = SNDRV_PCM_HW_PARAM_FIRST_INTERVAL; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++) {
k 203 sound/core/pcm_native.c i = hw_param_interval(params, k);
k 206 sound/core/pcm_native.c if (!(params->rmask & (1 << k)))
k 209 sound/core/pcm_native.c printk("%s = ", snd_pcm_hw_param_names[k]);
k 218 sound/core/pcm_native.c changed = snd_interval_refine(i, constrs_interval(constrs, k));
k 228 sound/core/pcm_native.c params->cmask |= 1 << k;
k 233 sound/core/pcm_native.c for (k = 0; k < constrs->rules_num; k++)
k 234 sound/core/pcm_native.c rstamps[k] = 0;
k 235 sound/core/pcm_native.c for (k = 0; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++)
k 236 sound/core/pcm_native.c vstamps[k] = (params->rmask & (1 << k)) ? 1 : 0;
k 239 sound/core/pcm_native.c for (k = 0; k < constrs->rules_num; k++) {
k 240 sound/core/pcm_native.c struct snd_pcm_hw_rule *r = &constrs->rules[k];
k 246 sound/core/pcm_native.c if (vstamps[r->deps[d]] > rstamps[k]) {
k 254 sound/core/pcm_native.c printk("Rule %d [%p]: ", k, r->func);
k 288 sound/core/pcm_native.c rstamps[k] = stamp;
k 1710 sound/core/pcm_native.c unsigned int k;
k 1715 sound/core/pcm_native.c for (k = 0; k <= SNDRV_PCM_FORMAT_LAST; ++k) {
k 1717 sound/core/pcm_native.c if (! snd_mask_test(mask, k))
k 1719 sound/core/pcm_native.c bits = snd_pcm_format_physical_width(k);
k 1723 sound/core/pcm_native.c snd_mask_reset(&m, k);
k 1732 sound/core/pcm_native.c unsigned int k;
k 1737 sound/core/pcm_native.c for (k = 0; k <= SNDRV_PCM_FORMAT_LAST; ++k) {
k 1739 sound/core/pcm_native.c if (! snd_mask_test(hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT), k))
k 1741 sound/core/pcm_native.c bits = snd_pcm_format_physical_width(k);
k 1791 sound/core/pcm_native.c int k, err;
k 1793 sound/core/pcm_native.c for (k = SNDRV_PCM_HW_PARAM_FIRST_MASK; k <= SNDRV_PCM_HW_PARAM_LAST_MASK; k++) {
k 1794 sound/core/pcm_native.c snd_mask_any(constrs_mask(constrs, k));
k 1797 sound/core/pcm_native.c for (k = SNDRV_PCM_HW_PARAM_FIRST_INTERVAL; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++) {
k 1798 sound/core/pcm_native.c snd_interval_any(constrs_interval(constrs, k));
k 46 sound/drivers/pcm-indirect2.c int k;
k 75 sound/drivers/pcm-indirect2.c k = 0;
k 81 sound/drivers/pcm-indirect2.c k++;
k 83 sound/drivers/pcm-indirect2.c if (((k % 8) == 0) && (k != 0)) {
k 85 sound/drivers/pcm-indirect2.c k = 0;
k 91 sound/drivers/pcm-indirect2.c k = 0;
k 97 sound/drivers/pcm-indirect2.c k++;
k 98 sound/drivers/pcm-indirect2.c if (!k)
k 243 sound/isa/sb/emu8000_patch.c int k;
k 246 sound/isa/sb/emu8000_patch.c for (k = 1; k <= looplen; k++) {
k 247 sound/isa/sb/emu8000_patch.c s = read_word(data, offset - k, sp->v.mode_flags);
k 895 sound/oss/msnd_pinnacle.c int n, k;
k 898 sound/oss/msnd_pinnacle.c k = PAGE_SIZE;
k 899 sound/oss/msnd_pinnacle.c if (k > count)
k 900 sound/oss/msnd_pinnacle.c k = count;
k 904 sound/oss/msnd_pinnacle.c n = msnd_fifo_read(&dev.DARF, page, k);
k 913 sound/oss/msnd_pinnacle.c if (n == k && count)
k 953 sound/oss/msnd_pinnacle.c int n, k;
k 956 sound/oss/msnd_pinnacle.c k = PAGE_SIZE;
k 957 sound/oss/msnd_pinnacle.c if (k > count)
k 958 sound/oss/msnd_pinnacle.c k = count;
k 960 sound/oss/msnd_pinnacle.c if (copy_from_user(page, buf, k)) {
k 967 sound/oss/msnd_pinnacle.c n = msnd_fifo_write(&dev.DAPF, page, k);
k 972 sound/oss/msnd_pinnacle.c if (count && n == k)
k 447 sound/pci/ac97/ac97_pcm.c int i, j, k;
k 503 sound/pci/ac97/ac97_pcm.c for (k = 0; k < i; k++) {
k 504 sound/pci/ac97/ac97_pcm.c if (rpcm->stream == rpcms[k].stream)
k 505 sound/pci/ac97/ac97_pcm.c tmp &= ~rpcms[k].r[0].rslots[j];
k 1455 sound/pci/emu10k1/emufx.c int j, k, l, d;
k 1457 sound/pci/emu10k1/emufx.c k = 0xb0 + (z * 8) + (j * 4);
k 1462 sound/pci/emu10k1/emufx.c A_OP(icode, &ptr, iMACMV, A_GPR(k+1), A_GPR(k), A_GPR(k+1), A_GPR(BASS_GPR + 4 + j));
k 1463 sound/pci/emu10k1/emufx.c A_OP(icode, &ptr, iMACMV, A_GPR(k), A_GPR(d), A_GPR(k), A_GPR(BASS_GPR + 2 + j));
k 1464 sound/pci/emu10k1/emufx.c A_OP(icode, &ptr, iMACMV, A_GPR(k+3), A_GPR(k+2), A_GPR(k+3), A_GPR(BASS_GPR + 8 + j));
k 1465 sound/pci/emu10k1/emufx.c A_OP(icode, &ptr, iMAC0, A_GPR(k+2), A_GPR_ACCU, A_GPR(k+2), A_GPR(BASS_GPR + 6 + j));
k 1466 sound/pci/emu10k1/emufx.c A_OP(icode, &ptr, iACC3, A_GPR(k+2), A_GPR(k+2), A_GPR(k+2), A_C_00000000);
k 1468 sound/pci/emu10k1/emufx.c A_OP(icode, &ptr, iMAC0, A_C_00000000, A_C_00000000, A_GPR(k+2), A_GPR(TREBLE_GPR + 0 + j));
k 1470 sound/pci/emu10k1/emufx.c A_OP(icode, &ptr, iMACMV, A_GPR(l), A_GPR(k+2), A_GPR(l), A_GPR(TREBLE_GPR + 2 + j));
k 2165 sound/pci/emu10k1/emufx.c int j, k, l, d;
k 2167 sound/pci/emu10k1/emufx.c k = 0xa0 + (z * 8) + (j * 4);
k 2172 sound/pci/emu10k1/emufx.c OP(icode, &ptr, iMACMV, GPR(k+1), GPR(k), GPR(k+1), GPR(BASS_GPR + 4 + j));
k 2173 sound/pci/emu10k1/emufx.c OP(icode, &ptr, iMACMV, GPR(k), GPR(d), GPR(k), GPR(BASS_GPR + 2 + j));
k 2174 sound/pci/emu10k1/emufx.c OP(icode, &ptr, iMACMV, GPR(k+3), GPR(k+2), GPR(k+3), GPR(BASS_GPR + 8 + j));
k 2175 sound/pci/emu10k1/emufx.c OP(icode, &ptr, iMAC0, GPR(k+2), GPR_ACCU, GPR(k+2), GPR(BASS_GPR + 6 + j));
k 2176 sound/pci/emu10k1/emufx.c OP(icode, &ptr, iACC3, GPR(k+2), GPR(k+2), GPR(k+2), C_00000000);
k 2178 sound/pci/emu10k1/emufx.c OP(icode, &ptr, iMAC0, C_00000000, C_00000000, GPR(k+2), GPR(TREBLE_GPR + 0 + j));
k 2180 sound/pci/emu10k1/emufx.c OP(icode, &ptr, iMACMV, GPR(l), GPR(k+2), GPR(l), GPR(TREBLE_GPR + 2 + j));
k 51 sound/pci/emu10k1/voice.c int i, j, k, first_voice, last_voice, skip;
k 66 sound/pci/emu10k1/voice.c for (k = 0; k < number; k++) {
k 67 sound/pci/emu10k1/voice.c voice = &emu->voices[(i+k) % NUM_G];
k 391 sound/pci/hda/patch_cmedia.c int i, j, k, len;
k 410 sound/pci/hda/patch_cmedia.c for (k = 0; k < len; k++)
k 411 sound/pci/hda/patch_cmedia.c if (conn[k] == spec->dac_nids[i]) {
k 412 sound/pci/hda/patch_cmedia.c spec->multi_init[j].param = k;
k 3194 sound/pci/hda/patch_sigmatel.c int i, j, k;
k 3208 sound/pci/hda/patch_sigmatel.c for (k = 0; k < num_cons; k++)
k 3209 sound/pci/hda/patch_sigmatel.c if (con_lst[k] == cfg->input_pins[i]) {
k 3210 sound/pci/hda/patch_sigmatel.c index = k;
k 140 sound/pci/mixart/mixart_hwdep.c u32 k;
k 171 sound/pci/mixart/mixart_hwdep.c for(k=0; k < connector->uid_count; k++) {
k 174 sound/pci/mixart/mixart_hwdep.c if(k < MIXART_FIRST_DIG_AUDIO_ID) {
k 175 sound/pci/mixart/mixart_hwdep.c pipe = &mgr->chip[k/2]->pipe_out_ana;
k 177 sound/pci/mixart/mixart_hwdep.c pipe = &mgr->chip[(k-MIXART_FIRST_DIG_AUDIO_ID)/2]->pipe_out_dig;
k 179 sound/pci/mixart/mixart_hwdep.c if(k & 1) {
k 180 sound/pci/mixart/mixart_hwdep.c pipe->uid_right_connector = connector->uid[k]; /* odd */
k 182 sound/pci/mixart/mixart_hwdep.c pipe->uid_left_connector = connector->uid[k]; /* even */
k 189 sound/pci/mixart/mixart_hwdep.c request.uid = connector->uid[k];
k 213 sound/pci/mixart/mixart_hwdep.c for(k=0; k < connector->uid_count; k++) {
k 216 sound/pci/mixart/mixart_hwdep.c if(k < MIXART_FIRST_DIG_AUDIO_ID) {
k 217 sound/pci/mixart/mixart_hwdep.c pipe = &mgr->chip[k/2]->pipe_in_ana;
k 219 sound/pci/mixart/mixart_hwdep.c pipe = &mgr->chip[(k-MIXART_FIRST_DIG_AUDIO_ID)/2]->pipe_in_dig;
k 221 sound/pci/mixart/mixart_hwdep.c if(k & 1) {
k 222 sound/pci/mixart/mixart_hwdep.c pipe->uid_right_connector = connector->uid[k]; /* odd */
k 224 sound/pci/mixart/mixart_hwdep.c pipe->uid_left_connector = connector->uid[k]; /* even */
k 231 sound/pci/mixart/mixart_hwdep.c request.uid = connector->uid[k];
k 254 sound/pci/mixart/mixart_hwdep.c u32 k;
k 295 sound/pci/mixart/mixart_hwdep.c for(k=0; k<mgr->num_cards; k++) {
k 296 sound/pci/mixart/mixart_hwdep.c mgr->chip[k]->uid_in_analog_physio = phys_io.uid[k];
k 297 sound/pci/mixart/mixart_hwdep.c mgr->chip[k]->uid_out_analog_physio = phys_io.uid[phys_io.nb_uid/2 + k];
k 306 sound/pci/mixart/mixart_hwdep.c u32 k;
k 321 sound/pci/mixart/mixart_hwdep.c err = snd_mixart_send_msg(mgr, &request, sizeof(k), &k);
k 322 sound/pci/mixart/mixart_hwdep.c if( (err < 0) || (k != 0) ) {
k 1288 sound/pci/rme9652/rme9652.c unsigned int k;
k 1291 sound/pci/rme9652/rme9652.c for (k = 0; k < rme9652->ss_channels; ++k) {
k 1292 sound/pci/rme9652/rme9652.c ucontrol->value.integer.value[k] = !!(thru_bits & (1 << k));
k 1822 sound/pci/rme9652/rme9652.c unsigned int k;
k 1850 sound/pci/rme9652/rme9652.c for (k = 0; k < RME9652_NCHANNELS; ++k)
k 1851 sound/pci/rme9652/rme9652.c rme9652_write(rme9652, RME9652_thru_base + k * 4, 0);
k 293 sound/soc/codecs/wm8510.c unsigned int k;
k 335 sound/soc/codecs/wm8510.c pll_div.k = K;
k 358 sound/soc/codecs/wm8510.c wm8510_write(codec, WM8510_PLLK1, pll_div.k >> 18);
k 359 sound/soc/codecs/wm8510.c wm8510_write(codec, WM8510_PLLK2, (pll_div.k >> 9) & 0x1ff);
k 360 sound/soc/codecs/wm8510.c wm8510_write(codec, WM8510_PLLK3, pll_div.k & 0x1ff);
k 396 sound/soc/codecs/wm8580.c u32 k:24;
k 469 sound/soc/codecs/wm8580.c pll_div->k = K;
k 472 sound/soc/codecs/wm8580.c pll_div->n, pll_div->k, pll_div->prescale, pll_div->freqmode,
k 528 sound/soc/codecs/wm8580.c wm8580_write(codec, WM8580_PLLA1 + offset, pll_div.k & 0x1ff);
k 529 sound/soc/codecs/wm8580.c wm8580_write(codec, WM8580_PLLA2 + offset, (pll_div.k >> 9) & 0xff);
k 531 sound/soc/codecs/wm8580.c (pll_div.k >> 18 & 0xf) | (pll_div.n << 4));
k 698 sound/soc/codecs/wm8753.c u32 k:24;
k 738 sound/soc/codecs/wm8753.c pll_div->k = K;
k 774 sound/soc/codecs/wm8753.c value = (pll_div.n << 5) + ((pll_div.k & 0x3c0000) >> 18);
k 778 sound/soc/codecs/wm8753.c value = (pll_div.k & 0x03fe00) >> 9;
k 782 sound/soc/codecs/wm8753.c value = pll_div.k & 0x0001ff;
k 766 sound/soc/codecs/wm8900.c u16 k;
k 829 sound/soc/codecs/wm8900.c fll_div->k = K / 10;
k 883 sound/soc/codecs/wm8900.c if (fll_div.k) {
k 885 sound/soc/codecs/wm8900.c (fll_div.k >> 8) | 0x100);
k 886 sound/soc/codecs/wm8900.c wm8900_write(codec, WM8900_REG_FLLCTL3, fll_div.k & 0xff);
k 991 sound/soc/codecs/wm8990.c u32 k;
k 1032 sound/soc/codecs/wm8990.c pll_div->k = K;
k 1057 sound/soc/codecs/wm8990.c wm8990_write(codec, WM8990_PLL2, (u8)(pll_div.k>>8));
k 1058 sound/soc/codecs/wm8990.c wm8990_write(codec, WM8990_PLL3, (u8)(pll_div.k & 0xFF));
k 665 sound/soc/codecs/wm9713.c u32 k:24;
k 726 sound/soc/codecs/wm9713.c pll_div->k = K;
k 753 sound/soc/codecs/wm9713.c if (pll_div.k == 0) {
k 763 sound/soc/codecs/wm9713.c reg = reg2 | (0x5 << 4) | (pll_div.k >> 20);
k 767 sound/soc/codecs/wm9713.c reg = reg2 | (0x4 << 4) | ((pll_div.k >> 16) & 0xf);
k 771 sound/soc/codecs/wm9713.c reg = reg2 | (0x3 << 4) | ((pll_div.k >> 12) & 0xf);
k 775 sound/soc/codecs/wm9713.c reg = reg2 | (0x2 << 4) | ((pll_div.k >> 8) & 0xf);
k 779 sound/soc/codecs/wm9713.c reg = reg2 | (0x1 << 4) | ((pll_div.k >> 4) & 0xf);
k 782 sound/soc/codecs/wm9713.c reg = reg2 | (0x0 << 4) | (pll_div.k & 0xf); /* K [3:0] */
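
Several of the WM codec drivers above keep the fractional part of a PLL ratio as a 24-bit 'k' and then slice it across narrow registers; the wm8510 lines split it into 6 + 9 + 9 bit fields. A sketch of just that packing step, with a hypothetical register struct standing in for the codec writes.

    #include <stdint.h>

    struct pll_regs {
            uint16_t pllk1;         /* K[23:18] */
            uint16_t pllk2;         /* K[17:9]  */
            uint16_t pllk3;         /* K[8:0]   */
    };

    void pack_pll_k(struct pll_regs *r, uint32_t k)
    {
            k &= 0xffffff;          /* K is a 24-bit fraction */
            r->pllk1 = k >> 18;
            r->pllk2 = (k >> 9) & 0x1ff;
            r->pllk3 = k & 0x1ff;
    }
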
k 247 sound/soc/soc-dapm.c const struct snd_kcontrol_new *k = widget->kcontrols;
k 254 sound/soc/soc-dapm.c if (widget->num_kcontrols && k) {
k 256 sound/soc/soc-dapm.c (struct soc_mixer_control *)k->private_value;