lp 127 crypto/ccm.c unsigned int lp = req->iv[0];
lp 128 crypto/ccm.c unsigned int l = lp + 1;
lp 846 fs/gfs2/dir.c __be64 *lp;
lp 903 fs/gfs2/dir.c lp = (__be64 *)(dibh->b_data + sizeof(struct gfs2_dinode));
lp 905 fs/gfs2/dir.c for (x = sdp->sd_hash_ptrs; x--; lp++)
lp 906 fs/gfs2/dir.c *lp = cpu_to_be64(bn);
lp 939 fs/gfs2/dir.c __be64 *lp;
lp 984 fs/gfs2/dir.c lp = kmalloc(half_len * sizeof(__be64), GFP_NOFS | __GFP_NOFAIL);
lp 987 fs/gfs2/dir.c lp[x] = cpu_to_be64(bn);
lp 989 fs/gfs2/dir.c error = gfs2_dir_write_data(dip, (char *)lp, start * sizeof(u64),
lp 997 fs/gfs2/dir.c kfree(lp);
lp 1058 fs/gfs2/dir.c kfree(lp);
lp 1367 fs/gfs2/dir.c __be64 *lp;
lp 1381 fs/gfs2/dir.c lp = kmalloc(sdp->sd_hash_bsize, GFP_NOFS);
lp 1382 fs/gfs2/dir.c if (!lp)
lp 1390 fs/gfs2/dir.c error = gfs2_dir_read_data(dip, (char *)lp,
lp 1403 fs/gfs2/dir.c be64_to_cpu(lp[lp_offset]));
lp 1412 fs/gfs2/dir.c kfree(lp);
lp 1782 fs/gfs2/dir.c __be64 *lp;
lp 1792 fs/gfs2/dir.c lp = kmalloc(sdp->sd_hash_bsize, GFP_NOFS);
lp 1793 fs/gfs2/dir.c if (!lp)
lp 1801 fs/gfs2/dir.c error = gfs2_dir_read_data(dip, (char *)lp,
lp 1812 fs/gfs2/dir.c leaf_no = be64_to_cpu(lp[lp_offset]);
lp 1836 fs/gfs2/dir.c kfree(lp);
lp 39 fs/gfs2/locking/dlm/lock.c struct gdlm_ls *ls = lp->ls;
lp 42 fs/gfs2/locking/dlm/lock.c list_add_tail(&lp->delay_list, &ls->submit);
lp 49 fs/gfs2/locking/dlm/lock.c clear_bit(LFL_AST_WAIT, &lp->flags);
lp 51 fs/gfs2/locking/dlm/lock.c wake_up_bit(&lp->flags, LFL_AST_WAIT);
lp 56 fs/gfs2/locking/dlm/lock.c struct gdlm_ls *ls = lp->ls;
lp 59 fs/gfs2/locking/dlm/lock.c if (!list_empty(&lp->delay_list))
lp 60 fs/gfs2/locking/dlm/lock.c list_del_init(&lp->delay_list);
lp 64 fs/gfs2/locking/dlm/lock.c kfree(lp);
lp 69 fs/gfs2/locking/dlm/lock.c struct gdlm_ls *ls = lp->ls;
lp 72 fs/gfs2/locking/dlm/lock.c list_add_tail(&lp->delay_list, &ls->delayed);
lp 78 fs/gfs2/locking/dlm/lock.c struct gdlm_ls *ls = lp->ls;
lp 83 fs/gfs2/locking/dlm/lock.c if (lp->lksb.sb_status == -DLM_ECANCEL) {
lp 85 fs/gfs2/locking/dlm/lock.c lp->lockname.ln_type,
lp 86 fs/gfs2/locking/dlm/lock.c (unsigned long long)lp->lockname.ln_number,
lp 87 fs/gfs2/locking/dlm/lock.c lp->flags);
lp 89 fs/gfs2/locking/dlm/lock.c lp->req = lp->cur;
lp 91 fs/gfs2/locking/dlm/lock.c if (lp->cur == DLM_LOCK_IV)
lp 92 fs/gfs2/locking/dlm/lock.c lp->lksb.sb_lkid = 0;
lp 96 fs/gfs2/locking/dlm/lock.c if (test_and_clear_bit(LFL_DLM_UNLOCK, &lp->flags)) {
lp 97 fs/gfs2/locking/dlm/lock.c if (lp->lksb.sb_status != -DLM_EUNLOCK) {
lp 99 fs/gfs2/locking/dlm/lock.c lp->lksb.sb_status, lp->lockname.ln_type,
lp 100 fs/gfs2/locking/dlm/lock.c (unsigned long long)lp->lockname.ln_number,
lp 101 fs/gfs2/locking/dlm/lock.c lp->flags);
lp 105 fs/gfs2/locking/dlm/lock.c lp->cur = DLM_LOCK_IV;
lp 106 fs/gfs2/locking/dlm/lock.c lp->req = DLM_LOCK_IV;
lp 107 fs/gfs2/locking/dlm/lock.c lp->lksb.sb_lkid = 0;
lp 109 fs/gfs2/locking/dlm/lock.c if (test_and_clear_bit(LFL_UNLOCK_DELETE, &lp->flags)) {
lp 110 fs/gfs2/locking/dlm/lock.c gdlm_delete_lp(lp);
lp 116 fs/gfs2/locking/dlm/lock.c if (lp->lksb.sb_flags & DLM_SBF_VALNOTVALID)
lp 117 fs/gfs2/locking/dlm/lock.c memset(lp->lksb.sb_lvbptr, 0, GDLM_LVB_SIZE);
lp 119 fs/gfs2/locking/dlm/lock.c if (lp->lksb.sb_flags & DLM_SBF_ALTMODE) {
lp 120 fs/gfs2/locking/dlm/lock.c if (lp->req == DLM_LOCK_PR)
lp 121 fs/gfs2/locking/dlm/lock.c lp->req = DLM_LOCK_CW;
lp 122 fs/gfs2/locking/dlm/lock.c else if (lp->req == DLM_LOCK_CW)
lp 123 fs/gfs2/locking/dlm/lock.c lp->req = DLM_LOCK_PR;
lp 131 fs/gfs2/locking/dlm/lock.c if (test_and_clear_bit(LFL_CANCEL, &lp->flags)) {
lp 133 fs/gfs2/locking/dlm/lock.c lp->lockname.ln_type,
lp 134 fs/gfs2/locking/dlm/lock.c (unsigned long long)lp->lockname.ln_number);
lp 135 fs/gfs2/locking/dlm/lock.c lp->req = lp->cur;
lp 144 fs/gfs2/locking/dlm/lock.c if (lp->lksb.sb_status) {
lp 146 fs/gfs2/locking/dlm/lock.c if ((lp->lksb.sb_status == -EAGAIN) &&
lp 147 fs/gfs2/locking/dlm/lock.c (lp->lkf & DLM_LKF_NOQUEUE)) {
lp 148 fs/gfs2/locking/dlm/lock.c lp->req = lp->cur;
lp 149 fs/gfs2/locking/dlm/lock.c if (lp->cur == DLM_LOCK_IV)
lp 150 fs/gfs2/locking/dlm/lock.c lp->lksb.sb_lkid = 0;
lp 156 fs/gfs2/locking/dlm/lock.c lp->lksb.sb_status, lp->lockname.ln_type,
lp 157 fs/gfs2/locking/dlm/lock.c (unsigned long long)lp->lockname.ln_number,
lp 158 fs/gfs2/locking/dlm/lock.c lp->flags);
lp 166 fs/gfs2/locking/dlm/lock.c if (test_and_clear_bit(LFL_SYNC_LVB, &lp->flags)) {
lp 167 fs/gfs2/locking/dlm/lock.c wake_up_ast(lp);
lp 177 fs/gfs2/locking/dlm/lock.c if (test_and_clear_bit(LFL_REREQUEST, &lp->flags)) {
lp 178 fs/gfs2/locking/dlm/lock.c gdlm_assert(lp->req == DLM_LOCK_NL, "%x,%llx",
lp 179 fs/gfs2/locking/dlm/lock.c lp->lockname.ln_type,
lp 180 fs/gfs2/locking/dlm/lock.c (unsigned long long)lp->lockname.ln_number);
lp 181 fs/gfs2/locking/dlm/lock.c gdlm_assert(lp->prev_req > DLM_LOCK_NL, "%x,%llx",
lp 182 fs/gfs2/locking/dlm/lock.c lp->lockname.ln_type,
lp 183 fs/gfs2/locking/dlm/lock.c (unsigned long long)lp->lockname.ln_number);
lp 185 fs/gfs2/locking/dlm/lock.c lp->cur = DLM_LOCK_NL;
lp 186 fs/gfs2/locking/dlm/lock.c lp->req = lp->prev_req;
lp 187 fs/gfs2/locking/dlm/lock.c lp->prev_req = DLM_LOCK_IV;
lp 188 fs/gfs2/locking/dlm/lock.c lp->lkf &= ~DLM_LKF_CONVDEADLK;
lp 190 fs/gfs2/locking/dlm/lock.c set_bit(LFL_NOCACHE, &lp->flags);
lp 193 fs/gfs2/locking/dlm/lock.c !test_bit(LFL_NOBLOCK, &lp->flags))
lp 194 fs/gfs2/locking/dlm/lock.c gdlm_queue_delayed(lp);
lp 196 fs/gfs2/locking/dlm/lock.c queue_submit(lp);
lp 210 fs/gfs2/locking/dlm/lock.c !test_bit(LFL_NOBLOCK, &lp->flags) &&
lp 211 fs/gfs2/locking/dlm/lock.c lp->req != DLM_LOCK_NL) {
lp 213 fs/gfs2/locking/dlm/lock.c lp->cur = lp->req;
lp 214 fs/gfs2/locking/dlm/lock.c lp->prev_req = lp->req;
lp 215 fs/gfs2/locking/dlm/lock.c lp->req = DLM_LOCK_NL;
lp 216 fs/gfs2/locking/dlm/lock.c lp->lkf |= DLM_LKF_CONVERT;
lp 217 fs/gfs2/locking/dlm/lock.c lp->lkf &= ~DLM_LKF_CONVDEADLK;
lp 220 fs/gfs2/locking/dlm/lock.c lp->lockname.ln_type,
lp 221 fs/gfs2/locking/dlm/lock.c (unsigned long long)lp->lockname.ln_number,
lp 222 fs/gfs2/locking/dlm/lock.c lp->lksb.sb_lkid, lp->cur, lp->req);
lp 224 fs/gfs2/locking/dlm/lock.c set_bit(LFL_REREQUEST, &lp->flags);
lp 225 fs/gfs2/locking/dlm/lock.c queue_submit(lp);
lp 234 fs/gfs2/locking/dlm/lock.c if (lp->lksb.sb_flags & DLM_SBF_DEMOTED)
lp 235 fs/gfs2/locking/dlm/lock.c set_bit(LFL_NOCACHE, &lp->flags);
lp 242 fs/gfs2/locking/dlm/lock.c if (test_bit(LFL_INLOCK, &lp->flags)) {
lp 243 fs/gfs2/locking/dlm/lock.c clear_bit(LFL_NOBLOCK, &lp->flags);
lp 244 fs/gfs2/locking/dlm/lock.c lp->cur = lp->req;
lp 245 fs/gfs2/locking/dlm/lock.c wake_up_ast(lp);
lp 253 fs/gfs2/locking/dlm/lock.c clear_bit(LFL_NOBLOCK, &lp->flags);
lp 254 fs/gfs2/locking/dlm/lock.c lp->cur = lp->req;
lp 256 fs/gfs2/locking/dlm/lock.c acb.lc_name = lp->lockname;
lp 257 fs/gfs2/locking/dlm/lock.c acb.lc_ret |= gdlm_make_lmstate(lp->cur);
lp 264 fs/gfs2/locking/dlm/lock.c struct gdlm_lock *lp = astarg;
lp 265 fs/gfs2/locking/dlm/lock.c clear_bit(LFL_ACTIVE, &lp->flags);
lp 266 fs/gfs2/locking/dlm/lock.c process_complete(lp);
lp 271 fs/gfs2/locking/dlm/lock.c struct gdlm_ls *ls = lp->ls;
lp 288 fs/gfs2/locking/dlm/lock.c ls->fscb(ls->sdp, cb, &lp->lockname);
lp 294 fs/gfs2/locking/dlm/lock.c struct gdlm_lock *lp = astarg;
lp 298 fs/gfs2/locking/dlm/lock.c lp->lockname.ln_type,
lp 299 fs/gfs2/locking/dlm/lock.c (unsigned long long)lp->lockname.ln_number);
lp 303 fs/gfs2/locking/dlm/lock.c process_blocking(lp, mode);
lp 331 fs/gfs2/locking/dlm/lock.c if (lp->cur != DLM_LOCK_IV)
lp 332 fs/gfs2/locking/dlm/lock.c gdlm_assert(lp->cur == cur, "%d, %d", lp->cur, cur);
lp 361 fs/gfs2/locking/dlm/lock.c if (lp->lksb.sb_lkid != 0) {
lp 365 fs/gfs2/locking/dlm/lock.c if (lp->lvb)
lp 384 fs/gfs2/locking/dlm/lock.c struct gdlm_lock *lp;
lp 386 fs/gfs2/locking/dlm/lock.c lp = kzalloc(sizeof(struct gdlm_lock), GFP_NOFS);
lp 387 fs/gfs2/locking/dlm/lock.c if (!lp)
lp 390 fs/gfs2/locking/dlm/lock.c lp->lockname = *name;
lp 391 fs/gfs2/locking/dlm/lock.c make_strname(name, &lp->strname);
lp 392 fs/gfs2/locking/dlm/lock.c lp->ls = ls;
lp 393 fs/gfs2/locking/dlm/lock.c lp->cur = DLM_LOCK_IV;
lp 394 fs/gfs2/locking/dlm/lock.c INIT_LIST_HEAD(&lp->delay_list);
lp 400 fs/gfs2/locking/dlm/lock.c *lpp = lp;
lp 407 fs/gfs2/locking/dlm/lock.c struct gdlm_lock *lp;
lp 410 fs/gfs2/locking/dlm/lock.c error = gdlm_create_lp(lockspace, name, &lp);
lp 412 fs/gfs2/locking/dlm/lock.c *lockp = lp;
lp 423 fs/gfs2/locking/dlm/lock.c struct gdlm_ls *ls = lp->ls;
lp 433 fs/gfs2/locking/dlm/lock.c !test_bit(LFL_NOBLOCK, &lp->flags) && lp->req != DLM_LOCK_NL) {
lp 434 fs/gfs2/locking/dlm/lock.c gdlm_queue_delayed(lp);
lp 442 fs/gfs2/locking/dlm/lock.c if (test_bit(LFL_NOBAST, &lp->flags))
lp 445 fs/gfs2/locking/dlm/lock.c set_bit(LFL_ACTIVE, &lp->flags);
lp 447 fs/gfs2/locking/dlm/lock.c log_debug("lk %x,%llx id %x %d,%d %x", lp->lockname.ln_type,
lp 448 fs/gfs2/locking/dlm/lock.c (unsigned long long)lp->lockname.ln_number, lp->lksb.sb_lkid,
lp 449 fs/gfs2/locking/dlm/lock.c lp->cur, lp->req, lp->lkf);
lp 451 fs/gfs2/locking/dlm/lock.c error = dlm_lock(ls->dlm_lockspace, lp->req, &lp->lksb, lp->lkf,
lp 452 fs/gfs2/locking/dlm/lock.c lp->strname.name, lp->strname.namelen, 0, gdlm_ast,
lp 453 fs/gfs2/locking/dlm/lock.c lp, bast ? gdlm_bast : NULL);
lp 455 fs/gfs2/locking/dlm/lock.c if ((error == -EAGAIN) && (lp->lkf & DLM_LKF_NOQUEUE)) {
lp 456 fs/gfs2/locking/dlm/lock.c lp->lksb.sb_status = -EAGAIN;
lp 457 fs/gfs2/locking/dlm/lock.c gdlm_ast(lp);
lp 463 fs/gfs2/locking/dlm/lock.c "flags=%lx", ls->fsname, lp->lockname.ln_type,
lp 464 fs/gfs2/locking/dlm/lock.c (unsigned long long)lp->lockname.ln_number, error,
lp 465 fs/gfs2/locking/dlm/lock.c lp->cur, lp->req, lp->lkf, lp->flags);
lp 473 fs/gfs2/locking/dlm/lock.c struct gdlm_ls *ls = lp->ls;
lp 477 fs/gfs2/locking/dlm/lock.c set_bit(LFL_DLM_UNLOCK, &lp->flags);
lp 478 fs/gfs2/locking/dlm/lock.c set_bit(LFL_ACTIVE, &lp->flags);
lp 480 fs/gfs2/locking/dlm/lock.c if (lp->lvb)
lp 483 fs/gfs2/locking/dlm/lock.c log_debug("un %x,%llx %x %d %x", lp->lockname.ln_type,
lp 484 fs/gfs2/locking/dlm/lock.c (unsigned long long)lp->lockname.ln_number,
lp 485 fs/gfs2/locking/dlm/lock.c lp->lksb.sb_lkid, lp->cur, lkf);
lp 487 fs/gfs2/locking/dlm/lock.c error = dlm_unlock(ls->dlm_lockspace, lp->lksb.sb_lkid, lkf, NULL, lp);
lp 491 fs/gfs2/locking/dlm/lock.c "flags=%lx", ls->fsname, lp->lockname.ln_type,
lp 492 fs/gfs2/locking/dlm/lock.c (unsigned long long)lp->lockname.ln_number, error,
lp 493 fs/gfs2/locking/dlm/lock.c lp->cur, lp->req, lp->lkf, lp->flags);
lp 502 fs/gfs2/locking/dlm/lock.c struct gdlm_lock *lp = lock;
lp 510 fs/gfs2/locking/dlm/lock.c clear_bit(LFL_DLM_CANCEL, &lp->flags);
lp 512 fs/gfs2/locking/dlm/lock.c set_bit(LFL_NOBLOCK, &lp->flags);
lp 514 fs/gfs2/locking/dlm/lock.c check_cur_state(lp, cur_state);
lp 515 fs/gfs2/locking/dlm/lock.c lp->req = make_mode(req_state);
lp 516 fs/gfs2/locking/dlm/lock.c lp->lkf = make_flags(lp, flags, lp->cur, lp->req);
lp 518 fs/gfs2/locking/dlm/lock.c return gdlm_do_lock(lp);
lp 523 fs/gfs2/locking/dlm/lock.c struct gdlm_lock *lp = lock;
lp 525 fs/gfs2/locking/dlm/lock.c clear_bit(LFL_DLM_CANCEL, &lp->flags);
lp 526 fs/gfs2/locking/dlm/lock.c if (lp->cur == DLM_LOCK_IV)
lp 528 fs/gfs2/locking/dlm/lock.c return gdlm_do_unlock(lp);
lp 533 fs/gfs2/locking/dlm/lock.c struct gdlm_lock *lp = lock;
lp 534 fs/gfs2/locking/dlm/lock.c struct gdlm_ls *ls = lp->ls;
lp 537 fs/gfs2/locking/dlm/lock.c if (test_bit(LFL_DLM_CANCEL, &lp->flags))
lp 540 fs/gfs2/locking/dlm/lock.c log_info("gdlm_cancel %x,%llx flags %lx", lp->lockname.ln_type,
lp 541 fs/gfs2/locking/dlm/lock.c (unsigned long long)lp->lockname.ln_number, lp->flags);
lp 544 fs/gfs2/locking/dlm/lock.c if (!list_empty(&lp->delay_list)) {
lp 545 fs/gfs2/locking/dlm/lock.c list_del_init(&lp->delay_list);
lp 551 fs/gfs2/locking/dlm/lock.c set_bit(LFL_CANCEL, &lp->flags);
lp 552 fs/gfs2/locking/dlm/lock.c set_bit(LFL_ACTIVE, &lp->flags);
lp 553 fs/gfs2/locking/dlm/lock.c gdlm_ast(lp);
lp 557 fs/gfs2/locking/dlm/lock.c if (!test_bit(LFL_ACTIVE, &lp->flags) ||
lp 558 fs/gfs2/locking/dlm/lock.c test_bit(LFL_DLM_UNLOCK, &lp->flags)) {
lp 560 fs/gfs2/locking/dlm/lock.c lp->lockname.ln_type,
lp 561 fs/gfs2/locking/dlm/lock.c (unsigned long long)lp->lockname.ln_number, lp->flags);
lp 567 fs/gfs2/locking/dlm/lock.c set_bit(LFL_DLM_CANCEL, &lp->flags);
lp 568 fs/gfs2/locking/dlm/lock.c set_bit(LFL_ACTIVE, &lp->flags);
lp 570 fs/gfs2/locking/dlm/lock.c error = dlm_unlock(ls->dlm_lockspace, lp->lksb.sb_lkid, DLM_LKF_CANCEL,
lp 571 fs/gfs2/locking/dlm/lock.c NULL, lp);
lp 574 fs/gfs2/locking/dlm/lock.c lp->lockname.ln_type,
lp 575 fs/gfs2/locking/dlm/lock.c (unsigned long long)lp->lockname.ln_number, lp->flags);
lp 578 fs/gfs2/locking/dlm/lock.c clear_bit(LFL_DLM_CANCEL, &lp->flags);
lp 589 fs/gfs2/locking/dlm/lock.c lp->lksb.sb_lvbptr = lvb;
lp 590 fs/gfs2/locking/dlm/lock.c lp->lvb = lvb;
lp 596 fs/gfs2/locking/dlm/lock.c kfree(lp->lvb);
lp 597 fs/gfs2/locking/dlm/lock.c lp->lvb = NULL;
lp 598 fs/gfs2/locking/dlm/lock.c lp->lksb.sb_lvbptr = NULL;
lp 616 fs/gfs2/locking/dlm/lock.c if (lp->hold_null) {
lp 621 fs/gfs2/locking/dlm/lock.c error = gdlm_create_lp(lp->ls, &lp->lockname, &lpn);
lp 644 fs/gfs2/locking/dlm/lock.c lp->hold_null = lpn;
lp 655 fs/gfs2/locking/dlm/lock.c struct gdlm_lock *lpn = lp->hold_null;
lp 657 fs/gfs2/locking/dlm/lock.c gdlm_assert(lpn, "%x,%llx", lp->lockname.ln_type,
lp 658 fs/gfs2/locking/dlm/lock.c (unsigned long long)lp->lockname.ln_number);
lp 663 fs/gfs2/locking/dlm/lock.c lp->hold_null = NULL;
lp 672 fs/gfs2/locking/dlm/lock.c struct gdlm_lock *lp = lock;
lp 675 fs/gfs2/locking/dlm/lock.c error = gdlm_add_lvb(lp);
lp 679 fs/gfs2/locking/dlm/lock.c *lvbp = lp->lvb;
lp 681 fs/gfs2/locking/dlm/lock.c error = hold_null_lock(lp);
lp 683 fs/gfs2/locking/dlm/lock.c gdlm_del_lvb(lp);
lp 690 fs/gfs2/locking/dlm/lock.c struct gdlm_lock *lp = lock;
lp 692 fs/gfs2/locking/dlm/lock.c unhold_null_lock(lp);
lp 693 fs/gfs2/locking/dlm/lock.c gdlm_del_lvb(lp);
lp 698 fs/gfs2/locking/dlm/lock.c struct gdlm_lock *lp, *safe;
lp 701 fs/gfs2/locking/dlm/lock.c list_for_each_entry_safe(lp, safe, &ls->delayed, delay_list) {
lp 702 fs/gfs2/locking/dlm/lock.c list_del_init(&lp->delay_list);
lp 703 fs/gfs2/locking/dlm/lock.c list_add_tail(&lp->delay_list, &ls->submit);
lp 26 fs/gfs2/locking/dlm/thread.c struct gdlm_lock *lp = NULL;
lp 35 fs/gfs2/locking/dlm/thread.c lp = list_entry(ls->submit.next, struct gdlm_lock,
lp 37 fs/gfs2/locking/dlm/thread.c list_del_init(&lp->delay_list);
lp 39 fs/gfs2/locking/dlm/thread.c gdlm_do_lock(lp);
lp 2795 fs/jfs/jfs_dmap.c int lp, pp, k;
lp 2800 fs/jfs/jfs_dmap.c lp = leafno + le32_to_cpu(tp->dmt_leafidx);
lp 2805 fs/jfs/jfs_dmap.c if (tp->dmt_stree[lp] == newval)
lp 2810 fs/jfs/jfs_dmap.c tp->dmt_stree[lp] = newval;
lp 2818 fs/jfs/jfs_dmap.c lp = ((lp - 1) & ~0x03) + 1;
lp 2822 fs/jfs/jfs_dmap.c pp = (lp - 1) >> 2;
lp 2826 fs/jfs/jfs_dmap.c max = TREEMAX(&tp->dmt_stree[lp]);
lp 2840 fs/jfs/jfs_dmap.c lp = pp;
lp 940 fs/jfs/jfs_dtree.c dtpage_t *lp; /* left child page */
lp 1132 fs/jfs/jfs_dtree.c lp = sp;
lp 1186 fs/jfs/jfs_dtree.c rc = ciGetLeafPrefixKey(lp,
lp 1187 fs/jfs/jfs_dtree.c lp->header.nextindex-1,
lp 2447 fs/jfs/jfs_dtree.c dtpage_t *p, *pp, *rp = 0, *lp= 0;
lp 2507 fs/jfs/jfs_dtree.c DT_GETPAGE(ip, prevbn, lmp, PSIZE, lp, rc);
lp 2532 fs/jfs/jfs_dtree.c lp->header.next = cpu_to_le64(nxaddr);
lp 3785 fs/jfs/jfs_dtree.c dtGetKey(lp, li, &lkey, flag);
lp 360 fs/jfs/jfs_logmgr.c struct logpage *lp; /* dst log page */
lp 379 fs/jfs/jfs_logmgr.c lp = (struct logpage *) bp->l_ldata;
lp 430 fs/jfs/jfs_logmgr.c lp = (struct logpage *) bp->l_ldata;
lp 443 fs/jfs/jfs_logmgr.c dst = (caddr_t) lp + dstoffset;
lp 455 fs/jfs/jfs_logmgr.c lp = (struct logpage *) bp->l_ldata;
lp 466 fs/jfs/jfs_logmgr.c lvd = (struct lvd *) ((caddr_t) lp + dstoffset);
lp 491 fs/jfs/jfs_logmgr.c dst = (caddr_t) lp + dstoffset;
lp 551 fs/jfs/jfs_logmgr.c lp = (struct logpage *) bp->l_ldata;
lp 573 fs/jfs/jfs_logmgr.c struct logpage *lp;
lp 583 fs/jfs/jfs_logmgr.c lp = (struct logpage *) bp->l_ldata;
lp 584 fs/jfs/jfs_logmgr.c lspn = le32_to_cpu(lp->h.page);
lp 634 fs/jfs/jfs_logmgr.c lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_ceor);
lp 654 fs/jfs/jfs_logmgr.c lp = (struct logpage *) nextbp->l_ldata;
lp 655 fs/jfs/jfs_logmgr.c lp->h.page = lp->t.page = cpu_to_le32(lspn + 1);
lp 656 fs/jfs/jfs_logmgr.c lp->h.eor = lp->t.eor = cpu_to_le16(LOGPHDRSIZE);
lp 759 fs/jfs/jfs_logmgr.c struct logpage *lp;
lp 788 fs/jfs/jfs_logmgr.c lp = (struct logpage *) bp->l_ldata;
lp 795 fs/jfs/jfs_logmgr.c lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_ceor);
lp 803 fs/jfs/jfs_logmgr.c lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_ceor);
lp 825 fs/jfs/jfs_logmgr.c struct logpage *lp;
lp 885 fs/jfs/jfs_logmgr.c lp = (struct logpage *) bp->l_ldata;
lp 887 fs/jfs/jfs_logmgr.c lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_eor);
lp 949 fs/jfs/jfs_logmgr.c struct logsyncblk *lp;
lp 970 fs/jfs/jfs_logmgr.c lp = list_entry(log->synclist.next,
lp 972 fs/jfs/jfs_logmgr.c log->sync = lp->lsn;
lp 1274 fs/jfs/jfs_logmgr.c struct logpage *lp;
lp 1366 fs/jfs/jfs_logmgr.c lp = (struct logpage *) bp->l_ldata;
lp 1370 fs/jfs/jfs_logmgr.c le16_to_cpu(lp->h.eor));
lp 1398 fs/jfs/jfs_logmgr.c lp = (struct logpage *) bp->l_ldata;
lp 1399 fs/jfs/jfs_logmgr.c lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_eor);
lp 1622 fs/jfs/jfs_logmgr.c struct logsyncblk *lp;
lp 1625 fs/jfs/jfs_logmgr.c list_for_each_entry(lp, &log->synclist, synclist) {
lp 1626 fs/jfs/jfs_logmgr.c if (lp->xflag & COMMIT_PAGE) {
lp 1627 fs/jfs/jfs_logmgr.c struct metapage *mp = (struct metapage *)lp;
lp 1638 fs/jfs/jfs_logmgr.c lp, sizeof(struct tblock), 0);
lp 1669 fs/jfs/jfs_logmgr.c struct logpage *lp;
lp 1687 fs/jfs/jfs_logmgr.c lp = (struct logpage *) bp->l_ldata;
lp 1688 fs/jfs/jfs_logmgr.c lp->h.eor = lp->t.eor = cpu_to_le16(bp->l_eor);
lp 2393 fs/jfs/jfs_logmgr.c struct logpage *lp;
lp 2460 fs/jfs/jfs_logmgr.c lp = (struct logpage *) bp->l_ldata;
lp 2465 fs/jfs/jfs_logmgr.c lp->h.page = lp->t.page = cpu_to_le32(npages - 3);
lp 2466 fs/jfs/jfs_logmgr.c lp->h.eor = lp->t.eor = cpu_to_le16(LOGPHDRSIZE + LOGRDSIZE);
lp 2468 fs/jfs/jfs_logmgr.c lrd_ptr = (struct lrd *) &lp->data;
lp 2485 fs/jfs/jfs_logmgr.c lp->h.page = lp->t.page = cpu_to_le32(lspn);
lp 2486 fs/jfs/jfs_logmgr.c lp->h.eor = lp->t.eor = cpu_to_le16(LOGPHDRSIZE);
lp 2829 fs/jfs/jfs_xtree.c xtpage_t *p, *pp, *rp, *lp; /* base B+-tree index page */
lp 3003 fs/jfs/jfs_xtree.c XT_GETPAGE(ip, prevbn, lmp, PSIZE, lp, rc);
lp 3021 fs/jfs/jfs_xtree.c lp->header.next = cpu_to_le64(nxaddr);
lp 627 fs/ubifs/debug.c "flags %#x\n", lp->lnum, lp->free, lp->dirty,
lp 628 fs/ubifs/debug.c c->leb_size - lp->free - lp->dirty, lp->flags);
lp 634 fs/ubifs/debug.c struct ubifs_lprops lp;
lp 642 fs/ubifs/debug.c err = ubifs_read_one_lp(c, lnum, &lp);
lp 646 fs/ubifs/debug.c dbg_dump_lprop(c, &lp);
lp 745 fs/ubifs/debug.c struct ubifs_lprops *lp = &pnode->lprops[i];
lp 748 fs/ubifs/debug.c i, lp->free, lp->dirty, lp->flags, lp->lnum);
lp 237 fs/ubifs/find.c const struct ubifs_lprops *lp = NULL, *idx_lp = NULL;
lp 264 fs/ubifs/find.c lp = ubifs_fast_find_empty(c);
lp 265 fs/ubifs/find.c if (lp)
lp 269 fs/ubifs/find.c lp = ubifs_fast_find_freeable(c);
lp 270 fs/ubifs/find.c if (lp)
lp 305 fs/ubifs/find.c lp = heap->arr[0];
lp 306 fs/ubifs/find.c if (lp->dirty + lp->free < min_space)
lp 307 fs/ubifs/find.c lp = NULL;
lp 311 fs/ubifs/find.c if (idx_lp && lp) {
lp 312 fs/ubifs/find.c if (idx_lp->free + idx_lp->dirty >= lp->free + lp->dirty)
lp 313 fs/ubifs/find.c lp = idx_lp;
lp 314 fs/ubifs/find.c } else if (idx_lp && !lp)
lp 315 fs/ubifs/find.c lp = idx_lp;
lp 317 fs/ubifs/find.c if (lp) {
lp 318 fs/ubifs/find.c ubifs_assert(lp->free + lp->dirty >= c->dead_wm);
lp 324 fs/ubifs/find.c lp = scan_for_dirty(c, min_space, pick_free, exclude_index);
lp 325 fs/ubifs/find.c if (IS_ERR(lp)) {
lp 326 fs/ubifs/find.c err = PTR_ERR(lp);
lp 329 fs/ubifs/find.c ubifs_assert(lp->dirty >= c->dead_wm ||
lp 330 fs/ubifs/find.c (pick_free && lp->free + lp->dirty == c->leb_size));
lp 334 fs/ubifs/find.c lp->lnum, lp->free, lp->dirty, lp->flags);
lp 336 fs/ubifs/find.c lp = ubifs_change_lp(c, lp, LPROPS_NC, LPROPS_NC,
lp 337 fs/ubifs/find.c lp->flags | LPROPS_TAKEN, 0);
lp 338 fs/ubifs/find.c if (IS_ERR(lp)) {
lp 339 fs/ubifs/find.c err = PTR_ERR(lp);
lp 343 fs/ubifs/find.c memcpy(ret_lp, lp, sizeof(struct ubifs_lprops));
lp 892 fs/ubifs/find.c const struct ubifs_lprops *lp;
lp 903 fs/ubifs/find.c lp = ubifs_lpt_lookup_dirty(c, lnum);
lp 904 fs/ubifs/find.c if (unlikely(IS_ERR(lp)))
lp 905 fs/ubifs/find.c return PTR_ERR(lp);
lp 906 fs/ubifs/find.c lp = ubifs_change_lp(c, lp, LPROPS_NC, LPROPS_NC,
lp 907 fs/ubifs/find.c lp->flags | LPROPS_INDEX, -1);
lp 908 fs/ubifs/find.c if (unlikely(IS_ERR(lp)))
lp 909 fs/ubifs/find.c return PTR_ERR(lp);
lp 911 fs/ubifs/find.c lp->lnum, lp->dirty, lp->free, lp->flags);
lp 921 fs/ubifs/find.c const struct ubifs_lprops *lp;
lp 929 fs/ubifs/find.c lp = ubifs_lpt_lookup(c, lnum);
lp 930 fs/ubifs/find.c if (IS_ERR(lp))
lp 931 fs/ubifs/find.c return PTR_ERR(lp);
lp 932 fs/ubifs/find.c if ((lp->flags & LPROPS_TAKEN) || !(lp->flags & LPROPS_INDEX))
lp 934 fs/ubifs/find.c lp = ubifs_change_lp(c, lp, LPROPS_NC, LPROPS_NC,
lp 935 fs/ubifs/find.c lp->flags | LPROPS_TAKEN, 0);
lp 936 fs/ubifs/find.c if (IS_ERR(lp))
lp 937 fs/ubifs/find.c return PTR_ERR(lp);
lp 940 fs/ubifs/find.c dbg_find("LEB %d, dirty %d and free %d flags %#x", lp->lnum, lp->dirty,
lp 941 fs/ubifs/find.c lp->free, lp->flags);
lp 942 fs/ubifs/find.c ubifs_assert(lp->flags | LPROPS_TAKEN);
lp 943 fs/ubifs/find.c ubifs_assert(lp->flags | LPROPS_INDEX);
lp 275 fs/ubifs/gc.c int err = 0, lnum = lp->lnum;
lp 297 fs/ubifs/gc.c lnum, lp->free, lp->dirty);
lp 333 fs/ubifs/gc.c lnum, lp->free, lp->dirty);
lp 421 fs/ubifs/gc.c struct ubifs_lprops lp;
lp 479 fs/ubifs/gc.c ret = ubifs_find_dirty_leb(c, &lp, min_space, anyway ? 0 : 1);
lp 487 fs/ubifs/gc.c "(min. space %d)", lp.lnum, lp.free, lp.dirty,
lp 488 fs/ubifs/gc.c lp.free + lp.dirty, min_space);
lp 490 fs/ubifs/gc.c if (lp.free + lp.dirty == c->leb_size) {
lp 492 fs/ubifs/gc.c dbg_gc("LEB %d is free, return it", lp.lnum);
lp 497 fs/ubifs/gc.c ubifs_assert(!(lp.flags & LPROPS_INDEX));
lp 498 fs/ubifs/gc.c if (lp.free != c->leb_size) {
lp 508 fs/ubifs/gc.c ret = ubifs_change_one_lp(c, lp.lnum,
lp 514 fs/ubifs/gc.c ret = ubifs_leb_unmap(c, lp.lnum);
lp 517 fs/ubifs/gc.c ret = lp.lnum;
lp 525 fs/ubifs/gc.c ret = ubifs_garbage_collect_leb(c, &lp);
lp 536 fs/ubifs/gc.c err = ubifs_return_leb(c, lp.lnum);
lp 546 fs/ubifs/gc.c dbg_gc("LEB %d freed, return", lp.lnum);
lp 547 fs/ubifs/gc.c ret = lp.lnum;
lp 558 fs/ubifs/gc.c dbg_gc("indexing LEB %d freed, continue", lp.lnum);
lp 564 fs/ubifs/gc.c dbg_gc("LEB %d retained, freed %d bytes", lp.lnum,
lp 627 fs/ubifs/gc.c ubifs_return_leb(c, lp.lnum);
lp 645 fs/ubifs/gc.c const struct ubifs_lprops *lp;
lp 655 fs/ubifs/gc.c lp = ubifs_fast_find_freeable(c);
lp 656 fs/ubifs/gc.c if (unlikely(IS_ERR(lp))) {
lp 657 fs/ubifs/gc.c err = PTR_ERR(lp);
lp 660 fs/ubifs/gc.c if (!lp)
lp 662 fs/ubifs/gc.c ubifs_assert(!(lp->flags & LPROPS_TAKEN));
lp 663 fs/ubifs/gc.c ubifs_assert(!(lp->flags & LPROPS_INDEX));
lp 664 fs/ubifs/gc.c err = ubifs_leb_unmap(c, lp->lnum);
lp 667 fs/ubifs/gc.c lp = ubifs_change_lp(c, lp, c->leb_size, 0, lp->flags, 0);
lp 668 fs/ubifs/gc.c if (unlikely(IS_ERR(lp))) {
lp 669 fs/ubifs/gc.c err = PTR_ERR(lp);
lp 672 fs/ubifs/gc.c ubifs_assert(!(lp->flags & LPROPS_TAKEN));
lp 673 fs/ubifs/gc.c ubifs_assert(!(lp->flags & LPROPS_INDEX));
lp 682 fs/ubifs/gc.c lp = ubifs_fast_find_frdi_idx(c);
lp 683 fs/ubifs/gc.c if (unlikely(IS_ERR(lp))) {
lp 684 fs/ubifs/gc.c err = PTR_ERR(lp);
lp 687 fs/ubifs/gc.c if (!lp)
lp 694 fs/ubifs/gc.c ubifs_assert(!(lp->flags & LPROPS_TAKEN));
lp 695 fs/ubifs/gc.c ubifs_assert(lp->flags & LPROPS_INDEX);
lp 697 fs/ubifs/gc.c flags = (lp->flags | LPROPS_TAKEN) ^ LPROPS_INDEX;
lp 698 fs/ubifs/gc.c lp = ubifs_change_lp(c, lp, c->leb_size, 0, flags, 1);
lp 699 fs/ubifs/gc.c if (unlikely(IS_ERR(lp))) {
lp 700 fs/ubifs/gc.c err = PTR_ERR(lp);
lp 704 fs/ubifs/gc.c ubifs_assert(lp->flags & LPROPS_TAKEN);
lp 705 fs/ubifs/gc.c ubifs_assert(!(lp->flags & LPROPS_INDEX));
lp 706 fs/ubifs/gc.c idx_gc->lnum = lp->lnum;
lp 196 fs/ubifs/lprops.c struct ubifs_lprops *lp;
lp 198 fs/ubifs/lprops.c lp = heap->arr[cpos];
lp 199 fs/ubifs/lprops.c lp->flags &= ~LPROPS_CAT_MASK;
lp 200 fs/ubifs/lprops.c lp->flags |= LPROPS_UNCAT;
lp 201 fs/ubifs/lprops.c list_add(&lp->list, &c->uncat_list);
lp 551 fs/ubifs/lprops.c struct ubifs_lprops *lprops = (struct ubifs_lprops *)lp;
lp 695 fs/ubifs/lprops.c const struct ubifs_lprops *lp;
lp 699 fs/ubifs/lprops.c lp = ubifs_lpt_lookup_dirty(c, lnum);
lp 700 fs/ubifs/lprops.c if (IS_ERR(lp)) {
lp 701 fs/ubifs/lprops.c err = PTR_ERR(lp);
lp 705 fs/ubifs/lprops.c flags = (lp->flags | flags_set) & ~flags_clean;
lp 706 fs/ubifs/lprops.c lp = ubifs_change_lp(c, lp, free, dirty, flags, idx_gc_cnt);
lp 707 fs/ubifs/lprops.c if (IS_ERR(lp))
lp 708 fs/ubifs/lprops.c err = PTR_ERR(lp);
lp 731 fs/ubifs/lprops.c const struct ubifs_lprops *lp;
lp 735 fs/ubifs/lprops.c lp = ubifs_lpt_lookup_dirty(c, lnum);
lp 736 fs/ubifs/lprops.c if (IS_ERR(lp)) {
lp 737 fs/ubifs/lprops.c err = PTR_ERR(lp);
lp 741 fs/ubifs/lprops.c flags = (lp->flags | flags_set) & ~flags_clean;
lp 742 fs/ubifs/lprops.c lp = ubifs_change_lp(c, lp, free, lp->dirty + dirty, flags, 0);
lp 743 fs/ubifs/lprops.c if (IS_ERR(lp))
lp 744 fs/ubifs/lprops.c err = PTR_ERR(lp);
lp 774 fs/ubifs/lprops.c memcpy(lp, lpp, sizeof(struct ubifs_lprops));
lp 992 fs/ubifs/lprops.c struct ubifs_lprops *lp;
lp 1003 fs/ubifs/lprops.c lp = ubifs_lpt_lookup(c, lprops->lnum);
lp 1004 fs/ubifs/lprops.c if (IS_ERR(lp)) {
lp 1008 fs/ubifs/lprops.c if (lprops != lp) {
lp 1010 fs/ubifs/lprops.c (size_t)lprops, (size_t)lp, lprops->lnum,
lp 1011 fs/ubifs/lprops.c lp->lnum);
lp 1016 fs/ubifs/lprops.c lp = heap->arr[j];
lp 1017 fs/ubifs/lprops.c if (lp == lprops) {
lp 1021 fs/ubifs/lprops.c if (lp->lnum == lprops->lnum) {
lp 1064 fs/ubifs/lprops.c int cat, lnum = lp->lnum, is_idx = 0, used = 0, free, dirty;
lp 1066 fs/ubifs/lprops.c cat = lp->flags & LPROPS_CAT_MASK;
lp 1068 fs/ubifs/lprops.c cat = ubifs_categorize_lprops(c, lp);
lp 1069 fs/ubifs/lprops.c if (cat != (lp->flags & LPROPS_CAT_MASK)) {
lp 1071 fs/ubifs/lprops.c (lp->flags & LPROPS_CAT_MASK), cat);
lp 1099 fs/ubifs/lprops.c if (lprops == lp) {
lp 1115 fs/ubifs/lprops.c if ((lp->hpos != -1 && heap->arr[lp->hpos]->lnum != lnum) ||
lp 1116 fs/ubifs/lprops.c lp != heap->arr[lp->hpos]) {
lp 1128 fs/ubifs/lprops.c if (lp->free == c->leb_size) {
lp 1137 fs/ubifs/lprops.c if (lp->free + lp->dirty == c->leb_size &&
lp 1138 fs/ubifs/lprops.c !(lp->flags & LPROPS_INDEX)) {
lp 1141 fs/ubifs/lprops.c lst->total_free += lp->free;
lp 1142 fs/ubifs/lprops.c lst->total_dirty += lp->dirty;
lp 1191 fs/ubifs/lprops.c if (lp->free + lp->dirty == c->leb_size &&
lp 1193 fs/ubifs/lprops.c if ((is_idx && !(lp->flags & LPROPS_INDEX)) ||
lp 1195 fs/ubifs/lprops.c lp->free == c->leb_size) {
lp 1203 fs/ubifs/lprops.c free = lp->free;
lp 1204 fs/ubifs/lprops.c dirty = lp->dirty;
lp 1208 fs/ubifs/lprops.c if (is_idx && lp->free + lp->dirty == free + dirty &&
lp 1221 fs/ubifs/lprops.c free = lp->free;
lp 1222 fs/ubifs/lprops.c dirty = lp->dirty;
lp 1225 fs/ubifs/lprops.c if (lp->free != free || lp->dirty != dirty)
lp 1228 fs/ubifs/lprops.c if (is_idx && !(lp->flags & LPROPS_INDEX)) {
lp 1239 fs/ubifs/lprops.c if (!is_idx && (lp->flags & LPROPS_INDEX)) {
lp 1250 fs/ubifs/lprops.c if (!(lp->flags & LPROPS_INDEX))
lp 1255 fs/ubifs/lprops.c if (!(lp->flags & LPROPS_INDEX)) {
lp 1271 fs/ubifs/lprops.c lnum, lp->free, lp->dirty, lp->flags, free, dirty);
lp 2069 fs/ubifs/lpt.c struct ubifs_lprops *lp, *lprops = &pnode->lprops[i];
lp 2144 fs/ubifs/lpt.c list_for_each_entry(lp, list, list)
lp 2145 fs/ubifs/lpt.c if (lprops == lp) {
lp 1085 fs/ubifs/recovery.c struct ubifs_lprops lp;
lp 1101 fs/ubifs/recovery.c err = ubifs_find_dirty_leb(c, &lp, wbuf->offs, 2);
lp 1107 fs/ubifs/recovery.c ubifs_assert(!(lp.flags & LPROPS_INDEX));
lp 1108 fs/ubifs/recovery.c lnum = lp.lnum;
lp 1109 fs/ubifs/recovery.c if (lp.free + lp.dirty == c->leb_size) {
lp 1111 fs/ubifs/recovery.c if (lp.free != c->leb_size) {
lp 1130 fs/ubifs/recovery.c if (lp.free + lp.dirty < wbuf->offs) {
lp 1152 fs/ubifs/recovery.c err = ubifs_garbage_collect_leb(c, &lp);
lp 108 fs/ubifs/replay.c const struct ubifs_lprops *lp;
lp 113 fs/ubifs/replay.c lp = ubifs_lpt_lookup_dirty(c, r->lnum);
lp 114 fs/ubifs/replay.c if (IS_ERR(lp)) {
lp 115 fs/ubifs/replay.c err = PTR_ERR(lp);
lp 119 fs/ubifs/replay.c dirty = lp->dirty;
lp 120 fs/ubifs/replay.c if (r->offs == 0 && (lp->free != c->leb_size || lp->dirty != 0)) {
lp 140 fs/ubifs/replay.c lp->free, lp->dirty);
lp 142 fs/ubifs/replay.c lp->free, lp->dirty);
lp 143 fs/ubifs/replay.c dirty -= c->leb_size - lp->free;
lp 153 fs/ubifs/replay.c "replay: %d free %d dirty", r->lnum, lp->free,
lp 154 fs/ubifs/replay.c lp->dirty, r->free, r->dirty);
lp 156 fs/ubifs/replay.c lp = ubifs_change_lp(c, lp, r->free, dirty + r->dirty,
lp 157 fs/ubifs/replay.c lp->flags | LPROPS_TAKEN, 0);
lp 158 fs/ubifs/replay.c if (IS_ERR(lp)) {
lp 159 fs/ubifs/replay.c err = PTR_ERR(lp);
lp 976 fs/ubifs/replay.c const struct ubifs_lprops *lp;
lp 981 fs/ubifs/replay.c lp = ubifs_lpt_lookup_dirty(c, c->ihead_lnum);
lp 982 fs/ubifs/replay.c if (IS_ERR(lp)) {
lp 983 fs/ubifs/replay.c err = PTR_ERR(lp);
lp 987 fs/ubifs/replay.c free = lp->free;
lp 989 fs/ubifs/replay.c lp = ubifs_change_lp(c, lp, LPROPS_NC, LPROPS_NC,
lp 990 fs/ubifs/replay.c lp->flags | LPROPS_TAKEN, 0);
lp 991 fs/ubifs/replay.c if (IS_ERR(lp)) {
lp 992 fs/ubifs/replay.c err = PTR_ERR(lp);
lp 297 fs/ubifs/tnc_commit.c struct ubifs_lprops lp;
lp 300 fs/ubifs/tnc_commit.c err = ubifs_read_one_lp(c, lnum, &lp);
lp 303 fs/ubifs/tnc_commit.c if (lp.free == c->leb_size) {
lp 436 fs/xfs/quota/xfs_dquot_item.c xfs_dq_logitem_t *lp;
lp 437 fs/xfs/quota/xfs_dquot_item.c lp = &dqp->q_logitem;
lp 439 fs/xfs/quota/xfs_dquot_item.c lp->qli_item.li_type = XFS_LI_DQUOT;
lp 440 fs/xfs/quota/xfs_dquot_item.c lp->qli_item.li_ops = &xfs_dquot_item_ops;
lp 441 fs/xfs/quota/xfs_dquot_item.c lp->qli_item.li_mountp = dqp->q_mount;
lp 442 fs/xfs/quota/xfs_dquot_item.c lp->qli_dquot = dqp;
lp 443 fs/xfs/quota/xfs_dquot_item.c lp->qli_format.qlf_type = XFS_LI_DQUOT;
lp 444 fs/xfs/quota/xfs_dquot_item.c lp->qli_format.qlf_id = be32_to_cpu(dqp->q_core.d_id);
lp 445 fs/xfs/quota/xfs_dquot_item.c lp->qli_format.qlf_blkno = dqp->q_blkno;
lp 446 fs/xfs/quota/xfs_dquot_item.c lp->qli_format.qlf_len = 1;
lp 454 fs/xfs/quota/xfs_dquot_item.c lp->qli_format.qlf_boffset = (__uint32_t)dqp->q_bufoffset;
lp 63 fs/xfs/quota/xfs_trans_dquot.c xfs_dq_logitem_t *lp;
lp 68 fs/xfs/quota/xfs_trans_dquot.c lp = &dqp->q_logitem;
lp 73 fs/xfs/quota/xfs_trans_dquot.c (void) xfs_trans_add_item(tp, (xfs_log_item_t*)(lp));
lp 2844 fs/xfs/xfs_attr_leaf.c xfs_attr_inactive_list_t *list, *lp;
lp 2881 fs/xfs/xfs_attr_leaf.c lp = list;
lp 2888 fs/xfs/xfs_attr_leaf.c lp->valueblk = be32_to_cpu(name_rmt->valueblk);
lp 2889 fs/xfs/xfs_attr_leaf.c lp->valuelen = XFS_B_TO_FSB(dp->i_mount,
lp 2891 fs/xfs/xfs_attr_leaf.c lp++;
lp 2901 fs/xfs/xfs_attr_leaf.c for (lp = list, i = 0; i < count; i++, lp++) {
lp 2903 fs/xfs/xfs_attr_leaf.c lp->valueblk, lp->valuelen);
lp 98 fs/xfs/xfs_dir2_leaf.h ((char *)(lp) + (mp)->m_dirblksize -
lp 2885 fs/xfs/xfs_log_recover.c xfs_caddr_t lp;
lp 2894 fs/xfs/xfs_log_recover.c lp = dp + be32_to_cpu(rhead->h_len);
lp 2901 fs/xfs/xfs_log_recover.c while ((dp < lp) && num_logops) {
lp 2902 fs/xfs/xfs_log_recover.c ASSERT(dp + sizeof(xlog_op_header_t) <= lp);
lp 2920 fs/xfs/xfs_log_recover.c if (dp + be32_to_cpu(ohead->oh_len) > lp) {
lp 1748 fs/xfs/xfs_vnodeops.c xfs_log_item_t *lp;
lp 1770 fs/xfs/xfs_vnodeops.c lp = (xfs_log_item_t *)ips[j]->i_itemp;
lp 1771 fs/xfs/xfs_vnodeops.c if (lp && (lp->li_flags & XFS_LI_IN_AIL)) {
lp 1855 fs/xfs/xfs_vnodeops.c xfs_log_item_t *lp;
lp 1875 fs/xfs/xfs_vnodeops.c lp = (xfs_log_item_t *)ip0->i_itemp;
lp 1876 fs/xfs/xfs_vnodeops.c if (lp && (lp->li_flags & XFS_LI_IN_AIL)) {
lp 323 include/linux/arcdevice.h #define ARCRESET(x) (lp->hw.reset(dev, (x)))
lp 324 include/linux/arcdevice.h #define ACOMMAND(x) (lp->hw.command(dev, (x)))
lp 325 include/linux/arcdevice.h #define ASTATUS() (lp->hw.status(dev))
lp 326 include/linux/arcdevice.h #define AINTMASK(x) (lp->hw.intmask(dev, (x)))
lp 85 include/linux/com20020.h lp->config = (lp->config & ~0x03) | (x); \
lp 99 include/linux/com20020.h #define ARCRESET { outb(lp->config | 0x80, _CONFIG); \
lp 101 include/linux/com20020.h outb(lp->config , _CONFIG); \
lp 113 include/linux/com20020.h #define SETCONF outb(lp->config, _CONFIG)
lp 225 include/linux/isdn_ppp.h struct isdn_net_local_s *lp;
lp 1428 include/linux/security.h struct sched_param *lp);
lp 2256 include/linux/security.h return cap_task_setscheduler(p, policy, lp);
lp 137 kernel/power/snapshot.c struct linked_page *lp = list->next;
lp 140 kernel/power/snapshot.c list = lp;
lp 180 kernel/power/snapshot.c struct linked_page *lp;
lp 182 kernel/power/snapshot.c lp = get_image_page(ca->gfp_mask, ca->safe_needed);
lp 183 kernel/power/snapshot.c if (!lp)
lp 186 kernel/power/snapshot.c lp->next = ca->chain;
lp 187 kernel/power/snapshot.c ca->chain = lp;
lp 1708 kernel/power/snapshot.c struct linked_page *sp_list, *lp;
lp 1742 kernel/power/snapshot.c lp = get_image_page(GFP_ATOMIC, PG_SAFE);
lp 1743 kernel/power/snapshot.c if (!lp) {
lp 1747 kernel/power/snapshot.c lp->next = sp_list;
lp 1748 kernel/power/snapshot.c sp_list = lp;
lp 1755 kernel/power/snapshot.c lp = (struct linked_page *)get_zeroed_page(GFP_ATOMIC);
lp 1756 kernel/power/snapshot.c if (!lp) {
lp 1760 kernel/power/snapshot.c if (!swsusp_page_is_free(virt_to_page(lp))) {
lp 1762 kernel/power/snapshot.c lp->next = safe_pages_list;
lp 1763 kernel/power/snapshot.c safe_pages_list = lp;
lp 1766 kernel/power/snapshot.c swsusp_set_page_forbidden(virt_to_page(lp));
lp 1767 kernel/power/snapshot.c swsusp_set_page_free(virt_to_page(lp));
lp 1772 kernel/power/snapshot.c lp = sp_list->next;
lp 1774 kernel/power/snapshot.c sp_list = lp;
lp 5356 kernel/sched.c struct sched_param lp;
lp 5373 kernel/sched.c lp.sched_priority = p->rt_priority;
lp 5379 kernel/sched.c retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
lp 232 net/decnet/dn_neigh.c struct dn_long_packet *lp;
lp 251 net/decnet/dn_neigh.c lp = (struct dn_long_packet *)(data+3);
lp 256 net/decnet/dn_neigh.c lp->msgflg = DN_RT_PKT_LONG|(cb->rt_flags&(DN_RT_F_IE|DN_RT_F_RQR|DN_RT_F_RTS));
lp 257 net/decnet/dn_neigh.c lp->d_area = lp->d_subarea = 0;
lp 258 net/decnet/dn_neigh.c dn_dn2eth(lp->d_id, cb->dst);
lp 259 net/decnet/dn_neigh.c lp->s_area = lp->s_subarea = 0;
lp 260 net/decnet/dn_neigh.c dn_dn2eth(lp->s_id, cb->src);
lp 261 net/decnet/dn_neigh.c lp->nl2 = 0;
lp 262 net/decnet/dn_neigh.c lp->visit_ct = cb->hops & 0x3f;
lp 263 net/decnet/dn_neigh.c lp->s_class = 0;
lp 264 net/decnet/dn_neigh.c lp->pt = 0;
lp 680 net/ipv4/netfilter/nf_nat_snmp_basic.c unsigned long *lp, *id;
lp 773 net/ipv4/netfilter/nf_nat_snmp_basic.c if (!asn1_oid_decode(ctx, end, (unsigned long **)&lp, &len)) {
lp 780 net/ipv4/netfilter/nf_nat_snmp_basic.c kfree(lp);
lp 786 net/ipv4/netfilter/nf_nat_snmp_basic.c memcpy((*obj)->syntax.ul, lp, len);
lp 787 net/ipv4/netfilter/nf_nat_snmp_basic.c kfree(lp);
lp 97 net/ipv4/tcp_lp.c struct lp *lp = inet_csk_ca(sk);
lp 99 net/ipv4/tcp_lp.c lp->flag = 0;
lp 100 net/ipv4/tcp_lp.c lp->sowd = 0;
lp 101 net/ipv4/tcp_lp.c lp->owd_min = 0xffffffff;
lp 102 net/ipv4/tcp_lp.c lp->owd_max = 0;
lp 103 net/ipv4/tcp_lp.c lp->owd_max_rsv = 0;
lp 104 net/ipv4/tcp_lp.c lp->remote_hz = 0;
lp 105 net/ipv4/tcp_lp.c lp->remote_ref_time = 0;
lp 106 net/ipv4/tcp_lp.c lp->local_ref_time = 0;
lp 107 net/ipv4/tcp_lp.c lp->last_drop = 0;
lp 108 net/ipv4/tcp_lp.c lp->inference = 0;
lp 120 net/ipv4/tcp_lp.c struct lp *lp = inet_csk_ca(sk);
lp 122 net/ipv4/tcp_lp.c if (!(lp->flag & LP_WITHIN_INF))
lp 136 net/ipv4/tcp_lp.c struct lp *lp = inet_csk_ca(sk);
lp 137 net/ipv4/tcp_lp.c s64 rhz = lp->remote_hz << 6; /* remote HZ << 6 */
lp 142 net/ipv4/tcp_lp.c if (lp->remote_ref_time == 0 || lp->local_ref_time == 0)
lp 146 net/ipv4/tcp_lp.c if (tp->rx_opt.rcv_tsval == lp->remote_ref_time
lp 147 net/ipv4/tcp_lp.c || tp->rx_opt.rcv_tsecr == lp->local_ref_time)
lp 151 net/ipv4/tcp_lp.c lp->remote_ref_time) / (tp->rx_opt.rcv_tsecr -
lp 152 net/ipv4/tcp_lp.c lp->local_ref_time);
lp 165 net/ipv4/tcp_lp.c lp->flag |= LP_VALID_RHZ;
lp 167 net/ipv4/tcp_lp.c lp->flag &= ~LP_VALID_RHZ;
lp 170 net/ipv4/tcp_lp.c lp->remote_ref_time = tp->rx_opt.rcv_tsval;
lp 171 net/ipv4/tcp_lp.c lp->local_ref_time = tp->rx_opt.rcv_tsecr;
lp 189 net/ipv4/tcp_lp.c struct lp *lp = inet_csk_ca(sk);
lp 192 net/ipv4/tcp_lp.c lp->remote_hz = tcp_lp_remote_hz_estimator(sk);
lp 194 net/ipv4/tcp_lp.c if (lp->flag & LP_VALID_RHZ) {
lp 196 net/ipv4/tcp_lp.c tp->rx_opt.rcv_tsval * (LP_RESOL / lp->remote_hz) -
lp 203 net/ipv4/tcp_lp.c lp->flag |= LP_VALID_OWD;
lp 205 net/ipv4/tcp_lp.c lp->flag &= ~LP_VALID_OWD;
lp 222 net/ipv4/tcp_lp.c struct lp *lp = inet_csk_ca(sk);
lp 226 net/ipv4/tcp_lp.c if (!(lp->flag & LP_VALID_RHZ) || !(lp->flag & LP_VALID_OWD))
lp 230 net/ipv4/tcp_lp.c if (mowd < lp->owd_min)
lp 231 net/ipv4/tcp_lp.c lp->owd_min = mowd;
lp 235 net/ipv4/tcp_lp.c if (mowd > lp->owd_max) {
lp 236 net/ipv4/tcp_lp.c if (mowd > lp->owd_max_rsv) {
lp 237 net/ipv4/tcp_lp.c if (lp->owd_max_rsv == 0)
lp 238 net/ipv4/tcp_lp.c lp->owd_max = mowd;
lp 240 net/ipv4/tcp_lp.c lp->owd_max = lp->owd_max_rsv;
lp 241 net/ipv4/tcp_lp.c lp->owd_max_rsv = mowd;
lp 243 net/ipv4/tcp_lp.c lp->owd_max = mowd;
lp 247 net/ipv4/tcp_lp.c if (lp->sowd != 0) {
lp 248 net/ipv4/tcp_lp.c mowd -= lp->sowd >> 3; /* m is now error in owd est */
lp 249 net/ipv4/tcp_lp.c lp->sowd += mowd; /* owd = 7/8 owd + 1/8 new */
lp 251 net/ipv4/tcp_lp.c lp->sowd = mowd << 3; /* take the measured time be owd */
lp 266 net/ipv4/tcp_lp.c struct lp *lp = inet_csk_ca(sk);
lp 273 net/ipv4/tcp_lp.c lp->inference = 3 * (tcp_time_stamp - tp->rx_opt.rcv_tsecr);
lp 276 net/ipv4/tcp_lp.c if (lp->last_drop && (tcp_time_stamp - lp->last_drop < lp->inference))
lp 277 net/ipv4/tcp_lp.c lp->flag |= LP_WITHIN_INF;
lp 279 net/ipv4/tcp_lp.c lp->flag &= ~LP_WITHIN_INF;
lp 282 net/ipv4/tcp_lp.c if (lp->sowd >> 3 <
lp 283 net/ipv4/tcp_lp.c lp->owd_min + 15 * (lp->owd_max - lp->owd_min) / 100)
lp 284 net/ipv4/tcp_lp.c lp->flag |= LP_WITHIN_THR;
lp 286 net/ipv4/tcp_lp.c lp->flag &= ~LP_WITHIN_THR;
lp 288 net/ipv4/tcp_lp.c pr_debug("TCP-LP: %05o|%5u|%5u|%15u|%15u|%15u\n", lp->flag,
lp 289 net/ipv4/tcp_lp.c tp->snd_cwnd, lp->remote_hz, lp->owd_min, lp->owd_max,
lp 290 net/ipv4/tcp_lp.c lp->sowd >> 3);
lp 292 net/ipv4/tcp_lp.c if (lp->flag & LP_WITHIN_THR)
lp 298 net/ipv4/tcp_lp.c lp->owd_min = lp->sowd >> 3;
lp 299 net/ipv4/tcp_lp.c lp->owd_max = lp->sowd >> 2;
lp 300 net/ipv4/tcp_lp.c lp->owd_max_rsv = lp->sowd >> 2;
lp 304 net/ipv4/tcp_lp.c if (lp->flag & LP_WITHIN_INF)
lp 313 net/ipv4/tcp_lp.c lp->last_drop = tcp_time_stamp;
lp 330 net/ipv4/tcp_lp.c BUILD_BUG_ON(sizeof(struct lp) > ICSK_CA_PRIV_SIZE);
lp 57 net/ipv6/netfilter/ip6t_hbh.c const u_int8_t *lp = NULL;
lp 124 net/ipv6/netfilter/ip6t_hbh.c lp = skb_header_pointer(skb, ptr + 1,
lp 127 net/ipv6/netfilter/ip6t_hbh.c if (lp == NULL)
lp 131 net/ipv6/netfilter/ip6t_hbh.c if (spec_len != 0x00FF && spec_len != *lp) {
lp 132 net/ipv6/netfilter/ip6t_hbh.c pr_debug("Lbad %02X %04X\n", *lp,
lp 137 net/ipv6/netfilter/ip6t_hbh.c optlen = *lp + 2;
lp 607 net/sunrpc/cache.c struct list_head *lp;
lp 613 net/sunrpc/cache.c lp = cache_defer_hash[hash].next;
lp 614 net/sunrpc/cache.c if (lp) {
lp 615 net/sunrpc/cache.c while (lp != &cache_defer_hash[hash]) {
lp 616 net/sunrpc/cache.c dreq = list_entry(lp, struct cache_deferred_req, hash);
lp 617 net/sunrpc/cache.c lp = lp->next;
lp 960 net/sunrpc/cache.c int len = *lp;
lp 989 net/sunrpc/cache.c *lp = len;
lp 996 net/sunrpc/cache.c int len = *lp;
lp 1018 net/sunrpc/cache.c *lp = len;
lp 682 security/security.c return security_ops->task_setscheduler(p, policy, lp);
lp 3316 security/selinux/hooks.c rc = secondary_ops->task_setscheduler(p, policy, lp);
lp 1328 security/selinux/ss/policydb.c memset(lp, 0, sizeof(*lp));
lp 1335 security/selinux/ss/policydb.c lp->sens = le32_to_cpu(buf[0]);
lp 1337 security/selinux/ss/policydb.c if (ebitmap_read(&lp->cat, fp)) {
lp 1109 security/smack/smack_lsm.c rc = cap_task_setscheduler(p, policy, lp);