bclink 118 net/tipc/bcast.c static struct bclink *bclink = NULL;
bclink 154 net/tipc/bcast.c struct sk_buff *buf = n_ptr->bclink.deferred_head;
bclink 156 net/tipc/bcast.c n_ptr->bclink.gap_after = n_ptr->bclink.gap_to =
bclink 157 net/tipc/bcast.c mod(n_ptr->bclink.last_in);
bclink 159 net/tipc/bcast.c n_ptr->bclink.gap_to = mod(buf_seqno(buf) - 1);
bclink 211 net/tipc/bcast.c if (less_eq(acked, n_ptr->bclink.acked))
bclink 219 net/tipc/bcast.c while (crs && less_eq(buf_seqno(crs), n_ptr->bclink.acked)) {
bclink 236 net/tipc/bcast.c n_ptr->bclink.acked = acked;
bclink 272 net/tipc/bcast.c if (!less(n_ptr->bclink.gap_after, n_ptr->bclink.gap_to))
bclink 281 net/tipc/bcast.c msg_set_bcast_ack(msg, mod(n_ptr->bclink.last_in));
bclink 282 net/tipc/bcast.c msg_set_bcgap_after(msg, n_ptr->bclink.gap_after);
bclink 283 net/tipc/bcast.c msg_set_bcgap_to(msg, n_ptr->bclink.gap_to);
bclink 301 net/tipc/bcast.c n_ptr->bclink.nack_sync = tipc_own_tag;
bclink 313 net/tipc/bcast.c if (!n_ptr->bclink.supported ||
bclink 314 net/tipc/bcast.c less_eq(last_sent, mod(n_ptr->bclink.last_in)))
bclink 318 net/tipc/bcast.c if (n_ptr->bclink.gap_after == n_ptr->bclink.gap_to)
bclink 319 net/tipc/bcast.c n_ptr->bclink.gap_to = last_sent;
bclink 340 net/tipc/bcast.c my_after = n_ptr->bclink.gap_after;
bclink 341 net/tipc/bcast.c my_to = n_ptr->bclink.gap_to;
bclink 345 net/tipc/bcast.c n_ptr->bclink.gap_after = gap_to;
bclink 347 net/tipc/bcast.c n_ptr->bclink.gap_to = n_ptr->bclink.gap_after;
bclink 350 net/tipc/bcast.c n_ptr->bclink.gap_to = gap_after;
bclink 355 net/tipc/bcast.c struct sk_buff *buf = n_ptr->bclink.deferred_head;
bclink 356 net/tipc/bcast.c u32 prev = n_ptr->bclink.gap_to;
bclink 370 net/tipc/bcast.c n_ptr->bclink.gap_to = gap_after;
bclink 376 net/tipc/bcast.c if (n_ptr->bclink.gap_to != n_ptr->bclink.gap_after) {
bclink 428 net/tipc/bcast.c if (unlikely(!node || !tipc_node_is_up(node) || !node->bclink.supported ||
bclink 467 net/tipc/bcast.c deferred = node->bclink.deferred_head;
bclink 468 net/tipc/bcast.c next_in = mod(node->bclink.last_in + 1);
bclink 473 net/tipc/bcast.c node->bclink.last_in++;
bclink 489 net/tipc/bcast.c if (tipc_link_recv_fragment(&node->bclink.defragm,
bclink 502 net/tipc/bcast.c node->bclink.deferred_head = deferred->next;
bclink 507 net/tipc/bcast.c u32 gap_after = node->bclink.gap_after;
bclink 508 net/tipc/bcast.c u32 gap_to = node->bclink.gap_to;
bclink 510 net/tipc/bcast.c if (tipc_link_defer_pkt(&node->bclink.deferred_head,
bclink 511 net/tipc/bcast.c &node->bclink.deferred_tail,
bclink 513 net/tipc/bcast.c node->bclink.nack_sync++;
bclink 516 net/tipc/bcast.c node->bclink.gap_after = seqno;
bclink 518 net/tipc/bcast.c node->bclink.gap_to = seqno;
bclink 520 net/tipc/bcast.c if (bclink_ack_allowed(node->bclink.nack_sync)) {
bclink 543 net/tipc/bcast.c return (n_ptr->bclink.supported &&
bclink 544 net/tipc/bcast.c (tipc_bclink_get_last_sent() != n_ptr->bclink.acked));
bclink 778 net/tipc/bcast.c bclink = kzalloc(sizeof(*bclink), GFP_ATOMIC);
bclink 779 net/tipc/bcast.c if (!bcbearer || !bclink) {
bclink 784 net/tipc/bcast.c kfree(bclink);
bclink 785 net/tipc/bcast.c bclink = NULL;
bclink 794 net/tipc/bcast.c bcl = &bclink->link;
bclink 797 net/tipc/bcast.c spin_lock_init(&bclink->node.lock);
bclink 798 net/tipc/bcast.c bcl->owner = &bclink->node;
bclink 824 net/tipc/bcast.c kfree(bclink);
bclink 825 net/tipc/bcast.c bclink = NULL;
bclink 1043 net/tipc/link.c msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
bclink 1558 net/tipc/link.c msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in);
bclink 1577 net/tipc/link.c msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in);
bclink 1601 net/tipc/link.c msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
bclink 1697 net/tipc/link.c tipc_printf(TIPC_OUTPUT, "Supported: %d, ", n_ptr->bclink.supported);
bclink 1698 net/tipc/link.c tipc_printf(TIPC_OUTPUT, "Acked: %u\n", n_ptr->bclink.acked);
bclink 1699 net/tipc/link.c tipc_printf(TIPC_OUTPUT, "Last in: %u, ", n_ptr->bclink.last_in);
bclink 1700 net/tipc/link.c tipc_printf(TIPC_OUTPUT, "Gap after: %u, ", n_ptr->bclink.gap_after);
bclink 1701 net/tipc/link.c tipc_printf(TIPC_OUTPUT, "Gap to: %u\n", n_ptr->bclink.gap_to);
bclink 1702 net/tipc/link.c tipc_printf(TIPC_OUTPUT, "Nack sync: %u\n\n", n_ptr->bclink.nack_sync);
bclink 1751 net/tipc/link.c msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
bclink 1905 net/tipc/link.c if (less(n_ptr->bclink.acked, msg_bcast_ack(msg))) {
bclink 1906 net/tipc/link.c if (tipc_node_is_up(n_ptr) && n_ptr->bclink.supported)
bclink 2136 net/tipc/link.c msg_set_bcast_ack(msg, mod(l_ptr->owner->bclink.last_in));
bclink 2297 net/tipc/link.c l_ptr->owner->bclink.supported = (max_pkt_info != 0);
bclink 2306 net/tipc/link.c l_ptr->owner->bclink.last_in = mod(msg_last_bcast(msg));
bclink 2496 net/tipc/link.c msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
bclink 351 net/tipc/node.c n_ptr->bclink.acked = tipc_bclink_get_last_sent();
bclink 374 net/tipc/node.c if (n_ptr->bclink.supported) {
bclink 395 net/tipc/node.c n_ptr->bclink.gap_after = n_ptr->bclink.gap_to = 0;
bclink 396 net/tipc/node.c while (n_ptr->bclink.deferred_head) {
bclink 397 net/tipc/node.c struct sk_buff *buf = n_ptr->bclink.deferred_head;
bclink 398 net/tipc/node.c n_ptr->bclink.deferred_head = buf->next;
bclink 401 net/tipc/node.c if (n_ptr->bclink.defragm) {
bclink 402 net/tipc/node.c buf_discard(n_ptr->bclink.defragm);
bclink 403 net/tipc/node.c n_ptr->bclink.defragm = NULL;
bclink 405 net/tipc/node.c if (in_own_cluster(n_ptr->addr) && n_ptr->bclink.supported) {
bclink 406 net/tipc/node.c tipc_bclink_acknowledge(n_ptr, mod(n_ptr->bclink.acked + 10000));
bclink 425 net/tipc/node.c if (n_ptr->bclink.supported) {
bclink 94 net/tipc/node.h } bclink;
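
Taken together, the accesses listed above imply the layout of the per-node
broadcast-link state whose closing brace is the net/tipc/node.h hit at source
line 94 ("} bclink;", an anonymous struct member of struct tipc_node). A
minimal sketch reconstructed from those call sites follows; the standalone
struct tag is hypothetical, and the exact field types and ordering are
assumptions inferred from usage (mod() and less_eq() on the sequence fields
suggest u32, while deferred_head, deferred_tail, and defragm are plainly
struct sk_buff pointers):

#include <linux/skbuff.h>
#include <linux/types.h>

/* Hypothetical named equivalent of the anonymous "bclink" member of
 * struct tipc_node; reconstructed from the references above, not copied
 * from the header itself. */
struct tipc_node_bclink_state {
	int supported;                 /* peer runs bcast protocol (bcast.c:313, link.c:2297) */
	u32 acked;                     /* last bcast seqno acked by peer (bcast.c:236, node.c:351) */
	u32 last_in;                   /* last bcast seqno received in sequence (bcast.c:468, 473) */
	u32 gap_after;                 /* seqno after which packets are missing (bcast.c:516) */
	u32 gap_to;                    /* last missing seqno of the gap (bcast.c:518) */
	u32 nack_sync;                 /* counter rate-limiting NACK sends (bcast.c:301, 513) */
	struct sk_buff *deferred_head; /* oldest out-of-sequence bcast packet (bcast.c:510) */
	struct sk_buff *deferred_tail; /* newest out-of-sequence bcast packet (bcast.c:511) */
	struct sk_buff *defragm;       /* partly reassembled bcast fragments (bcast.c:489) */
};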