Lines Matching full:node
81 body->case_sensitive = conn_impl->node->sbi->s_case_sensitive; in hs_fill_case_sense_data()
88 __u8 sensitive = conn_impl->node->sbi->s_case_sensitive ? 1 : 0; in hs_parse_case_sense_data()
111 body->features = cpu_to_le64(conn_impl->node->sbi->s_features); in hs_fill_feature_data()
125 conn_impl->node->features = le64_to_cpu(body->features); in hs_parse_feature_data()
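The two hits above (hs_fill_feature_data() and hs_parse_feature_data()) show the handshake carrying the 64-bit feature mask little-endian on the wire via cpu_to_le64()/le64_to_cpu(). Below is a minimal user-space sketch of that round trip; put_le64()/get_le64() and the feature value are illustrative stand-ins, not hmdfs code.

```c
/* Sketch of the feature-word exchange: the mask travels little-endian
 * regardless of host byte order. Names here are illustrative. */
#include <stdint.h>
#include <stdio.h>

static void put_le64(uint8_t out[8], uint64_t v)
{
	for (int i = 0; i < 8; i++)
		out[i] = (uint8_t)(v >> (8 * i)); /* least significant byte first */
}

static uint64_t get_le64(const uint8_t in[8])
{
	uint64_t v = 0;

	for (int i = 0; i < 8; i++)
		v |= (uint64_t)in[i] << (8 * i);
	return v;
}

int main(void)
{
	uint64_t features = 0x3ULL;       /* hypothetical feature bits */
	uint8_t wire[8];

	put_le64(wire, features);         /* fill side: cpu_to_le64() */
	printf("parsed back: %#llx\n",
	       (unsigned long long)get_le64(wire)); /* parse side: le64_to_cpu() */
	return 0;
}
```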
286 data_len, conn_impl->node->device_id); in hs_proc_msg_data()
295 req_len, data_len, conn_impl->node->device_id); in hs_proc_msg_data()
306 ops, data_len, conn_impl->node->device_id, hs_req->len); in hs_proc_msg_data()
365 (conn_impl->node->version >= DFS_2_0)) { in do_send_handshake()
383 (conn_impl->node->version >= DFS_2_0)) { in do_send_handshake()
411 static int hmdfs_node_waiting_evt_sum(const struct hmdfs_peer *node) in hmdfs_node_waiting_evt_sum() argument
417 sum += node->waiting_evt[i]; in hmdfs_node_waiting_evt_sum()
422 static int hmdfs_update_node_waiting_evt(struct hmdfs_peer *node, int evt, in hmdfs_update_node_waiting_evt() argument
429 sum = hmdfs_node_waiting_evt_sum(node); in hmdfs_update_node_waiting_evt()
431 last = !node->pending_evt; in hmdfs_update_node_waiting_evt()
433 last = node->pending_evt; in hmdfs_update_node_waiting_evt()
437 node->dup_evt[evt]++; in hmdfs_update_node_waiting_evt()
441 node->waiting_evt[evt]++; in hmdfs_update_node_waiting_evt()
442 hmdfs_debug("add node->waiting_evt[%d]=%d", evt, in hmdfs_update_node_waiting_evt()
443 node->waiting_evt[evt]); in hmdfs_update_node_waiting_evt()
449 if (node->waiting_evt[RAW_NODE_EVT_OFF] >= 2 && in hmdfs_update_node_waiting_evt()
450 node->waiting_evt[RAW_NODE_EVT_ON] >= 1) { in hmdfs_update_node_waiting_evt()
451 node->waiting_evt[RAW_NODE_EVT_OFF] -= 1; in hmdfs_update_node_waiting_evt()
452 node->waiting_evt[RAW_NODE_EVT_ON] -= 1; in hmdfs_update_node_waiting_evt()
453 node->seq_wr_idx -= 2; in hmdfs_update_node_waiting_evt()
454 node->merged_evt += 2; in hmdfs_update_node_waiting_evt()
457 next = hmdfs_node_inc_evt_seq(node); in hmdfs_update_node_waiting_evt()
458 node->seq_tbl[(node->seq_wr_idx++) % RAW_NODE_EVT_MAX_NR] = next; in hmdfs_update_node_waiting_evt()
464 static void hmdfs_run_evt_cb_verbosely(struct hmdfs_peer *node, int raw_evt, in hmdfs_run_evt_cb_verbosely() argument
471 node->cur_evt[cur_evt_idx] = raw_evt; in hmdfs_run_evt_cb_verbosely()
472 node->cur_evt_seq[cur_evt_idx] = seq; in hmdfs_run_evt_cb_verbosely()
473 hmdfs_node_call_evt_cb(node, evt, sync, seq); in hmdfs_run_evt_cb_verbosely()
474 node->cur_evt[cur_evt_idx] = RAW_NODE_EVT_NR; in hmdfs_run_evt_cb_verbosely()
479 struct hmdfs_peer *node = in hmdfs_node_evt_work() local
488 mutex_lock(&node->seq_lock); in hmdfs_node_evt_work()
489 seq = node->seq_tbl[(node->seq_rd_idx++) % RAW_NODE_EVT_MAX_NR]; in hmdfs_node_evt_work()
490 hmdfs_run_evt_cb_verbosely(node, node->pending_evt, false, seq); in hmdfs_node_evt_work()
491 mutex_unlock(&node->seq_lock); in hmdfs_node_evt_work()
493 mutex_lock(&node->evt_lock); in hmdfs_node_evt_work()
494 if (hmdfs_node_waiting_evt_sum(node)) { in hmdfs_node_evt_work()
495 node->pending_evt = !node->pending_evt; in hmdfs_node_evt_work()
496 node->pending_evt_seq = in hmdfs_node_evt_work()
497 node->seq_tbl[node->seq_rd_idx % RAW_NODE_EVT_MAX_NR]; in hmdfs_node_evt_work()
498 node->waiting_evt[node->pending_evt]--; in hmdfs_node_evt_work()
500 schedule_delayed_work(&node->evt_dwork, in hmdfs_node_evt_work()
501 node->sbi->async_cb_delay * HZ); in hmdfs_node_evt_work()
503 node->last_evt = node->pending_evt; in hmdfs_node_evt_work()
504 node->pending_evt = RAW_NODE_EVT_NR; in hmdfs_node_evt_work()
506 mutex_unlock(&node->evt_lock); in hmdfs_node_evt_work()
521 static void hmdfs_queue_raw_node_evt(struct hmdfs_peer *node, int evt) in hmdfs_queue_raw_node_evt() argument
525 mutex_lock(&node->evt_lock); in hmdfs_queue_raw_node_evt()
526 if (node->pending_evt == RAW_NODE_EVT_NR) { in hmdfs_queue_raw_node_evt()
527 if (evt == node->last_evt) { in hmdfs_queue_raw_node_evt()
528 node->dup_evt[evt]++; in hmdfs_queue_raw_node_evt()
529 mutex_unlock(&node->evt_lock); in hmdfs_queue_raw_node_evt()
532 node->pending_evt = evt; in hmdfs_queue_raw_node_evt()
533 seq = hmdfs_node_inc_evt_seq(node); in hmdfs_queue_raw_node_evt()
534 node->seq_tbl[(node->seq_wr_idx++) % RAW_NODE_EVT_MAX_NR] = seq; in hmdfs_queue_raw_node_evt()
535 node->pending_evt_seq = seq; in hmdfs_queue_raw_node_evt()
536 mutex_lock(&node->seq_lock); in hmdfs_queue_raw_node_evt()
537 mutex_unlock(&node->evt_lock); in hmdfs_queue_raw_node_evt()
539 hmdfs_run_evt_cb_verbosely(node, evt, true, seq); in hmdfs_queue_raw_node_evt()
540 mutex_unlock(&node->seq_lock); in hmdfs_queue_raw_node_evt()
541 schedule_delayed_work(&node->evt_dwork, in hmdfs_queue_raw_node_evt()
542 node->sbi->async_cb_delay * HZ); in hmdfs_queue_raw_node_evt()
543 } else if (hmdfs_update_node_waiting_evt(node, evt, &seq) > 0) { in hmdfs_queue_raw_node_evt()
548 mutex_lock(&node->seq_lock); in hmdfs_queue_raw_node_evt()
549 mutex_unlock(&node->evt_lock); in hmdfs_queue_raw_node_evt()
550 hmdfs_run_evt_cb_verbosely(node, evt, true, seq); in hmdfs_queue_raw_node_evt()
551 mutex_unlock(&node->seq_lock); in hmdfs_queue_raw_node_evt()
553 mutex_unlock(&node->evt_lock); in hmdfs_queue_raw_node_evt()
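The hits from hmdfs_update_node_waiting_evt() and hmdfs_queue_raw_node_evt() above show the peer's ON/OFF event coalescing: with nothing pending, a repeat of the last delivered event is dropped as a duplicate; while an event is pending, new raw events wait, and a waiting OFF/ON pair cancels out (the `-= 1` / `merged_evt += 2` lines). The user-space sketch below reproduces only that coalescing rule; the struct, names, and the simplified handling of last_evt are assumptions, not the hmdfs implementation.

```c
/* Minimal sketch of the OFF/ON coalescing seen above; names are illustrative. */
#include <stdio.h>

enum { EVT_OFF, EVT_ON, EVT_NR };          /* analogue of RAW_NODE_EVT_*      */

struct peer_sketch {
	int pending_evt;                   /* EVT_NR means "nothing pending"  */
	int last_evt;                      /* last event actually delivered   */
	unsigned int waiting_evt[EVT_NR];  /* events queued behind pending    */
	unsigned int dup_evt[EVT_NR];
	unsigned int merged_evt;
};

/* Queue a raw event, merging a waiting OFF/ON pair the way the listing does. */
static void queue_evt(struct peer_sketch *p, int evt)
{
	if (p->pending_evt == EVT_NR) {
		if (evt == p->last_evt) {  /* same state again: drop it   */
			p->dup_evt[evt]++;
			return;
		}
		p->pending_evt = evt;      /* deliver this one directly   */
		p->last_evt = evt;
		return;
	}

	p->waiting_evt[evt]++;
	/* A waiting OFF followed by an ON cancels out, mirroring the
	 * "waiting_evt[...] -= 1" and "merged_evt += 2" lines above. */
	if (p->waiting_evt[EVT_OFF] >= 2 && p->waiting_evt[EVT_ON] >= 1) {
		p->waiting_evt[EVT_OFF]--;
		p->waiting_evt[EVT_ON]--;
		p->merged_evt += 2;
	}
}

int main(void)
{
	struct peer_sketch p = { .pending_evt = EVT_NR, .last_evt = EVT_NR };

	queue_evt(&p, EVT_ON);   /* delivered immediately                */
	queue_evt(&p, EVT_OFF);  /* queued behind the pending ON         */
	queue_evt(&p, EVT_OFF);  /* queued                               */
	queue_evt(&p, EVT_ON);   /* merges with one of the waiting OFFs  */

	printf("waiting OFF=%u ON=%u merged=%u\n",
	       p.waiting_evt[EVT_OFF], p.waiting_evt[EVT_ON], p.merged_evt);
	return 0;
}
```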
571 void connection_handshake_notify(struct hmdfs_peer *node, int notify_type) in connection_handshake_notify() argument
577 memcpy(param.remote_cid, node->cid, HMDFS_CID_SIZE); in connection_handshake_notify()
578 notify(node, &param); in connection_handshake_notify()
595 void connection_to_working(struct hmdfs_peer *node) in connection_to_working() argument
600 if (!node) in connection_to_working()
602 mutex_lock(&node->conn_impl_list_lock); in connection_to_working()
603 list_for_each_entry(conn_impl, &node->conn_impl_list, list) { in connection_to_working()
611 mutex_unlock(&node->conn_impl_list_lock); in connection_to_working()
612 peer_online(node); in connection_to_working()
637 conn_impl->node->version = version; in connection_handshake_recv_handler()
640 conn_impl->node->conn_operations = hmdfs_get_peer_operation(version); in connection_handshake_recv_handler()
647 conn_impl->node->device_id, version, head->datasize, fd); in connection_handshake_recv_handler()
651 if (conn_impl->node->version >= DFS_2_0) { in connection_handshake_recv_handler()
653 conn_impl->node->status = NODE_STAT_SHAKING; in connection_handshake_recv_handler()
661 conn_impl->node->device_id, status, fd); in connection_handshake_recv_handler()
664 connection_to_working(conn_impl->node); in connection_handshake_recv_handler()
668 if (conn_impl->node->version >= DFS_2_0) { in connection_handshake_recv_handler()
687 peer_online(conn_impl->node); in connection_handshake_recv_handler()
690 if (conn_impl->node->version >= DFS_2_0) { in connection_handshake_recv_handler()
704 peer_online(conn_impl->node); in connection_handshake_recv_handler()
716 connection_handshake_notify(conn_impl->node, in connection_handshake_recv_handler()
719 conn_impl->node->device_id, fd); in connection_handshake_recv_handler()
732 if (hmdfs_message_verify(conn->node, head, data) < 0) { in update_tls_crypto_key()
753 __u8 version = conn->node->version; in cmd_update_tls_crypto_key()
771 conn_impl->node->conn_operations->recvmsg(conn_impl->node, buf, data); in connection_working_recv_handler()
797 mutex_lock(&conn->node->conn_impl_list_lock); in connection_release()
799 mutex_unlock(&conn->node->conn_impl_list_lock); in connection_release()
804 wake_up_interruptible(&conn->node->deleting_list_wq); in connection_release()
853 static void hmdfs_dump_deleting_list(struct hmdfs_peer *node) in hmdfs_dump_deleting_list() argument
859 mutex_lock(&node->conn_impl_list_lock); in hmdfs_dump_deleting_list()
860 list_for_each_entry(con, &node->conn_deleting_list, list) { in hmdfs_dump_deleting_list()
863 count, node->device_id, tcp ? tcp->fd : -1, in hmdfs_dump_deleting_list()
867 mutex_unlock(&node->conn_impl_list_lock); in hmdfs_dump_deleting_list()
870 static bool hmdfs_conn_deleting_list_empty(struct hmdfs_peer *node) in hmdfs_conn_deleting_list_empty() argument
874 mutex_lock(&node->conn_impl_list_lock); in hmdfs_conn_deleting_list_empty()
875 empty = list_empty(&node->conn_deleting_list); in hmdfs_conn_deleting_list_empty()
876 mutex_unlock(&node->conn_impl_list_lock); in hmdfs_conn_deleting_list_empty()
881 void hmdfs_disconnect_node(struct hmdfs_peer *node) in hmdfs_disconnect_node() argument
888 if (unlikely(!node)) in hmdfs_disconnect_node()
891 hmdfs_node_inc_evt_seq(node); in hmdfs_disconnect_node()
894 node->status = NODE_STAT_OFFLINE; in hmdfs_disconnect_node()
895 hmdfs_info("Try to disconnect peer: device_id %llu", node->device_id); in hmdfs_disconnect_node()
897 mutex_lock(&node->conn_impl_list_lock); in hmdfs_disconnect_node()
898 if (!list_empty(&node->conn_impl_list)) in hmdfs_disconnect_node()
899 list_replace_init(&node->conn_impl_list, &local_conns); in hmdfs_disconnect_node()
900 mutex_unlock(&node->conn_impl_list_lock); in hmdfs_disconnect_node()
918 if (wait_event_interruptible_timeout(node->deleting_list_wq, in hmdfs_disconnect_node()
919 hmdfs_conn_deleting_list_empty(node), in hmdfs_disconnect_node()
921 hmdfs_dump_deleting_list(node); in hmdfs_disconnect_node()
924 spin_lock(&node->idr_lock); in hmdfs_disconnect_node()
925 while (node->msg_idr_process) { in hmdfs_disconnect_node()
926 spin_unlock(&node->idr_lock); in hmdfs_disconnect_node()
929 spin_lock(&node->idr_lock); in hmdfs_disconnect_node()
931 spin_unlock(&node->idr_lock); in hmdfs_disconnect_node()
933 hmdfs_queue_raw_node_evt(node, RAW_NODE_EVT_OFF); in hmdfs_disconnect_node()
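The hmdfs_disconnect_node() hits above show a common kernel pattern: the whole conn_impl_list is detached onto a local head with list_replace_init() while conn_impl_list_lock is held, and the connections are then torn down without the lock. A minimal user-space analogue of just that detach step is sketched below; the pthread mutex, the singly linked list, and all names are stand-ins, not the hmdfs types.

```c
/* Sketch of "detach the list under the lock, tear it down outside the lock". */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct conn_sketch {
	int fd;
	struct conn_sketch *next;
};

struct peer_sketch {
	pthread_mutex_t list_lock;
	struct conn_sketch *conn_list;   /* analogue of conn_impl_list */
};

static void disconnect_peer_sketch(struct peer_sketch *p)
{
	struct conn_sketch *local, *c;

	/* Grab the whole list and reset the shared head while holding the
	 * lock, so concurrent lookups immediately see an empty list. */
	pthread_mutex_lock(&p->list_lock);
	local = p->conn_list;
	p->conn_list = NULL;
	pthread_mutex_unlock(&p->list_lock);

	/* Tear the connections down without holding the list lock. */
	while ((c = local)) {
		local = c->next;
		printf("closing fd %d\n", c->fd);
		free(c);
	}
}

int main(void)
{
	struct peer_sketch p = { PTHREAD_MUTEX_INITIALIZER, NULL };

	for (int fd = 3; fd < 6; fd++) {
		struct conn_sketch *c = malloc(sizeof(*c));

		if (!c)
			break;
		c->fd = fd;
		c->next = p.conn_list;
		p.conn_list = c;
	}
	disconnect_peer_sketch(&p);
	return 0;
}
```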
936 static void hmdfs_run_simple_evt_cb(struct hmdfs_peer *node, int evt) in hmdfs_run_simple_evt_cb() argument
938 unsigned int seq = hmdfs_node_inc_evt_seq(node); in hmdfs_run_simple_evt_cb()
940 mutex_lock(&node->seq_lock); in hmdfs_run_simple_evt_cb()
941 hmdfs_node_call_evt_cb(node, evt, true, seq); in hmdfs_run_simple_evt_cb()
942 mutex_unlock(&node->seq_lock); in hmdfs_run_simple_evt_cb()
945 static void hmdfs_del_peer(struct hmdfs_peer *node) in hmdfs_del_peer() argument
952 cancel_delayed_work_sync(&node->evt_dwork); in hmdfs_del_peer()
954 hmdfs_run_simple_evt_cb(node, NODE_EVT_DEL); in hmdfs_del_peer()
956 hmdfs_release_peer_sysfs(node); in hmdfs_del_peer()
958 flush_workqueue(node->reget_conn_wq); in hmdfs_del_peer()
959 peer_put(node); in hmdfs_del_peer()
964 struct hmdfs_peer *node = NULL; in hmdfs_connections_stop() local
968 list_for_each_entry_safe(node, con_tmp, &sbi->connections.node_list, in hmdfs_connections_stop()
971 hmdfs_disconnect_node(node); in hmdfs_connections_stop()
972 hmdfs_del_peer(node); in hmdfs_connections_stop()
978 struct connection *get_conn_impl(struct hmdfs_peer *node, int connect_type) in get_conn_impl() argument
982 if (!node) in get_conn_impl()
984 mutex_lock(&node->conn_impl_list_lock); in get_conn_impl()
985 list_for_each_entry(conn_impl, &node->conn_impl_list, list) { in get_conn_impl()
989 mutex_unlock(&node->conn_impl_list_lock); in get_conn_impl()
993 mutex_unlock(&node->conn_impl_list_lock); in get_conn_impl()
995 node->device_id, connect_type); in get_conn_impl()
999 void set_conn_sock_quickack(struct hmdfs_peer *node) in set_conn_sock_quickack() argument
1005 if (!node) in set_conn_sock_quickack()
1007 mutex_lock(&node->conn_impl_list_lock); in set_conn_sock_quickack()
1008 list_for_each_entry(conn_impl, &node->conn_impl_list, list) { in set_conn_sock_quickack()
1016 mutex_unlock(&node->conn_impl_list_lock); in set_conn_sock_quickack()
1063 struct hmdfs_peer *node = NULL; in lookup_peer_by_cid_unsafe() local
1065 list_for_each_entry(node, &sbi->connections.node_list, list) in lookup_peer_by_cid_unsafe()
1066 if (!strncmp(node->cid, cid, HMDFS_CID_SIZE)) { in lookup_peer_by_cid_unsafe()
1067 peer_get(node); in lookup_peer_by_cid_unsafe()
1068 return node; in lookup_peer_by_cid_unsafe()
1098 struct hmdfs_peer *node = kzalloc(sizeof(*node), GFP_KERNEL); in alloc_peer() local
1100 if (!node) in alloc_peer()
1103 node->device_id = (u32)atomic_inc_return(&sbi->connections.conn_seq); in alloc_peer()
1105 node->async_wq = alloc_workqueue("dfs_async%u_%llu", WQ_MEM_RECLAIM, 0, in alloc_peer()
1106 sbi->seq, node->device_id); in alloc_peer()
1107 if (!node->async_wq) { in alloc_peer()
1111 node->req_handle_wq = alloc_workqueue("dfs_req%u_%llu", in alloc_peer()
1114 sbi->seq, node->device_id); in alloc_peer()
1115 if (!node->req_handle_wq) { in alloc_peer()
1119 node->dentry_wq = alloc_workqueue("dfs_dentry%u_%llu", in alloc_peer()
1121 0, sbi->seq, node->device_id); in alloc_peer()
1122 if (!node->dentry_wq) { in alloc_peer()
1126 node->retry_wb_wq = alloc_workqueue("dfs_rwb%u_%llu", in alloc_peer()
1129 sbi->seq, node->device_id); in alloc_peer()
1130 if (!node->retry_wb_wq) { in alloc_peer()
1134 node->reget_conn_wq = alloc_workqueue("dfs_regetcon%u_%llu", in alloc_peer()
1136 sbi->seq, node->device_id); in alloc_peer()
1137 if (!node->reget_conn_wq) { in alloc_peer()
1141 INIT_LIST_HEAD(&node->conn_impl_list); in alloc_peer()
1142 mutex_init(&node->conn_impl_list_lock); in alloc_peer()
1143 INIT_LIST_HEAD(&node->conn_deleting_list); in alloc_peer()
1144 init_waitqueue_head(&node->deleting_list_wq); in alloc_peer()
1145 idr_init(&node->msg_idr); in alloc_peer()
1146 spin_lock_init(&node->idr_lock); in alloc_peer()
1147 idr_init(&node->file_id_idr); in alloc_peer()
1148 spin_lock_init(&node->file_id_lock); in alloc_peer()
1149 INIT_LIST_HEAD(&node->list); in alloc_peer()
1150 kref_init(&node->ref_cnt); in alloc_peer()
1151 node->owner = sbi->seq; in alloc_peer()
1152 node->conn_operations = conn_operations; in alloc_peer()
1153 node->sbi = sbi; in alloc_peer()
1154 node->status = NODE_STAT_SHAKING; in alloc_peer()
1155 node->conn_time = jiffies; in alloc_peer()
1156 memcpy(node->cid, cid, HMDFS_CID_SIZE); in alloc_peer()
1157 atomic64_set(&node->sb_dirty_count, 0); in alloc_peer()
1158 node->fid_cookie = 0; in alloc_peer()
1159 atomic_set(&node->evt_seq, 0); in alloc_peer()
1160 mutex_init(&node->seq_lock); in alloc_peer()
1161 mutex_init(&node->offline_cb_lock); in alloc_peer()
1162 mutex_init(&node->evt_lock); in alloc_peer()
1163 node->pending_evt = RAW_NODE_EVT_NR; in alloc_peer()
1164 node->last_evt = RAW_NODE_EVT_NR; in alloc_peer()
1165 node->cur_evt[0] = RAW_NODE_EVT_NR; in alloc_peer()
1166 node->cur_evt[1] = RAW_NODE_EVT_NR; in alloc_peer()
1167 node->seq_wr_idx = (unsigned char)UINT_MAX; in alloc_peer()
1168 node->seq_rd_idx = node->seq_wr_idx; in alloc_peer()
1169 INIT_DELAYED_WORK(&node->evt_dwork, hmdfs_node_evt_work); in alloc_peer()
1170 node->msg_idr_process = 0; in alloc_peer()
1171 node->offline_start = false; in alloc_peer()
1172 spin_lock_init(&node->wr_opened_inode_lock); in alloc_peer()
1173 INIT_LIST_HEAD(&node->wr_opened_inode_list); in alloc_peer()
1174 spin_lock_init(&node->stashed_inode_lock); in alloc_peer()
1175 node->stashed_inode_nr = 0; in alloc_peer()
1176 atomic_set(&node->rebuild_inode_status_nr, 0); in alloc_peer()
1177 init_waitqueue_head(&node->rebuild_inode_status_wq); in alloc_peer()
1178 INIT_LIST_HEAD(&node->stashed_inode_list); in alloc_peer()
1179 node->need_rebuild_stash_list = false; in alloc_peer()
1180 node->devsl = devsl; in alloc_peer()
1182 return node; in alloc_peer()
1185 if (node->async_wq) { in alloc_peer()
1186 destroy_workqueue(node->async_wq); in alloc_peer()
1187 node->async_wq = NULL; in alloc_peer()
1189 if (node->req_handle_wq) { in alloc_peer()
1190 destroy_workqueue(node->req_handle_wq); in alloc_peer()
1191 node->req_handle_wq = NULL; in alloc_peer()
1193 if (node->dentry_wq) { in alloc_peer()
1194 destroy_workqueue(node->dentry_wq); in alloc_peer()
1195 node->dentry_wq = NULL; in alloc_peer()
1197 if (node->retry_wb_wq) { in alloc_peer()
1198 destroy_workqueue(node->retry_wb_wq); in alloc_peer()
1199 node->retry_wb_wq = NULL; in alloc_peer()
1201 if (node->reget_conn_wq) { in alloc_peer()
1202 destroy_workqueue(node->reget_conn_wq); in alloc_peer()
1203 node->reget_conn_wq = NULL; in alloc_peer()
1205 kfree(node); in alloc_peer()
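The tail of alloc_peer() above (lines 1185-1205) is the unwind path: each workqueue is destroyed only if it was created, its pointer is cleared, and the peer is freed last. The user-space sketch below mirrors only that allocate-then-unwind shape with plain heap buffers standing in for the workqueues; all names are illustrative, not the hmdfs ones.

```c
/* Sketch of alloc_peer()'s error path: free what was set up, then the object. */
#include <stdio.h>
#include <stdlib.h>

struct peer_sketch {
	char *async_wq;          /* stand-ins for the per-peer workqueues */
	char *req_handle_wq;
	char *dentry_wq;
};

static void free_peer_sketch(struct peer_sketch *p)
{
	if (p->async_wq) {
		free(p->async_wq);
		p->async_wq = NULL;
	}
	if (p->req_handle_wq) {
		free(p->req_handle_wq);
		p->req_handle_wq = NULL;
	}
	if (p->dentry_wq) {
		free(p->dentry_wq);
		p->dentry_wq = NULL;
	}
	free(p);
}

static struct peer_sketch *alloc_peer_sketch(void)
{
	struct peer_sketch *p = calloc(1, sizeof(*p));

	if (!p)
		return NULL;
	p->async_wq = malloc(32);
	if (!p->async_wq)
		goto out_fail;
	p->req_handle_wq = malloc(32);
	if (!p->req_handle_wq)
		goto out_fail;
	p->dentry_wq = malloc(32);
	if (!p->dentry_wq)
		goto out_fail;
	return p;

out_fail:
	free_peer_sketch(p);
	return NULL;
}

int main(void)
{
	struct peer_sketch *p = alloc_peer_sketch();

	printf("alloc %s\n", p ? "ok" : "failed");
	if (p)
		free_peer_sketch(p);
	return 0;
}
```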