Lines Matching full:node
81 body->case_sensitive = conn_impl->node->sbi->s_case_sensitive; in hs_fill_case_sense_data()
88 __u8 sensitive = conn_impl->node->sbi->s_case_sensitive ? 1 : 0; in hs_parse_case_sense_data()
111 body->features = cpu_to_le64(conn_impl->node->sbi->s_features); in hs_fill_feature_data()
125 conn_impl->node->features = le64_to_cpu(body->features); in hs_parse_feature_data()
286 data_len, conn_impl->node->device_id); in hs_proc_msg_data()
295 req_len, data_len, conn_impl->node->device_id); in hs_proc_msg_data()
306 ops, data_len, conn_impl->node->device_id, hs_req->len); in hs_proc_msg_data()
409 static int hmdfs_node_waiting_evt_sum(const struct hmdfs_peer *node) in hmdfs_node_waiting_evt_sum() argument
415 sum += node->waiting_evt[i]; in hmdfs_node_waiting_evt_sum()
420 static int hmdfs_update_node_waiting_evt(struct hmdfs_peer *node, int evt, in hmdfs_update_node_waiting_evt() argument
427 sum = hmdfs_node_waiting_evt_sum(node); in hmdfs_update_node_waiting_evt()
429 last = !node->pending_evt; in hmdfs_update_node_waiting_evt()
431 last = node->pending_evt; in hmdfs_update_node_waiting_evt()
435 node->dup_evt[evt]++; in hmdfs_update_node_waiting_evt()
439 node->waiting_evt[evt]++; in hmdfs_update_node_waiting_evt()
440 hmdfs_debug("add node->waiting_evt[%d]=%d", evt, in hmdfs_update_node_waiting_evt()
441 node->waiting_evt[evt]); in hmdfs_update_node_waiting_evt()
447 if (node->waiting_evt[RAW_NODE_EVT_OFF] >= 2 && in hmdfs_update_node_waiting_evt()
448 node->waiting_evt[RAW_NODE_EVT_ON] >= 1) { in hmdfs_update_node_waiting_evt()
449 node->waiting_evt[RAW_NODE_EVT_OFF] -= 1; in hmdfs_update_node_waiting_evt()
450 node->waiting_evt[RAW_NODE_EVT_ON] -= 1; in hmdfs_update_node_waiting_evt()
451 node->seq_wr_idx -= 2; in hmdfs_update_node_waiting_evt()
452 node->merged_evt += 2; in hmdfs_update_node_waiting_evt()
455 next = hmdfs_node_inc_evt_seq(node); in hmdfs_update_node_waiting_evt()
456 node->seq_tbl[(node->seq_wr_idx++) % RAW_NODE_EVT_MAX_NR] = next; in hmdfs_update_node_waiting_evt()
462 static void hmdfs_run_evt_cb_verbosely(struct hmdfs_peer *node, int raw_evt, in hmdfs_run_evt_cb_verbosely() argument
469 node->cur_evt[cur_evt_idx] = raw_evt; in hmdfs_run_evt_cb_verbosely()
470 node->cur_evt_seq[cur_evt_idx] = seq; in hmdfs_run_evt_cb_verbosely()
471 hmdfs_node_call_evt_cb(node, evt, sync, seq); in hmdfs_run_evt_cb_verbosely()
472 node->cur_evt[cur_evt_idx] = RAW_NODE_EVT_NR; in hmdfs_run_evt_cb_verbosely()
477 struct hmdfs_peer *node = in hmdfs_node_evt_work() local
486 mutex_lock(&node->seq_lock); in hmdfs_node_evt_work()
487 seq = node->seq_tbl[(node->seq_rd_idx++) % RAW_NODE_EVT_MAX_NR]; in hmdfs_node_evt_work()
488 hmdfs_run_evt_cb_verbosely(node, node->pending_evt, false, seq); in hmdfs_node_evt_work()
489 mutex_unlock(&node->seq_lock); in hmdfs_node_evt_work()
491 mutex_lock(&node->evt_lock); in hmdfs_node_evt_work()
492 if (hmdfs_node_waiting_evt_sum(node)) { in hmdfs_node_evt_work()
493 node->pending_evt = !node->pending_evt; in hmdfs_node_evt_work()
494 node->pending_evt_seq = in hmdfs_node_evt_work()
495 node->seq_tbl[node->seq_rd_idx % RAW_NODE_EVT_MAX_NR]; in hmdfs_node_evt_work()
496 node->waiting_evt[node->pending_evt]--; in hmdfs_node_evt_work()
498 schedule_delayed_work(&node->evt_dwork, in hmdfs_node_evt_work()
499 node->sbi->async_cb_delay * HZ); in hmdfs_node_evt_work()
501 node->last_evt = node->pending_evt; in hmdfs_node_evt_work()
502 node->pending_evt = RAW_NODE_EVT_NR; in hmdfs_node_evt_work()
504 mutex_unlock(&node->evt_lock); in hmdfs_node_evt_work()
519 static void hmdfs_queue_raw_node_evt(struct hmdfs_peer *node, int evt) in hmdfs_queue_raw_node_evt() argument
523 mutex_lock(&node->evt_lock); in hmdfs_queue_raw_node_evt()
524 if (node->pending_evt == RAW_NODE_EVT_NR) { in hmdfs_queue_raw_node_evt()
525 if (evt == node->last_evt) { in hmdfs_queue_raw_node_evt()
526 node->dup_evt[evt]++; in hmdfs_queue_raw_node_evt()
527 mutex_unlock(&node->evt_lock); in hmdfs_queue_raw_node_evt()
530 node->pending_evt = evt; in hmdfs_queue_raw_node_evt()
531 seq = hmdfs_node_inc_evt_seq(node); in hmdfs_queue_raw_node_evt()
532 node->seq_tbl[(node->seq_wr_idx++) % RAW_NODE_EVT_MAX_NR] = seq; in hmdfs_queue_raw_node_evt()
533 node->pending_evt_seq = seq; in hmdfs_queue_raw_node_evt()
534 mutex_lock(&node->seq_lock); in hmdfs_queue_raw_node_evt()
535 mutex_unlock(&node->evt_lock); in hmdfs_queue_raw_node_evt()
537 hmdfs_run_evt_cb_verbosely(node, evt, true, seq); in hmdfs_queue_raw_node_evt()
538 mutex_unlock(&node->seq_lock); in hmdfs_queue_raw_node_evt()
539 schedule_delayed_work(&node->evt_dwork, in hmdfs_queue_raw_node_evt()
540 node->sbi->async_cb_delay * HZ); in hmdfs_queue_raw_node_evt()
541 } else if (hmdfs_update_node_waiting_evt(node, evt, &seq) > 0) { in hmdfs_queue_raw_node_evt()
546 mutex_lock(&node->seq_lock); in hmdfs_queue_raw_node_evt()
547 mutex_unlock(&node->evt_lock); in hmdfs_queue_raw_node_evt()
548 hmdfs_run_evt_cb_verbosely(node, evt, true, seq); in hmdfs_queue_raw_node_evt()
549 mutex_unlock(&node->seq_lock); in hmdfs_queue_raw_node_evt()
551 mutex_unlock(&node->evt_lock); in hmdfs_queue_raw_node_evt()
569 void connection_handshake_notify(struct hmdfs_peer *node, int notify_type) in connection_handshake_notify() argument
575 memcpy(param.remote_cid, node->cid, HMDFS_CID_SIZE); in connection_handshake_notify()
576 notify(node, &param); in connection_handshake_notify()
593 void connection_to_working(struct hmdfs_peer *node) in connection_to_working() argument
598 if (!node) in connection_to_working()
600 mutex_lock(&node->conn_impl_list_lock); in connection_to_working()
601 list_for_each_entry(conn_impl, &node->conn_impl_list, list) { in connection_to_working()
609 mutex_unlock(&node->conn_impl_list_lock); in connection_to_working()
610 peer_online(node); in connection_to_working()
625 conn_impl->node->version = head->version; in connection_handshake_recv_handler()
632 conn_impl->node->device_id, head->datasize, fd); in connection_handshake_recv_handler()
637 conn_impl->node->status = NODE_STAT_SHAKING; in connection_handshake_recv_handler()
642 conn_impl->node->device_id, status, fd); in connection_handshake_recv_handler()
645 connection_to_working(conn_impl->node); in connection_handshake_recv_handler()
666 peer_online(conn_impl->node); in connection_handshake_recv_handler()
682 peer_online(conn_impl->node); in connection_handshake_recv_handler()
693 connection_handshake_notify(conn_impl->node, NOTIFY_OFFLINE); in connection_handshake_recv_handler()
695 conn_impl->node->device_id, fd); in connection_handshake_recv_handler()
708 if (hmdfs_message_verify(conn->node, head, data) < 0) { in update_tls_crypto_key()
746 hmdfs_recv_mesg_callback(conn_impl->node, buf, data); in connection_working_recv_handler()
772 mutex_lock(&conn->node->conn_impl_list_lock); in connection_release()
774 mutex_unlock(&conn->node->conn_impl_list_lock); in connection_release()
779 wake_up_interruptible(&conn->node->deleting_list_wq); in connection_release()
828 static void hmdfs_dump_deleting_list(struct hmdfs_peer *node) in hmdfs_dump_deleting_list() argument
834 mutex_lock(&node->conn_impl_list_lock); in hmdfs_dump_deleting_list()
835 list_for_each_entry(con, &node->conn_deleting_list, list) { in hmdfs_dump_deleting_list()
838 count, node->device_id, tcp ? tcp->fd : -1, in hmdfs_dump_deleting_list()
842 mutex_unlock(&node->conn_impl_list_lock); in hmdfs_dump_deleting_list()
845 static bool hmdfs_conn_deleting_list_empty(struct hmdfs_peer *node) in hmdfs_conn_deleting_list_empty() argument
849 mutex_lock(&node->conn_impl_list_lock); in hmdfs_conn_deleting_list_empty()
850 empty = list_empty(&node->conn_deleting_list); in hmdfs_conn_deleting_list_empty()
851 mutex_unlock(&node->conn_impl_list_lock); in hmdfs_conn_deleting_list_empty()
856 void hmdfs_disconnect_node(struct hmdfs_peer *node) in hmdfs_disconnect_node() argument
863 if (unlikely(!node)) in hmdfs_disconnect_node()
866 hmdfs_node_inc_evt_seq(node); in hmdfs_disconnect_node()
869 node->status = NODE_STAT_OFFLINE; in hmdfs_disconnect_node()
870 hmdfs_info("Try to disconnect peer: device_id %llu", node->device_id); in hmdfs_disconnect_node()
872 mutex_lock(&node->conn_impl_list_lock); in hmdfs_disconnect_node()
873 if (!list_empty(&node->conn_impl_list)) in hmdfs_disconnect_node()
874 list_replace_init(&node->conn_impl_list, &local_conns); in hmdfs_disconnect_node()
875 mutex_unlock(&node->conn_impl_list_lock); in hmdfs_disconnect_node()
893 if (wait_event_interruptible_timeout(node->deleting_list_wq, in hmdfs_disconnect_node()
894 hmdfs_conn_deleting_list_empty(node), in hmdfs_disconnect_node()
896 hmdfs_dump_deleting_list(node); in hmdfs_disconnect_node()
899 spin_lock(&node->idr_lock); in hmdfs_disconnect_node()
900 while (node->msg_idr_process) { in hmdfs_disconnect_node()
901 spin_unlock(&node->idr_lock); in hmdfs_disconnect_node()
904 spin_lock(&node->idr_lock); in hmdfs_disconnect_node()
906 spin_unlock(&node->idr_lock); in hmdfs_disconnect_node()
908 hmdfs_queue_raw_node_evt(node, RAW_NODE_EVT_OFF); in hmdfs_disconnect_node()
911 static void hmdfs_run_simple_evt_cb(struct hmdfs_peer *node, int evt) in hmdfs_run_simple_evt_cb() argument
913 unsigned int seq = hmdfs_node_inc_evt_seq(node); in hmdfs_run_simple_evt_cb()
915 mutex_lock(&node->seq_lock); in hmdfs_run_simple_evt_cb()
916 hmdfs_node_call_evt_cb(node, evt, true, seq); in hmdfs_run_simple_evt_cb()
917 mutex_unlock(&node->seq_lock); in hmdfs_run_simple_evt_cb()
920 static void hmdfs_del_peer(struct hmdfs_peer *node) in hmdfs_del_peer() argument
927 cancel_delayed_work_sync(&node->evt_dwork); in hmdfs_del_peer()
929 hmdfs_run_simple_evt_cb(node, NODE_EVT_DEL); in hmdfs_del_peer()
931 hmdfs_release_peer_sysfs(node); in hmdfs_del_peer()
933 flush_workqueue(node->reget_conn_wq); in hmdfs_del_peer()
934 peer_put(node); in hmdfs_del_peer()
939 struct hmdfs_peer *node = NULL; in hmdfs_connections_stop() local
943 list_for_each_entry_safe(node, con_tmp, &sbi->connections.node_list, in hmdfs_connections_stop()
946 hmdfs_disconnect_node(node); in hmdfs_connections_stop()
947 hmdfs_del_peer(node); in hmdfs_connections_stop()
953 struct connection *get_conn_impl(struct hmdfs_peer *node, int connect_type) in get_conn_impl() argument
957 if (!node) in get_conn_impl()
959 mutex_lock(&node->conn_impl_list_lock); in get_conn_impl()
960 list_for_each_entry(conn_impl, &node->conn_impl_list, list) { in get_conn_impl()
964 mutex_unlock(&node->conn_impl_list_lock); in get_conn_impl()
968 mutex_unlock(&node->conn_impl_list_lock); in get_conn_impl()
970 node->device_id, connect_type); in get_conn_impl()
974 void set_conn_sock_quickack(struct hmdfs_peer *node) in set_conn_sock_quickack() argument
980 if (!node) in set_conn_sock_quickack()
982 mutex_lock(&node->conn_impl_list_lock); in set_conn_sock_quickack()
983 list_for_each_entry(conn_impl, &node->conn_impl_list, list) { in set_conn_sock_quickack()
991 mutex_unlock(&node->conn_impl_list_lock); in set_conn_sock_quickack()
1038 struct hmdfs_peer *node = NULL; in lookup_peer_by_cid_unsafe() local
1040 list_for_each_entry(node, &sbi->connections.node_list, list) in lookup_peer_by_cid_unsafe()
1041 if (!strncmp(node->cid, cid, HMDFS_CID_SIZE)) { in lookup_peer_by_cid_unsafe()
1042 peer_get(node); in lookup_peer_by_cid_unsafe()
1043 return node; in lookup_peer_by_cid_unsafe()
1073 struct hmdfs_peer *node = kzalloc(sizeof(*node), GFP_KERNEL); in alloc_peer() local
1075 if (!node) in alloc_peer()
1078 node->device_id = (u32)atomic_inc_return(&sbi->connections.conn_seq); in alloc_peer()
1080 node->async_wq = alloc_workqueue("dfs_async%u_%llu", WQ_MEM_RECLAIM, 0, in alloc_peer()
1081 sbi->seq, node->device_id); in alloc_peer()
1082 if (!node->async_wq) { in alloc_peer()
1086 node->req_handle_wq = alloc_workqueue("dfs_req%u_%llu", in alloc_peer()
1089 sbi->seq, node->device_id); in alloc_peer()
1090 if (!node->req_handle_wq) { in alloc_peer()
1094 node->dentry_wq = alloc_workqueue("dfs_dentry%u_%llu", in alloc_peer()
1096 0, sbi->seq, node->device_id); in alloc_peer()
1097 if (!node->dentry_wq) { in alloc_peer()
1101 node->retry_wb_wq = alloc_workqueue("dfs_rwb%u_%llu", in alloc_peer()
1104 sbi->seq, node->device_id); in alloc_peer()
1105 if (!node->retry_wb_wq) { in alloc_peer()
1109 node->reget_conn_wq = alloc_workqueue("dfs_regetcon%u_%llu", in alloc_peer()
1111 sbi->seq, node->device_id); in alloc_peer()
1112 if (!node->reget_conn_wq) { in alloc_peer()
1116 INIT_LIST_HEAD(&node->conn_impl_list); in alloc_peer()
1117 mutex_init(&node->conn_impl_list_lock); in alloc_peer()
1118 INIT_LIST_HEAD(&node->conn_deleting_list); in alloc_peer()
1119 init_waitqueue_head(&node->deleting_list_wq); in alloc_peer()
1120 idr_init(&node->msg_idr); in alloc_peer()
1121 spin_lock_init(&node->idr_lock); in alloc_peer()
1122 idr_init(&node->file_id_idr); in alloc_peer()
1123 spin_lock_init(&node->file_id_lock); in alloc_peer()
1124 INIT_LIST_HEAD(&node->list); in alloc_peer()
1125 kref_init(&node->ref_cnt); in alloc_peer()
1126 node->owner = sbi->seq; in alloc_peer()
1127 node->sbi = sbi; in alloc_peer()
1128 node->version = HMDFS_VERSION; in alloc_peer()
1129 node->status = NODE_STAT_SHAKING; in alloc_peer()
1130 node->conn_time = jiffies; in alloc_peer()
1131 memcpy(node->cid, cid, HMDFS_CID_SIZE); in alloc_peer()
1132 atomic64_set(&node->sb_dirty_count, 0); in alloc_peer()
1133 node->fid_cookie = 0; in alloc_peer()
1134 atomic_set(&node->evt_seq, 0); in alloc_peer()
1135 mutex_init(&node->seq_lock); in alloc_peer()
1136 mutex_init(&node->offline_cb_lock); in alloc_peer()
1137 mutex_init(&node->evt_lock); in alloc_peer()
1138 node->pending_evt = RAW_NODE_EVT_NR; in alloc_peer()
1139 node->last_evt = RAW_NODE_EVT_NR; in alloc_peer()
1140 node->cur_evt[0] = RAW_NODE_EVT_NR; in alloc_peer()
1141 node->cur_evt[1] = RAW_NODE_EVT_NR; in alloc_peer()
1142 node->seq_wr_idx = (unsigned char)UINT_MAX; in alloc_peer()
1143 node->seq_rd_idx = node->seq_wr_idx; in alloc_peer()
1144 INIT_DELAYED_WORK(&node->evt_dwork, hmdfs_node_evt_work); in alloc_peer()
1145 node->msg_idr_process = 0; in alloc_peer()
1146 node->offline_start = false; in alloc_peer()
1147 spin_lock_init(&node->wr_opened_inode_lock); in alloc_peer()
1148 INIT_LIST_HEAD(&node->wr_opened_inode_list); in alloc_peer()
1149 spin_lock_init(&node->stashed_inode_lock); in alloc_peer()
1150 node->stashed_inode_nr = 0; in alloc_peer()
1151 atomic_set(&node->rebuild_inode_status_nr, 0); in alloc_peer()
1152 init_waitqueue_head(&node->rebuild_inode_status_wq); in alloc_peer()
1153 INIT_LIST_HEAD(&node->stashed_inode_list); in alloc_peer()
1154 node->need_rebuild_stash_list = false; in alloc_peer()
1155 node->devsl = devsl; in alloc_peer()
1157 return node; in alloc_peer()
1160 if (node->async_wq) { in alloc_peer()
1161 destroy_workqueue(node->async_wq); in alloc_peer()
1162 node->async_wq = NULL; in alloc_peer()
1164 if (node->req_handle_wq) { in alloc_peer()
1165 destroy_workqueue(node->req_handle_wq); in alloc_peer()
1166 node->req_handle_wq = NULL; in alloc_peer()
1168 if (node->dentry_wq) { in alloc_peer()
1169 destroy_workqueue(node->dentry_wq); in alloc_peer()
1170 node->dentry_wq = NULL; in alloc_peer()
1172 if (node->retry_wb_wq) { in alloc_peer()
1173 destroy_workqueue(node->retry_wb_wq); in alloc_peer()
1174 node->retry_wb_wq = NULL; in alloc_peer()
1176 if (node->reget_conn_wq) { in alloc_peer()
1177 destroy_workqueue(node->reget_conn_wq); in alloc_peer()
1178 node->reget_conn_wq = NULL; in alloc_peer()
1180 kfree(node); in alloc_peer()
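
Taken together, the matches in hmdfs_update_node_waiting_evt(), hmdfs_node_evt_work() and hmdfs_queue_raw_node_evt() above outline how raw ON/OFF peer events are handled: an event is either run immediately, parked in waiting_evt[], or merged away when a redundant OFF/ON pair accumulates, with each surviving event taking a sequence number from a small ring (seq_tbl indexed modulo RAW_NODE_EVT_MAX_NR). The following is a minimal standalone sketch of just that coalescing rule, reconstructed from the matched lines; the struct layout, ring size, early-return behaviour and helper names are simplified assumptions, not the hmdfs definitions.

    #include <stdio.h>

    /* Simplified stand-ins for the hmdfs names seen in the listing above. */
    enum { RAW_NODE_EVT_OFF = 0, RAW_NODE_EVT_ON = 1, RAW_NODE_EVT_NR = 2 };
    #define RAW_NODE_EVT_MAX_NR 4   /* assumed ring size, not the kernel value */

    struct fake_peer {
            unsigned int waiting_evt[RAW_NODE_EVT_NR];
            unsigned int seq_tbl[RAW_NODE_EVT_MAX_NR];
            unsigned char seq_wr_idx;
            unsigned int merged_evt;
            unsigned int evt_seq;
    };

    /*
     * Park a raw event and apply the OFF/ON coalescing visible in the
     * hmdfs_update_node_waiting_evt() matches: once at least two OFF waits
     * and one ON wait have accumulated, one OFF/ON pair is dropped and its
     * two ring slots reclaimed (seq_wr_idx -= 2); otherwise the event gets a
     * fresh sequence number in the ring. Returning early after merging is an
     * assumption of this sketch.
     */
    static void park_waiting_evt(struct fake_peer *p, int evt)
    {
            p->waiting_evt[evt]++;

            if (p->waiting_evt[RAW_NODE_EVT_OFF] >= 2 &&
                p->waiting_evt[RAW_NODE_EVT_ON] >= 1) {
                    p->waiting_evt[RAW_NODE_EVT_OFF] -= 1;
                    p->waiting_evt[RAW_NODE_EVT_ON] -= 1;
                    p->seq_wr_idx -= 2;
                    p->merged_evt += 2;
                    return;
            }

            p->seq_tbl[(p->seq_wr_idx++) % RAW_NODE_EVT_MAX_NR] = ++p->evt_seq;
    }

    int main(void)
    {
            struct fake_peer peer = { .seq_wr_idx = 0 };

            /* OFF, ON, OFF: the second OFF triggers merging of one OFF/ON pair. */
            park_waiting_evt(&peer, RAW_NODE_EVT_OFF);
            park_waiting_evt(&peer, RAW_NODE_EVT_ON);
            park_waiting_evt(&peer, RAW_NODE_EVT_OFF);

            printf("waiting OFF=%u ON=%u merged=%u\n",
                   peer.waiting_evt[RAW_NODE_EVT_OFF],
                   peer.waiting_evt[RAW_NODE_EVT_ON],
                   peer.merged_evt);
            return 0;
    }

Queueing OFF, ON, OFF in this sketch leaves a single waiting OFF and merged_evt == 2, mirroring the "offline wait + online wait + offline wait collapses to one offline wait" effect implied by the paired decrements at the matched lines.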