ZhuofengLi committed
Commit 9dc103a · verified · 1 Parent(s): 59b0ba9

Upload folder using huggingface_hub
arxiv/arxiv.md CHANGED
@@ -17,7 +17,7 @@ Node classification tasks in the arxiv dataset include predicting the paper's category
 
 * Kuansan Wang, Zhihong Shen, Chiyuan Huang, Chieh-Han Wu, Yuxiao Dong, and Anshul Kanakia. Microsoft Academic Graph: When experts are not enough. Quantitative Science Studies, 1(1):396–413, 2020.
 
- You can directly download the raw dataset without edge texts (citation context) from the following codes:
+ You can directly get the raw dataset with the following code:
 
 ```python
 from ogb.nodeproppred import PygNodePropPredDataset
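The hunk above cuts off inside the code fence; for reference, a minimal sketch of how that snippet typically continues, assuming the `ogb` package and the `ogbn-arxiv` dataset name (the `root` path is illustrative):

```python
from ogb.nodeproppred import PygNodePropPredDataset

# Downloads ogbn-arxiv (node features, labels, citation edge_index) on first use.
# Note: this raw OGB copy does not carry the edge texts (citation contexts).
dataset = PygNodePropPredDataset(name='ogbn-arxiv', root='dataset')
graph = dataset[0]                    # a single PyG Data object
split_idx = dataset.get_idx_split()   # train/valid/test node indices
print(graph)
```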
arxiv/emb/arxiv_bert_base_uncased_512_cls_edge.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:aac49e42ef2e2509f236854cdb0a6ed32816a12abb67b0a2278fab1f0839c43f
+ size 1791350701
arxiv/emb/arxiv_bert_base_uncased_512_cls_node.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8ca5fe5986a551aa3bafd00ba33037522fa30ee4f9aac2d91340fa3b6d25198e
+ size 260112301
arxiv/emb/arxiv_bert_large_uncased_512_cls_edge.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4cb97fb3973c645f6ea4103d51e33008e9ba1885521410498c1cd0b359fc8b94
+ size 2388467122
arxiv/emb/arxiv_bert_large_uncased_512_cls_node.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:15640c1948a6a0de9a6992fa39be8ce5ef7dc2c5584b0ae95c8d63631897cee4
+ size 346815922
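The four `.pt` files above are Git LFS pointers to precomputed BERT `[CLS]` embeddings; the byte sizes are consistent with float16 tensors of shape 169343 × 768/1024 (nodes, base/large) and 1166243 × 768/1024 (edges). A minimal loading sketch after `git lfs pull`, assuming each file holds a single tensor saved with `torch.save`:

```python
import torch

# Assumes the LFS blobs have been fetched (git lfs pull) and each file stores
# one tensor whose rows align with the 169,343 nodes / 1,166,243 edges.
node_emb = torch.load('arxiv/emb/arxiv_bert_base_uncased_512_cls_node.pt',
                      map_location='cpu')
print(node_emb.shape, node_emb.dtype)  # expected: (169343, 768), torch.float16
```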
arxiv/processed/arxiv.pkl CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:fd4f548fe6c16ec81306633a35da2cd3915a4fce004fbb31f9ae7f658fd1d63a
- size 388600042
+ oid sha256:2c79dbeaf0b9bc1458c04d385c492742987cd02a08beda123387ca392cb27e59
+ size 389843902
arxiv/tmp.ipynb ADDED
@@ -0,0 +1,109 @@
+ {
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "/home/lizhuofeng/.local/lib/python3.10/site-packages/torch/storage.py:414: FutureWarning: You are using `torch.load` with `weights_only=False` (the current default value), which uses the default pickle module implicitly. It is possible to construct malicious pickle data which will execute arbitrary code during unpickling (See https://github.com/pytorch/pytorch/blob/main/SECURITY.md#untrusted-models for more details). In a future release, the default value for `weights_only` will be flipped to `True`. This limits the functions that could be executed during unpickling. Arbitrary objects will no longer be allowed to be loaded via this mode unless they are explicitly allowlisted by the user via `torch.serialization.add_safe_globals`. We recommend you start setting `weights_only=True` for any use case where you don't have full control of the loaded file. Please open an issue on GitHub for any issues related to this experimental feature.\n",
+ " return torch.load(io.BytesIO(b))\n"
+ ]
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Data(edge_index=[2, 1166243], text_nodes=[169343], text_edges=[1166243], node_labels=[169343])\n"
+ ]
+ }
+ ],
+ "source": [
+ "import pickle\n",
+ "\n",
+ "with open('processed/arxiv.pkl', 'rb') as f:\n",
+ "    data = pickle.load(f)\n",
+ "\n",
+ "print(data)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 11,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "0\n"
+ ]
+ }
+ ],
+ "source": [
+ "count = 0\n",
+ "for j, i in enumerate(data.text_nodes):\n",
+ "    if i is None:\n",
+ "        count += 1\n",
+ "        data.text_nodes[j] = ''\n",
+ "\n",
+ "\n",
+ "for j, i in enumerate(data.text_edges):\n",
+ "    if i is None:\n",
+ "        count += 1\n",
+ "        data.text_edges[j] = ''\n",
+ "print(count)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 10,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "count = 0"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 12,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "with open('processed/arxiv.pkl', 'wb') as f:\n",
+ "    pickle.dump(data, f)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.10.12"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+ }
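Since `tmp.ipynb` rewrites `processed/arxiv.pkl` in place after replacing `None` texts with empty strings, a quick hedged check that the committed pickle is fully cleaned (same loading pattern as the notebook):

```python
import pickle

with open('processed/arxiv.pkl', 'rb') as f:
    data = pickle.load(f)

# After the notebook's cleanup, every text entry should be a str (possibly '').
assert not any(t is None for t in data.text_nodes)
assert not any(t is None for t in data.text_edges)
print(data)  # Data(edge_index=[2, 1166243], text_nodes=[169343], ...)
```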
twitter/emb/tweets_bert_base_uncased_512_cls_edge.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:050b7c8b64cd1fa509b1c31f1ae1b13f13a298e3726306a0f2bd808cdb0fe14a
+ oid sha256:d22f6c2918334bc862d19a0f55f7669c47283eb2056631612521c193e0aa6438
 size 114688434
twitter/emb/tweets_bert_base_uncased_512_cls_node.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:9b874ce830fbf92010bb2bf6617a0519d79ac407798f9bcbd28c81e3e003e167
+ oid sha256:f7a312cdb158d74c970c447f46e51355f9cc85d8fa0f3cea1f161dfe2978325c
 size 93367154
twitter/emb/tweets_bert_large_uncased_512_cls_edge.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:86102df08c60b5a757a5ebd4526287e667283e8f30de30df3a567a8276e24cfc
+ size 152917431
twitter/emb/tweets_bert_large_uncased_512_cls_node.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9c6e3ea52aaf5af001587cc3be1df17192a363c6ca71e2ead45893ae1deb448b
+ size 124489143
twitter/processed/twitter.pkl CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:be2e69838e019287a3d5d91f2b8d81f17125c90cebf8137358a99160287924ab
- size 8407098
+ oid sha256:73792b04968c3cb94930a8f84db6e99e96605816ea590d9171328af5e2ebd14a
+ size 7971933
twitter/raw/process_final_twitter.ipynb CHANGED
@@ -2,7 +2,7 @@
  "cells": [
  {
  "cell_type": "code",
- "execution_count": 13,
+ "execution_count": 1,
  "metadata": {},
  "outputs": [],
  "source": [
@@ -17,7 +17,7 @@
  },
  {
  "cell_type": "code",
- "execution_count": 2,
+ "execution_count": 3,
  "metadata": {},
  "outputs": [],
  "source": [
@@ -31,14 +31,14 @@
  },
  {
  "cell_type": "code",
- "execution_count": 5,
+ "execution_count": 17,
  "metadata": {},
  "outputs": [
  {
  "name": "stderr",
  "output_type": "stream",
  "text": [
- "67682it [00:04, 15665.18it/s]\n"
+ "67682it [00:04, 14848.15it/s]\n"
  ]
  }
  ],
@@ -50,21 +50,25 @@
  "text_edges = []\n",
  "text_nodes = [-1] * len(df) * 20\n",
  "count = 0\n",
+ "node_labels = [-1] * len(df) * 20\n",
  "\n",
  "# Use df instead of g for iteration\n",
  "for _, row in tqdm.tqdm(df.iterrows()):\n",
  "    # Convert tweet_id and user_id to string to ensure consistency\n",
- "    tweet_id = str(row['tweet_id'])\n",
- "    user_id = str(row['user_id'])\n",
- "    \n",
+ "    tweet_id = str(row[\"tweet_id\"])\n",
+ "    user_id = str(row[\"user_id\"])\n",
+ "\n",
  "    if tweet_id not in tweet_id2idx:\n",
  "        tweet_id2idx[tweet_id] = count\n",
  "        tweet.append(count)\n",
  "        count += 1\n",
  "    else:\n",
  "        tweet.append(tweet_id2idx[tweet_id])\n",
- "    text_nodes[tweet_id2idx[tweet_id]] = f\"tweet{tweet_id2idx[tweet_id]} of event{row['event_id']}\"\n",
- "    \n",
+ "    text_nodes[tweet_id2idx[tweet_id]] = (\n",
+ "        f\"tweet\"\n",
+ "    )\n",
+ "    node_labels[tweet_id2idx[tweet_id]] = str(row[\"event_id\"])\n",
+ "\n",
  "    if user_id not in user_id2idx:\n",
  "        user_id2idx[user_id] = count\n",
  "        user.append(count)\n",
@@ -72,10 +76,11 @@
  "    else:\n",
  "        user.append(user_id2idx[user_id])\n",
  "    text_nodes[user_id2idx[user_id]] = f\"user\"\n",
- "    \n",
- "    text_edges.append(row['text'])\n",
- "    \n",
- "    for mention in row['user_mentions']:\n",
+ "    node_labels[user_id2idx[user_id]] = -1\n",
+ "\n",
+ "    text_edges.append(row[\"text\"])\n",
+ "\n",
+ "    for mention in row[\"user_mentions\"]:\n",
  "        if mention not in user_id2idx:\n",
  "            user_id2idx[mention] = count\n",
  "            user.append(count)\n",
@@ -84,21 +89,23 @@
  "            user.append(user_id2idx[mention])\n",
  "        tweet.append(tweet_id2idx[tweet_id])\n",
  "        text_nodes[user_id2idx[mention]] = f\"mentioned user\"\n",
- "        text_edges.append(row['text'])"
+ "        node_labels[user_id2idx[mention]] = -1\n",
+ "        text_edges.append(row[\"text\"])"
  ]
  },
  {
  "cell_type": "code",
- "execution_count": 8,
+ "execution_count": 18,
  "metadata": {},
  "outputs": [],
  "source": [
- "text_nodes = text_nodes[:count]"
+ "text_nodes = text_nodes[:count]\n",
+ "node_labels = node_labels[:count]"
  ]
  },
  {
  "cell_type": "code",
- "execution_count": 10,
+ "execution_count": 19,
  "metadata": {},
  "outputs": [],
  "source": [
@@ -106,26 +113,20 @@
  "graph = Data(\n",
  "\t\t\ttext_nodes=text_nodes,\n",
  "\t\t\ttext_edges=text_edges,\n",
- "\t\t\tedge_index=torch.tensor(edge_index, dtype=torch.long)\n",
+ "\t\t\tedge_index=torch.tensor(edge_index, dtype=torch.long),\n",
+ "\t\t\tnode_labels=node_labels\n",
  "\t\t)"
  ]
  },
  {
  "cell_type": "code",
- "execution_count": 21,
+ "execution_count": 20,
  "metadata": {},
  "outputs": [],
  "source": [
  "with open('../processed/twitter.pkl', 'wb') as f:\n",
  "    pickle.dump(graph, f)"
  ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": []
  }
  ],
  "metadata": {