gauneg committed
Commit 2769cb4
1 Parent(s): 4f3f11f

commit files to HF hub
README.md CHANGED
@@ -28,14 +28,14 @@ pip install --upgrade huggingface_hub
 from huggingface_hub import hf_hub_download
 import sys
 # Download the custom model code
-bert_gts_pretrained = hf_hub_download(repo_id="gauneg/bert-gts-absa-triple", filename="bert_gts_pretrained.py")
+bert_gts_pretrained = hf_hub_download(repo_id="gauneg/bert-gts-absa-triple", filename="bert_opinion.py")
 post = hf_hub_download(repo_id="gauneg/bert-gts-absa-triple", filename="post.py")
 
 sys.path.append(bert_gts_pretrained.rsplit("/", 1)[0])
 sys.path.append(post.rsplit("/", 1)[0])
 
 
-from bert_gts_pretrained import GTSBertBaseABSATriple
+from bert_gts_pretrained import BertGTSOpinionTriple
 from post import DecodeAndEvaluate
 
 
@@ -44,7 +44,7 @@ from transformers import AutoTokenizer
 
 model_id = 'gauneg/bert-gts-absa-triple'
 tokenizer = AutoTokenizer.from_pretrained(model_id)
-model = GTSBertBaseABSATriple.from_pretrained(model_id)
+model = BertGTSOpinionTriple.from_pretrained(model_id)
 dec_and_infer = DecodeAndEvaluate(tokenizer)
 test_sentence0 = """I charge it at night and skip taking the cord with me because of the good battery life ."""
 test_sentence = "The Dell Inspiron 14 Plus is the most well-rounded laptop with great display and battery life that money can buy."
bert_gts_pretrained.py → bert_opinion.py RENAMED
@@ -3,22 +3,23 @@ from transformers import AutoTokenizer, AutoModel, PreTrainedModel, PretrainedConfig
 import torch.nn.functional as F
 
 
-class GTSBertBaseABSATripleConfig(PretrainedConfig):
-    def __init__(self, feat_dim = 768, max_len=64, class_num=6, **kwargs):
+class BertGTSOpinionTripleConfig(PretrainedConfig):
+    model_type = 'multi-infer-bert-uncased'
+    def __init__(self, feat_dim = 768, max_len=128, class_num=6, **kwargs):
         super().__init__(**kwargs)
         self.feat_dim = feat_dim
         self.max_len = max_len
         self.class_num = class_num
 
-class GTSBertBaseABSATriple(PreTrainedModel):
-    config_class = GTSBertBaseABSATripleConfig
+class BertGTSOpinionTriple(PreTrainedModel):
+    config_class = BertGTSOpinionTripleConfig
     def __init__(self, config):
         model_id = 'google-bert/bert-base-uncased'
         super().__init__(config)
         self.model = AutoModel.from_pretrained(model_id)
         self.max_seq_len = config.max_len
-        self.bert_feat_dim = config.feat_dim#768
-        self.class_num = config.class_num#6
+        self.bert_feat_dim = config.feat_dim #768
+        self.class_num = config.class_num #6
         self.cls_linear = torch.nn.Linear(self.bert_feat_dim*2, self.class_num)
         self.feature_linear = torch.nn.Linear(self.bert_feat_dim*2+self.class_num*3, self.bert_feat_dim*2)
         self.dropout_output = torch.nn.Dropout(0.1)
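The renamed config class now pins `model_type = 'multi-infer-bert-uncased'`, which matches the updated config.json below. As a short sketch, assuming `bert_opinion.py` is already importable as in the README snippet, the custom classes could be registered with the Auto* factories; loading `BertGTSOpinionTriple` directly, as the README does, remains the documented route.

```python
# Optional sketch: register the custom classes so the 'multi-infer-bert-uncased'
# model_type in config.json resolves through the Auto* factories.
# Assumes bert_opinion.py is already on sys.path (see the README sketch above).
from transformers import AutoConfig, AutoModel
from bert_opinion import BertGTSOpinionTripleConfig, BertGTSOpinionTriple

AutoConfig.register("multi-infer-bert-uncased", BertGTSOpinionTripleConfig)
AutoModel.register(BertGTSOpinionTripleConfig, BertGTSOpinionTriple)

# After registration the generic factory instantiates the custom class.
model = AutoModel.from_pretrained("gauneg/bert-gts-absa-triple")
```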
config.json CHANGED
@@ -1,11 +1,11 @@
 {
   "architectures": [
-    "MultiInferBertUncased"
+    "BertGTSOpinionTriple"
   ],
   "class_num": 6,
   "feat_dim": 768,
-  "max_len": 64,
-  "model_type": "gts_opinion_triple",
+  "max_len": 128,
+  "model_type": "multi-infer-bert-uncased",
   "torch_dtype": "float32",
-  "transformers_version": "4.42.3"
+  "transformers_version": "4.44.2"
 }
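A small check sketch, assuming the same setup as above, showing that the updated values (max_len raised from 64 to 128, the new model_type) surface on the loaded config and model:

```python
# Sketch: confirm the values from the updated config.json on the loaded objects.
# Assumes bert_opinion.py is importable as shown earlier.
from bert_opinion import BertGTSOpinionTripleConfig, BertGTSOpinionTriple

repo_id = "gauneg/bert-gts-absa-triple"

config = BertGTSOpinionTripleConfig.from_pretrained(repo_id)
assert config.model_type == "multi-infer-bert-uncased"
assert config.max_len == 128            # was 64 before this commit
assert config.feat_dim == 768 and config.class_num == 6

model = BertGTSOpinionTriple.from_pretrained(repo_id)
print(model.max_seq_len, model.bert_feat_dim, model.class_num)  # 128 768 6
```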
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:cf4d4d02fb6b5d28e9c8008034866fad48cc684579d46c14b8521bdc0e98b736
+oid sha256:07a9f26340c6df71c594416b6dc5813a919cef064c1f540fd0c87377e53fec7e
 size 447543680
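Since a Git LFS pointer records the blob's SHA-256, the new oid can be used to confirm that a local download matches the weights introduced by this commit; a minimal sketch:

```python
# Sketch: verify a download of model.safetensors against the new LFS pointer.
import hashlib
from huggingface_hub import hf_hub_download

path = hf_hub_download(
    repo_id="gauneg/bert-gts-absa-triple",
    filename="model.safetensors",
    revision="2769cb4",  # pin to this commit so the digest below applies
)

sha = hashlib.sha256()
with open(path, "rb") as fh:
    for chunk in iter(lambda: fh.read(1 << 20), b""):
        sha.update(chunk)

expected = "07a9f26340c6df71c594416b6dc5813a919cef064c1f540fd0c87377e53fec7e"
print(sha.hexdigest() == expected)  # True if the local file matches this commit
```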