Update code/inference.py
Browse files — code/inference.py (+4 −6)
code/inference.py
CHANGED
@@ -2,7 +2,7 @@ import os
(old version)
 2   import json
 3   import torch
 4   import torch.nn as nn
 5  -from transformers import
 6   import logging
 7
 8   logger = logging.getLogger(__name__)
@@ -42,15 +42,13 @@ def model_fn(model_dir, context=None):
(old version)
 42      # Load tokenizer
 43      tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
 44
 45  -   #
 46      config = AutoConfig.from_pretrained(model_id,
 47                                          num_labels=2,
 48  -                                       architectures=["RobertaForSequenceClassification"],
 49  -                                       model_type="roberta",
 50                                          trust_remote_code=True)
 51
 52  -   # Load base model
 53  -   base_model =
 54          model_id,
 55          config=config,
 56          torch_dtype=torch.bfloat16 if device.type == 'cuda' else torch.float32,
(new version)
 2   import json
 3   import torch
 4   import torch.nn as nn
 5  +from transformers import AutoModel, AutoTokenizer, AutoConfig
 6   import logging
 7
 8   logger = logging.getLogger(__name__)

 42      # Load tokenizer
 43      tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
 44
 45  +   # Load config and specify it's a Phi3Config
 46      config = AutoConfig.from_pretrained(model_id,
 47                                          num_labels=2,
 48                                          trust_remote_code=True)
 49
 50  +   # Load base model
 51  +   base_model = AutoModel.from_pretrained(
 52          model_id,
 53          config=config,
 54          torch_dtype=torch.bfloat16 if device.type == 'cuda' else torch.float32,