PranavBP525 committed
Commit 7d34933 · verified · 1 Parent(s): 249553f

Push model using huggingface_hub.

Files changed (4)
  1. README.md (+3 -3)
  2. pytorch_model.bin (+1 -1)
  3. tokenizer.json (+17 -2)
  4. tokenizer_config.json (+1 -0)
README.md CHANGED
@@ -25,7 +25,7 @@ You can then generate text as follows:
 ```python
 from transformers import pipeline
 
-generator = pipeline("text-generation", model="PranavBP525//tmp/tmp_fklzuzb/PranavBP525/phi-2-storygen-v2")
+generator = pipeline("text-generation", model="PranavBP525//tmp/tmpeif2umsr/PranavBP525/phi-2-storygen-v2")
 outputs = generator("Hello, my llama is cute")
 ```
 
@@ -35,8 +35,8 @@ If you want to use the model for training or to obtain the outputs from the value head
 from transformers import AutoTokenizer
 from trl import AutoModelForCausalLMWithValueHead
 
-tokenizer = AutoTokenizer.from_pretrained("PranavBP525//tmp/tmp_fklzuzb/PranavBP525/phi-2-storygen-v2")
-model = AutoModelForCausalLMWithValueHead.from_pretrained("PranavBP525//tmp/tmp_fklzuzb/PranavBP525/phi-2-storygen-v2")
+tokenizer = AutoTokenizer.from_pretrained("PranavBP525//tmp/tmpeif2umsr/PranavBP525/phi-2-storygen-v2")
+model = AutoModelForCausalLMWithValueHead.from_pretrained("PranavBP525//tmp/tmpeif2umsr/PranavBP525/phi-2-storygen-v2")
 
 inputs = tokenizer("Hello, my llama is cute", return_tensors="pt")
 outputs = model(**inputs, labels=inputs["input_ids"])
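A usage note on the value-head example in this README: TRL's `AutoModelForCausalLMWithValueHead` returns a plain tuple rather than a `ModelOutput`. Below is a minimal sketch of unpacking it, assuming a local checkout at `./phi-2-storygen-v2` (a placeholder path) and the usual `(lm_logits, loss, value)` ordering of the returned tuple.

```python
from transformers import AutoTokenizer
from trl import AutoModelForCausalLMWithValueHead

# Hedged sketch: "./phi-2-storygen-v2" is a placeholder for a local clone of
# this repo; the tuple order (lm_logits, loss, value) is assumed from TRL's
# value-head wrapper.
tokenizer = AutoTokenizer.from_pretrained("./phi-2-storygen-v2")
model = AutoModelForCausalLMWithValueHead.from_pretrained("./phi-2-storygen-v2")

inputs = tokenizer("Hello, my llama is cute", return_tensors="pt")
lm_logits, loss, value = model(**inputs, labels=inputs["input_ids"])

print(lm_logits.shape)  # (batch, seq_len, vocab_size) next-token logits
print(value.shape)      # (batch, seq_len) per-token value estimates
```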
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9b7163ca32de414a48138c8ee3ba3ff2d8cb1b201e616202172a060b81fba8b1
+oid sha256:8317cb48192b8b09e1b56a9f73fcd2cf82e8f9d2f9e738f3215a4d53ae84cd23
 size 11772
tokenizer.json CHANGED
@@ -1,7 +1,21 @@
 {
   "version": "1.0",
-  "truncation": null,
-  "padding": null,
+  "truncation": {
+    "direction": "Right",
+    "max_length": 1024,
+    "strategy": "LongestFirst",
+    "stride": 0
+  },
+  "padding": {
+    "strategy": {
+      "Fixed": 1024
+    },
+    "direction": "Left",
+    "pad_to_multiple_of": null,
+    "pad_id": 50256,
+    "pad_type_id": 0,
+    "pad_token": "<|endoftext|>"
+  },
   "added_tokens": [
     {
       "id": 50256,
@@ -382,6 +396,7 @@
     "end_of_word_suffix": "",
     "fuse_unk": false,
     "byte_fallback": false,
+    "ignore_merges": false,
     "vocab": {
       "!": 0,
       "\"": 1,
tokenizer_config.json CHANGED
@@ -320,6 +320,7 @@
   "model_max_length": 2048,
   "pad_token": "<|endoftext|>",
   "padding_side": "left",
+  "return_token_type_ids": false,
   "tokenizer_class": "CodeGenTokenizer",
   "unk_token": "<|endoftext|>"
 }
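The added `return_token_type_ids: false` is meant to stop the tokenizer from emitting `token_type_ids` in its encodings. A quick, hedged way to check the resulting behavior locally (placeholder path):

```python
from transformers import AutoTokenizer

# Hedged sketch with a placeholder local path: check whether encodings
# still include token_type_ids after this config change.
tokenizer = AutoTokenizer.from_pretrained("./phi-2-storygen-v2")
enc = tokenizer("Hello, my llama is cute")
print("token_type_ids" in enc)  # expected to be False with this setting
```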