Add io.yaml files for answer relevance intrinsics
- answer_relevance_classifier/alora/granite-3.3-2b-instruct/io.yaml +58 -0
- answer_relevance_classifier/alora/granite-3.3-8b-instruct/io.yaml +58 -0
- answer_relevance_classifier/lora/granite-3.3-2b-instruct/io.yaml +58 -0
- answer_relevance_classifier/lora/granite-3.3-8b-instruct/io.yaml +58 -0
- answer_relevance_rewriter/alora/granite-3.3-2b-instruct/io.yaml +29 -0
- answer_relevance_rewriter/alora/granite-3.3-8b-instruct/io.yaml +29 -0
- answer_relevance_rewriter/lora/granite-3.3-2b-instruct/io.yaml +29 -0
- answer_relevance_rewriter/lora/granite-3.3-8b-instruct/io.yaml +29 -0
answer_relevance_classifier/alora/granite-3.3-2b-instruct/io.yaml
ADDED
@@ -0,0 +1,58 @@
# Model name string, or null to use whatever is provided in the chat completion request
model: ~
# JSON schema of the model's output
response_format: |
  {
    "properties": {
      "answer_relevance_analysis": {
        "title": "Answer Relevance Analysis",
        "type": "string"
      },
      "answer_relevance_category": {
        "title": "Answer Relevance Category",
        "type": "string",
        "enum": [
          "Pertinent",
          "Pertinent with relevant extra",
          "Excessive unnecessary information",
          "Unduly restrictive",
          "Too vague or generic",
          "Contextual misalignment",
          "Misinterpreted inquiry",
          "No attempt"
        ]
      },
      "answer_relevance_judgment": {
        "title": "Answer Relevance Judgment",
        "type": "boolean"
      }
    },
    "required": [
      "answer_relevance_analysis",
      "answer_relevance_category",
      "answer_relevance_judgment"
    ],
    "title": "AnswerRelevanceRawOutput",
    "type": "object"
  }
# Additional turn of instructions to add to the chat
instruction: "answer_relevance"
# Data transformations to perform during post-processing
transformations:
  # Convert categorical answer to continuous value by decoding logprobs
  - type: likelihood
    categories_to_values:
      true: 1.0
      false: 0.0
    input_path: ["answer_relevance_judgment"]
  # Rename answer_relevance_judgment column to reflect likelihood transformation
  - type: project
    input_path: []
    retained_fields:
      "answer_relevance_analysis": "answer_relevance_analysis"
      "answer_relevance_category": "answer_relevance_category"
      "answer_relevance_judgment": "answer_relevance_likelihood"
parameters:
  # Current LoRA can be quite verbose in its explanations.
  max_completion_tokens: 1024
sentence_boundaries: ~

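For readers wiring this classifier up, here is a minimal sketch of what the post-processing described above amounts to, assuming the serving stack exposes logprobs for the generated true/false judgment token. The helper names are illustrative only, not the intrinsics library's API.

import math

def likelihood_from_logprobs(token_logprobs, categories_to_values):
    # Weight each category's configured value (true -> 1.0, false -> 0.0) by the
    # renormalized probability of its token, mirroring the `likelihood` transformation.
    probs = {cat: math.exp(lp) for cat, lp in token_logprobs.items()
             if cat in categories_to_values}
    total = sum(probs.values()) or 1.0
    return sum(categories_to_values[cat] * p / total for cat, p in probs.items())

def project(record, retained_fields):
    # Keep only the configured fields, renaming answer_relevance_judgment to
    # answer_relevance_likelihood as in the `project` transformation above.
    return {new_name: record[old_name] for old_name, new_name in retained_fields.items()}

# Hypothetical logprobs for the judgment token of one completion (illustrative values).
score = likelihood_from_logprobs({"true": -0.05, "false": -3.0}, {"true": 1.0, "false": 0.0})
record = {
    "answer_relevance_analysis": "The response answers the question directly.",
    "answer_relevance_category": "Pertinent",
    "answer_relevance_judgment": score,
}
print(project(record, {
    "answer_relevance_analysis": "answer_relevance_analysis",
    "answer_relevance_category": "answer_relevance_category",
    "answer_relevance_judgment": "answer_relevance_likelihood",
}))

The project step only renames the column; a likelihood near 1.0 means the adapter is confident the response is relevant.
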
answer_relevance_classifier/alora/granite-3.3-8b-instruct/io.yaml
ADDED
@@ -0,0 +1,58 @@
# Model name string, or null to use whatever is provided in the chat completion request
model: ~
# JSON schema of the model's output
response_format: |
  {
    "properties": {
      "answer_relevance_analysis": {
        "title": "Answer Relevance Analysis",
        "type": "string"
      },
      "answer_relevance_category": {
        "title": "Answer Relevance Category",
        "type": "string",
        "enum": [
          "Pertinent",
          "Pertinent with relevant extra",
          "Excessive unnecessary information",
          "Unduly restrictive",
          "Too vague or generic",
          "Contextual misalignment",
          "Misinterpreted inquiry",
          "No attempt"
        ]
      },
      "answer_relevance_judgment": {
        "title": "Answer Relevance Judgment",
        "type": "boolean"
      }
    },
    "required": [
      "answer_relevance_analysis",
      "answer_relevance_category",
      "answer_relevance_judgment"
    ],
    "title": "AnswerRelevanceRawOutput",
    "type": "object"
  }
# Additional turn of instructions to add to the chat
instruction: "answer_relevance"
# Data transformations to perform during post-processing
transformations:
  # Convert categorical answer to continuous value by decoding logprobs
  - type: likelihood
    categories_to_values:
      true: 1.0
      false: 0.0
    input_path: ["answer_relevance_judgment"]
  # Rename answer_relevance_judgment column to reflect likelihood transformation
  - type: project
    input_path: []
    retained_fields:
      "answer_relevance_analysis": "answer_relevance_analysis"
      "answer_relevance_category": "answer_relevance_category"
      "answer_relevance_judgment": "answer_relevance_likelihood"
parameters:
  # Current LoRA can be quite verbose in its explanations.
  max_completion_tokens: 1024
sentence_boundaries: ~

answer_relevance_classifier/lora/granite-3.3-2b-instruct/io.yaml
ADDED
@@ -0,0 +1,58 @@
# Model name string, or null to use whatever is provided in the chat completion request
model: ~
# JSON schema of the model's output
response_format: |
  {
    "properties": {
      "answer_relevance_analysis": {
        "title": "Answer Relevance Analysis",
        "type": "string"
      },
      "answer_relevance_category": {
        "title": "Answer Relevance Category",
        "type": "string",
        "enum": [
          "Pertinent",
          "Pertinent with relevant extra",
          "Excessive unnecessary information",
          "Unduly restrictive",
          "Too vague or generic",
          "Contextual misalignment",
          "Misinterpreted inquiry",
          "No attempt"
        ]
      },
      "answer_relevance_judgment": {
        "title": "Answer Relevance Judgment",
        "type": "boolean"
      }
    },
    "required": [
      "answer_relevance_analysis",
      "answer_relevance_category",
      "answer_relevance_judgment"
    ],
    "title": "AnswerRelevanceRawOutput",
    "type": "object"
  }
# Additional turn of instructions to add to the chat
instruction: "answer_relevance"
# Data transformations to perform during post-processing
transformations:
  # Convert categorical answer to continuous value by decoding logprobs
  - type: likelihood
    categories_to_values:
      true: 1.0
      false: 0.0
    input_path: ["answer_relevance_judgment"]
  # Rename answer_relevance_judgment column to reflect likelihood transformation
  - type: project
    input_path: []
    retained_fields:
      "answer_relevance_analysis": "answer_relevance_analysis"
      "answer_relevance_category": "answer_relevance_category"
      "answer_relevance_judgment": "answer_relevance_likelihood"
parameters:
  # Current LoRA can be quite verbose in its explanations.
  max_completion_tokens: 1024
sentence_boundaries: ~

answer_relevance_classifier/lora/granite-3.3-8b-instruct/io.yaml
ADDED
@@ -0,0 +1,58 @@
# Model name string, or null to use whatever is provided in the chat completion request
model: ~
# JSON schema of the model's output
response_format: |
  {
    "properties": {
      "answer_relevance_analysis": {
        "title": "Answer Relevance Analysis",
        "type": "string"
      },
      "answer_relevance_category": {
        "title": "Answer Relevance Category",
        "type": "string",
        "enum": [
          "Pertinent",
          "Pertinent with relevant extra",
          "Excessive unnecessary information",
          "Unduly restrictive",
          "Too vague or generic",
          "Contextual misalignment",
          "Misinterpreted inquiry",
          "No attempt"
        ]
      },
      "answer_relevance_judgment": {
        "title": "Answer Relevance Judgment",
        "type": "boolean"
      }
    },
    "required": [
      "answer_relevance_analysis",
      "answer_relevance_category",
      "answer_relevance_judgment"
    ],
    "title": "AnswerRelevanceRawOutput",
    "type": "object"
  }
# Additional turn of instructions to add to the chat
instruction: "answer_relevance"
# Data transformations to perform during post-processing
transformations:
  # Convert categorical answer to continuous value by decoding logprobs
  - type: likelihood
    categories_to_values:
      true: 1.0
      false: 0.0
    input_path: ["answer_relevance_judgment"]
  # Rename answer_relevance_judgment column to reflect likelihood transformation
  - type: project
    input_path: []
    retained_fields:
      "answer_relevance_analysis": "answer_relevance_analysis"
      "answer_relevance_category": "answer_relevance_category"
      "answer_relevance_judgment": "answer_relevance_likelihood"
parameters:
  # Current LoRA can be quite verbose in its explanations.
  max_completion_tokens: 1024
sentence_boundaries: ~

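Before the rewriter configs, a sketch of how one of these classifier io.yaml files might be consumed, assuming an OpenAI-compatible endpoint that supports JSON-schema constrained decoding and a served adapter named answer-relevance-lora (both assumptions, not part of this commit). The extra answer_relevance instruction turn referenced by the `instruction` key is omitted here for brevity.

import json
import yaml
from openai import OpenAI

# Load the config and the JSON schema it embeds as a block scalar.
with open("answer_relevance_classifier/lora/granite-3.3-8b-instruct/io.yaml") as f:
    cfg = yaml.safe_load(f)
schema = json.loads(cfg["response_format"])

client = OpenAI(base_url="http://localhost:8000/v1", api_key="none")  # assumed local server
chat = [
    {"role": "user", "content": "What is the capital of France?"},
    {"role": "assistant", "content": "Paris. It is also famous for the Louvre."},
]
completion = client.chat.completions.create(
    model=cfg["model"] or "answer-relevance-lora",  # io.yaml leaves the model to the request
    messages=chat,
    response_format={"type": "json_schema",
                     "json_schema": {"name": schema.get("title", "output"), "schema": schema}},
    max_completion_tokens=cfg["parameters"]["max_completion_tokens"],
)
raw = json.loads(completion.choices[0].message.content)  # shaped like AnswerRelevanceRawOutput
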
answer_relevance_rewriter/alora/granite-3.3-2b-instruct/io.yaml
ADDED
@@ -0,0 +1,29 @@
# Model name string, or null to use whatever is provided in the chat completion request
model: ~
# JSON schema of the model's output, or null if constrained decoding is not needed
response_format: |
  {
    "properties": {
      "answer_relevance_rewrite": {
        "title": "Rewritten answer",
        "type": "string"
      }
    },
    "required": [
      "answer_relevance_rewrite"
    ],
    "type": "object"
  }
# Additional turn of instructions to add to the chat
instruction: |
  Rewrite the response for relevance.
  The last assistant response is considered not fully relevant to the last user inquiry due to {answer_relevance_category}: {answer_relevance_analysis}
  Decide if you agree with this assessment, then act according to the following instructions:
  If you disagree with the assessment, provide a verbatim copy of the original response. DO NOT attempt to correct any other perceived defects in the response.
  If you agree with the assessment, provide an updated response that no longer fits the label {answer_relevance_category}, by {correction_method}. Your response should be entirely based on the provided documents and should not rely on other prior knowledge. Your response should be suitable to be directly provided to the user. It should NOT contain meta information regarding the original response, its assessment, or this instruction. The user does not see any of these. Your response is the only response they will see to their inquiry, in place of the original response.
# Data transformations to perform during post-processing
transformations: ~
parameters:
  # Rewritten response could be long.
  max_completion_tokens: 1024
sentence_boundaries: ~

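The rewriter's `instruction` block is a template; below is a sketch of filling it from the classifier's output. {correction_method} is not produced by the classifier config above, so the value used here is purely illustrative, as is the choice of the user role for the extra turn.

import yaml

with open("answer_relevance_rewriter/lora/granite-3.3-8b-instruct/io.yaml") as f:
    cfg = yaml.safe_load(f)

# Output of the classifier intrinsic after its post-processing (illustrative values).
classifier_result = {
    "answer_relevance_category": "Excessive unnecessary information",
    "answer_relevance_analysis": "The response repeats background the user did not ask for.",
    "answer_relevance_likelihood": 0.12,
}
correction_method = "removing the unrequested background and keeping only the answer"  # assumed

# Render the template into one additional chat turn; it would be appended to the
# original conversation before calling the rewriter adapter.
instruction_turn = {
    "role": "user",  # role is an assumption; the io.yaml only says "additional turn"
    "content": cfg["instruction"].format(
        answer_relevance_category=classifier_result["answer_relevance_category"],
        answer_relevance_analysis=classifier_result["answer_relevance_analysis"],
        correction_method=correction_method,
    ),
}
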
answer_relevance_rewriter/alora/granite-3.3-8b-instruct/io.yaml
ADDED
@@ -0,0 +1,29 @@
# Model name string, or null to use whatever is provided in the chat completion request
model: ~
# JSON schema of the model's output, or null if constrained decoding is not needed
response_format: |
  {
    "properties": {
      "answer_relevance_rewrite": {
        "title": "Rewritten answer",
        "type": "string"
      }
    },
    "required": [
      "answer_relevance_rewrite"
    ],
    "type": "object"
  }
# Additional turn of instructions to add to the chat
instruction: |
  Rewrite the response for relevance.
  The last assistant response is considered not fully relevant to the last user inquiry due to {answer_relevance_category}: {answer_relevance_analysis}
  Decide if you agree with this assessment, then act according to the following instructions:
  If you disagree with the assessment, provide a verbatim copy of the original response. DO NOT attempt to correct any other perceived defects in the response.
  If you agree with the assessment, provide an updated response that no longer fits the label {answer_relevance_category}, by {correction_method}. Your response should be entirely based on the provided documents and should not rely on other prior knowledge. Your response should be suitable to be directly provided to the user. It should NOT contain meta information regarding the original response, its assessment, or this instruction. The user does not see any of these. Your response is the only response they will see to their inquiry, in place of the original response.
# Data transformations to perform during post-processing
transformations: ~
parameters:
  # Rewritten response could be long.
  max_completion_tokens: 1024
sentence_boundaries: ~

answer_relevance_rewriter/lora/granite-3.3-2b-instruct/io.yaml
ADDED
@@ -0,0 +1,29 @@
# Model name string, or null to use whatever is provided in the chat completion request
model: ~
# JSON schema of the model's output, or null if constrained decoding is not needed
response_format: |
  {
    "properties": {
      "answer_relevance_rewrite": {
        "title": "Rewritten answer",
        "type": "string"
      }
    },
    "required": [
      "answer_relevance_rewrite"
    ],
    "type": "object"
  }
# Additional turn of instructions to add to the chat
instruction: |
  Rewrite the response for relevance.
  The last assistant response is considered not fully relevant to the last user inquiry due to {answer_relevance_category}: {answer_relevance_analysis}
  Decide if you agree with this assessment, then act according to the following instructions:
  If you disagree with the assessment, provide a verbatim copy of the original response. DO NOT attempt to correct any other perceived defects in the response.
  If you agree with the assessment, provide an updated response that no longer fits the label {answer_relevance_category}, by {correction_method}. Your response should be entirely based on the provided documents and should not rely on other prior knowledge. Your response should be suitable to be directly provided to the user. It should NOT contain meta information regarding the original response, its assessment, or this instruction. The user does not see any of these. Your response is the only response they will see to their inquiry, in place of the original response.
# Data transformations to perform during post-processing
transformations: ~
parameters:
  # Rewritten response could be long.
  max_completion_tokens: 1024
sentence_boundaries: ~

answer_relevance_rewriter/lora/granite-3.3-8b-instruct/io.yaml
ADDED
@@ -0,0 +1,29 @@
# Model name string, or null to use whatever is provided in the chat completion request
model: ~
# JSON schema of the model's output, or null if constrained decoding is not needed
response_format: |
  {
    "properties": {
      "answer_relevance_rewrite": {
        "title": "Rewritten answer",
        "type": "string"
      }
    },
    "required": [
      "answer_relevance_rewrite"
    ],
    "type": "object"
  }
# Additional turn of instructions to add to the chat
instruction: |
  Rewrite the response for relevance.
  The last assistant response is considered not fully relevant to the last user inquiry due to {answer_relevance_category}: {answer_relevance_analysis}
  Decide if you agree with this assessment, then act according to the following instructions:
  If you disagree with the assessment, provide a verbatim copy of the original response. DO NOT attempt to correct any other perceived defects in the response.
  If you agree with the assessment, provide an updated response that no longer fits the label {answer_relevance_category}, by {correction_method}. Your response should be entirely based on the provided documents and should not rely on other prior knowledge. Your response should be suitable to be directly provided to the user. It should NOT contain meta information regarding the original response, its assessment, or this instruction. The user does not see any of these. Your response is the only response they will see to their inquiry, in place of the original response.
# Data transformations to perform during post-processing
transformations: ~
parameters:
  # Rewritten response could be long.
  max_completion_tokens: 1024
sentence_boundaries: ~