Update README.md
README.md CHANGED
@@ -185,9 +185,9 @@ apiVersion: serving.kserve.io/v1beta1
 kind: InferenceService
 metadata:
   annotations:
-    openshift.io/display-name:
+    openshift.io/display-name: llama-3-3-70b-instruct # OPTIONAL CHANGE
     serving.kserve.io/deploymentMode: RawDeployment
-  name:
+  name: llama-3-3-70b-instruct # specify model name. This value will be used to invoke the model in the payload
   labels:
     opendatahub.io/dashboard: 'true'
 spec:
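Read together with the hunk header, the metadata section of the InferenceService manifest looks like this after the change. The apiVersion line comes from the hunk context; indentation follows standard Kubernetes YAML, and the spec body continues unchanged elsewhere in the README:

```yaml
apiVersion: serving.kserve.io/v1beta1
kind: InferenceService
metadata:
  annotations:
    openshift.io/display-name: llama-3-3-70b-instruct # OPTIONAL CHANGE
    serving.kserve.io/deploymentMode: RawDeployment
  name: llama-3-3-70b-instruct # specify model name. This value will be used to invoke the model in the payload
  labels:
    opendatahub.io/dashboard: 'true'
spec:
  # remainder of the spec as given in the README, unchanged by this commit
```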
@@ -236,7 +236,7 @@ oc apply -f qwen-inferenceservice.yaml
 curl https://<inference-service-name>-predictor-default.<domain>/v1/chat/completions \
   -H "Content-Type: application/json" \
   -d '{
-    "model": "
+    "model": "llama-3-3-70b-instruct",
     "stream": true,
     "stream_options": {
       "include_usage": true
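Assembled from the hunk above, the full request looks roughly like the following. The hunk ends mid-payload, so the "messages" array and the closing braces below are a sketch based on the standard OpenAI-compatible /v1/chat/completions request schema, not lines from this commit:

```sh
curl https://<inference-service-name>-predictor-default.<domain>/v1/chat/completions \
  -H "Content-Type: application/json" \
  -d '{
        "model": "llama-3-3-70b-instruct",
        "stream": true,
        "stream_options": {
          "include_usage": true
        },
        "messages": [
          {"role": "user", "content": "Hello!"}
        ]
      }'
```

Note that the "model" value matches metadata.name from the manifest above, as the inline comment in the diff calls out: that name is what the payload uses to invoke the model.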