{
  "featuresDict": {
    "features": {
      "episode_metadata": {
        "featuresDict": {
          "features": {
            "file_path": {
              "description": "Path to the original data file.",
              "pythonClassName": "tensorflow_datasets.core.features.text_feature.Text",
              "text": {}
            }
          }
        },
        "pythonClassName": "tensorflow_datasets.core.features.features_dict.FeaturesDict"
      },
      "steps": {
        "pythonClassName": "tensorflow_datasets.core.features.dataset_feature.Dataset",
        "sequence": {
          "feature": {
            "featuresDict": {
              "features": {
                "action": {
"description": "Robot action, consists of x,y,z goal and picker commandpicker<0.5 = open, picker>0.5 = close.", |
|
"pythonClassName": "tensorflow_datasets.core.features.tensor_feature.Tensor", |
|
"tensor": { |
|
"dtype": "float32", |
|
"encoding": "none", |
|
"shape": { |
|
"dimensions": [ |
|
"4" |
|
] |
|
} |
|
} |
|
}, |
|
"discount": { |
|
"description": "Discount if provided, default to 1.", |
|
"pythonClassName": "tensorflow_datasets.core.features.scalar.Scalar", |
|
"tensor": { |
|
"dtype": "float32", |
|
"encoding": "none", |
|
"shape": {} |
|
} |
|
}, |
|
"is_first": { |
|
"pythonClassName": "tensorflow_datasets.core.features.tensor_feature.Tensor", |
|
"tensor": { |
|
"dtype": "bool", |
|
"encoding": "none", |
|
"shape": {} |
|
} |
|
}, |
|
"is_last": { |
|
"pythonClassName": "tensorflow_datasets.core.features.tensor_feature.Tensor", |
|
"tensor": { |
|
"dtype": "bool", |
|
"encoding": "none", |
|
"shape": {} |
|
} |
|
}, |
|
"is_terminal": { |
|
"pythonClassName": "tensorflow_datasets.core.features.tensor_feature.Tensor", |
|
"tensor": { |
|
"dtype": "bool", |
|
"encoding": "none", |
|
"shape": {} |
|
} |
|
}, |
|
"language_embedding": { |
|
"description": "Kona language embedding. See https://tfhub.dev/google/universal-sentence-encoder-large/5", |
|
"pythonClassName": "tensorflow_datasets.core.features.tensor_feature.Tensor", |
|
"tensor": { |
|
"dtype": "float32", |
|
"encoding": "none", |
|
"shape": { |
|
"dimensions": [ |
|
"512" |
|
] |
|
} |
|
} |
|
}, |
|
"language_instruction": { |
|
"description": "Language Instruction.", |
|
"pythonClassName": "tensorflow_datasets.core.features.text_feature.Text", |
|
"text": {} |
|
}, |
|
"observation": { |
|
"featuresDict": { |
|
"features": { |
|
"image": { |
|
"description": "Image observation of cloth.", |
|
"image": { |
|
"dtype": "uint8", |
|
"encodingFormat": "png", |
|
"shape": { |
|
"dimensions": [ |
|
"32", |
|
"32", |
|
"3" |
|
] |
|
} |
|
}, |
|
"pythonClassName": "tensorflow_datasets.core.features.image_feature.Image" |
|
} |
|
} |
|
}, |
|
"pythonClassName": "tensorflow_datasets.core.features.features_dict.FeaturesDict" |
|
}, |
|
"reward": { |
|
"description": "Reward as a normalized performance metric in [0, 1].0 = no change from initial state. 1 = perfect fold.-ve performance means the cloth is worse off than initial state.", |
|
"pythonClassName": "tensorflow_datasets.core.features.scalar.Scalar", |
|
"tensor": { |
|
"dtype": "float32", |
|
"encoding": "none", |
|
"shape": {} |
|
} |
|
} |
|
} |
|
}, |
|
"pythonClassName": "tensorflow_datasets.core.features.features_dict.FeaturesDict" |
|
}, |
|
"length": "-1" |
|
} |
|
} |
|
} |
|
}, |
|
"pythonClassName": "tensorflow_datasets.core.features.features_dict.FeaturesDict" |
|
} |
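
For reference, a minimal sketch of how a dataset with this RLDS-style feature spec is typically consumed with `tensorflow_datasets`. The builder directory and the `train` split name are placeholder assumptions, not part of the spec above:

```python
import tensorflow_datasets as tfds

# Placeholder path: point this at the directory where the dataset
# (in the TFDS/RLDS format described above) was downloaded or built.
builder = tfds.builder_from_directory(builder_dir="/path/to/dataset")
ds = builder.as_dataset(split="train")  # split name is an assumption

for episode in ds.take(1):
    # Episode-level metadata: path to the original data file.
    print(episode["episode_metadata"]["file_path"].numpy())
    # `steps` is a nested tf.data.Dataset of per-timestep features.
    for step in episode["steps"]:
        action = step["action"]                     # float32 [4]: x, y, z goal + picker command
        image = step["observation"]["image"]        # uint8 [32, 32, 3] cloth observation
        reward = step["reward"]                     # float32 scalar in [0, 1]
        instruction = step["language_instruction"]  # text
```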