Datasets:
id
int64 0
190k
| prompt
stringlengths 21
13.4M
| docstring
stringlengths 1
12k
⌀ |
---|---|---|
0 | from setuptools import find_packages, setup
import os
import subprocess
import time
def readme():
    """Return the contents of README.md decoded as UTF-8."""
    with open('README.md', encoding='utf-8') as f:
        content = f.read()
    return content
1 | from setuptools import find_packages, setup
import os
import subprocess
import time
version_file = 'realesrgan/version.py'
def get_hash():
    """Return the 7-character short git SHA, or 'unknown' outside a git checkout."""
    # A .git directory signals that we are building from a source checkout.
    return get_git_hash()[:7] if os.path.exists('.git') else 'unknown'
def write_version_py():
    """Generate realesrgan/version.py from the VERSION file and the current git SHA."""
    content = """# GENERATED VERSION FILE
# TIME: {}
__version__ = '{}'
__gitsha__ = '{}'
version_info = ({})
"""
    sha = get_hash()
    with open('VERSION', 'r') as f:
        SHORT_VERSION = f.read().strip()
    # Numeric components stay ints; pre-release tags (e.g. 'rc1') become strings.
    VERSION_INFO = ', '.join([x if x.isdigit() else f'"{x}"' for x in SHORT_VERSION.split('.')])
    version_file_str = content.format(time.asctime(), SHORT_VERSION, sha, VERSION_INFO)
    with open(version_file, 'w') as f:
        f.write(version_file_str)
2 | from setuptools import find_packages, setup
import os
import subprocess
import time
version_file = 'realesrgan/version.py'
def get_version():
    """Return __version__ parsed from the version file without importing the package."""
    # Execute into an explicit namespace: relying on exec() mutating a function's
    # locals() is CPython-specific and fragile.
    namespace = {}
    with open(version_file, 'r') as f:
        exec(compile(f.read(), version_file, 'exec'), namespace)
    return namespace['__version__']
3 | from setuptools import find_packages, setup
import os
import subprocess
import time
def get_requirements(filename='requirements.txt'):
    """Read a requirements file located next to this script.

    Args:
        filename: requirements file name relative to this script's directory.

    Returns:
        List of requirement strings with trailing newlines removed.
    """
    here = os.path.dirname(os.path.realpath(__file__))
    with open(os.path.join(here, filename), 'r') as f:
        requires = [line.replace('\n', '') for line in f.readlines()]
    return requires
4 | import argparse
import cv2
import glob
import mimetypes
import numpy as np
import os
import shutil
import subprocess
import torch
from basicsr.archs.rrdbnet_arch import RRDBNet
from basicsr.utils.download_util import load_file_from_url
from os import path as osp
from tqdm import tqdm
from realesrgan import RealESRGANer
from realesrgan.archs.srvgg_arch import SRVGGNetCompact
def get_video_meta_info(video_path):
    """Probe *video_path* with ffmpeg and return width/height/fps/audio/frame count.

    NOTE(review): `ffmpeg` (the ffmpeg-python package) is not imported in this
    snippet — confirm the module-level import exists in the full file.
    """
    ret = {}
    probe = ffmpeg.probe(video_path)
    video_streams = [stream for stream in probe['streams'] if stream['codec_type'] == 'video']
    has_audio = any(stream['codec_type'] == 'audio' for stream in probe['streams'])
    ret['width'] = video_streams[0]['width']
    ret['height'] = video_streams[0]['height']
    # avg_frame_rate is a fraction string like '30000/1001'; eval() computes it.
    # NOTE(review): eval on probe output is fragile/unsafe — fractions.Fraction
    # would be a safer parse; confirm before changing.
    ret['fps'] = eval(video_streams[0]['avg_frame_rate'])
    ret['audio'] = ffmpeg.input(video_path).audio if has_audio else None
    ret['nb_frames'] = int(video_streams[0]['nb_frames'])
    return ret
def get_sub_video(args, num_process, process_idx):
    """Cut args.input into the process_idx-th of num_process equal time slices.

    Returns the original input path when a single process is used; otherwise
    writes the sub-clip with ffmpeg and returns its path.
    """
    if num_process == 1:
        return args.input
    meta = get_video_meta_info(args.input)
    duration = int(meta['nb_frames'] / meta['fps'])
    part_time = duration // num_process
    print(f'duration: {duration}, part_time: {part_time}')
    os.makedirs(osp.join(args.output, f'{args.video_name}_inp_tmp_videos'), exist_ok=True)
    out_path = osp.join(args.output, f'{args.video_name}_inp_tmp_videos', f'{process_idx:03d}.mp4')
    # The last slice runs to the end of the video, so it gets no '-to' bound.
    cmd = [
        args.ffmpeg_bin, f'-i {args.input}', '-ss', f'{part_time * process_idx}',
        f'-to {part_time * (process_idx + 1)}' if process_idx != num_process - 1 else '', '-async 1', out_path, '-y'
    ]
    print(' '.join(cmd))
    # NOTE(review): shell=True on a space-joined, unquoted command breaks on paths
    # containing spaces and is shell-injection-prone for untrusted paths.
    subprocess.call(' '.join(cmd), shell=True)
    return out_path
5 | import argparse
import cv2
import glob
import mimetypes
import numpy as np
import os
import shutil
import subprocess
import torch
from basicsr.archs.rrdbnet_arch import RRDBNet
from basicsr.utils.download_util import load_file_from_url
from os import path as osp
from tqdm import tqdm
from realesrgan import RealESRGANer
from realesrgan.archs.srvgg_arch import SRVGGNetCompact
def inference_video(args, video_save_path, device=None, total_workers=1, worker_idx=0):
def run(args):
    """Upscale args.input, fanning the video out across GPUs/processes when possible."""
    args.video_name = osp.splitext(os.path.basename(args.input))[0]
    video_save_path = osp.join(args.output, f'{args.video_name}_{args.suffix}.mp4')
    if args.extract_frame_first:
        # Decode the whole video to near-lossless PNG frames and work from those.
        tmp_frames_folder = osp.join(args.output, f'{args.video_name}_inp_tmp_frames')
        os.makedirs(tmp_frames_folder, exist_ok=True)
        os.system(f'ffmpeg -i {args.input} -qscale:v 1 -qmin 1 -qmax 1 -vsync 0 {tmp_frames_folder}/frame%08d.png')
        args.input = tmp_frames_folder
    num_gpus = torch.cuda.device_count()
    num_process = num_gpus * args.num_process_per_gpu
    if num_process == 1:
        # Single worker: process the whole video in this process.
        inference_video(args, video_save_path)
        return
    # One sub-video per worker; 'spawn' is required for CUDA in child processes.
    ctx = torch.multiprocessing.get_context('spawn')
    pool = ctx.Pool(num_process)
    os.makedirs(osp.join(args.output, f'{args.video_name}_out_tmp_videos'), exist_ok=True)
    pbar = tqdm(total=num_process, unit='sub_video', desc='inference')
    for i in range(num_process):
        sub_video_save_path = osp.join(args.output, f'{args.video_name}_out_tmp_videos', f'{i:03d}.mp4')
        pool.apply_async(
            inference_video,
            args=(args, sub_video_save_path, torch.device(i % num_gpus), num_process, i),
            callback=lambda arg: pbar.update(1))
    pool.close()
    pool.join()
    # Combine the per-worker outputs: write a concat list for ffmpeg's demuxer.
    with open(f'{args.output}/{args.video_name}_vidlist.txt', 'w') as f:
        for i in range(num_process):
            f.write(f'file \'{args.video_name}_out_tmp_videos/{i:03d}.mp4\'\n')
    # '-c copy' concatenates losslessly without re-encoding.
    cmd = [
        args.ffmpeg_bin, '-f', 'concat', '-safe', '0', '-i', f'{args.output}/{args.video_name}_vidlist.txt', '-c',
        'copy', f'{video_save_path}'
    ]
    print(' '.join(cmd))
    subprocess.call(cmd)
    shutil.rmtree(osp.join(args.output, f'{args.video_name}_out_tmp_videos'))
    if osp.exists(osp.join(args.output, f'{args.video_name}_inp_tmp_videos')):
        shutil.rmtree(osp.join(args.output, f'{args.video_name}_inp_tmp_videos'))
    os.remove(f'{args.output}/{args.video_name}_vidlist.txt')
6 | import os
os.system('pip install gfpgan')
os.system('python setup.py develop')
import cv2
import shutil
import tempfile
import torch
from basicsr.archs.rrdbnet_arch import RRDBNet
from basicsr.archs.srvgg_arch import SRVGGNetCompact
from realesrgan.utils import RealESRGANer
def clean_folder(folder):
    """Delete every file, symlink, and subdirectory inside *folder* (best effort).

    Failures are logged and skipped so one bad entry does not abort the sweep.
    """
    for filename in os.listdir(folder):
        file_path = os.path.join(folder, filename)
        try:
            if os.path.isfile(file_path) or os.path.islink(file_path):
                os.unlink(file_path)
            elif os.path.isdir(file_path):
                shutil.rmtree(file_path)
        except Exception as e:
            print(f'Failed to delete {file_path}. Reason: {e}')
7 | import argparse
import cv2
import numpy as np
import os
import sys
from basicsr.utils import scandir
from multiprocessing import Pool
from os import path as osp
from tqdm import tqdm
def worker(path, opt):
    """Split one image into overlapping crop_size patches and save them as PNGs.

    Args:
        path (str): Image path.
        opt (dict): Configuration with keys crop_size, step, thresh_size,
            save_folder and compression_level (cv2.IMWRITE_PNG_COMPRESSION).

    Returns:
        str: Progress message shown in the progress bar.
    """
    crop_size, step, thresh_size = opt['crop_size'], opt['step'], opt['thresh_size']
    img_name, extension = osp.splitext(osp.basename(path))
    # DIV2K filenames carry a scale suffix (x2/x3/x4/x8); strip it.
    for scale_tag in ('x2', 'x3', 'x4', 'x8'):
        img_name = img_name.replace(scale_tag, '')
    img = cv2.imread(path, cv2.IMREAD_UNCHANGED)
    h, w = img.shape[0:2]

    def _grid(length):
        # Sliding-window start offsets along one axis; add a tail crop when the
        # leftover strip is larger than thresh_size.
        starts = np.arange(0, length - crop_size + 1, step)
        if length - (starts[-1] + crop_size) > thresh_size:
            starts = np.append(starts, length - crop_size)
        return starts

    patch_idx = 0
    for top in _grid(h):
        for left in _grid(w):
            patch_idx += 1
            patch = np.ascontiguousarray(img[top:top + crop_size, left:left + crop_size, ...])
            cv2.imwrite(
                osp.join(opt['save_folder'], f'{img_name}_s{patch_idx:03d}{extension}'), patch,
                [cv2.IMWRITE_PNG_COMPRESSION, opt['compression_level']])
    return f'Processing {img_name} ...'
The provided code snippet includes necessary dependencies for implementing the `extract_subimages` function. Write a Python function `def extract_subimages(opt)` to solve the following problem:
Crop images to subimages. Args: opt (dict): Configuration dict. It contains: input_folder (str): Path to the input folder. save_folder (str): Path to save folder. n_thread (int): Thread number.
Here is the function:
def extract_subimages(opt):
    """Crop images to subimages.

    Args:
        opt (dict): Configuration dict. It contains:
            input_folder (str): Path to the input folder.
            save_folder (str): Path to save folder.
            n_thread (int): Thread number.
    """
    input_folder = opt['input_folder']
    save_folder = opt['save_folder']
    if not osp.exists(save_folder):
        os.makedirs(save_folder)
        print(f'mkdir {save_folder} ...')
    else:
        # Refuse to overwrite an existing output folder.
        print(f'Folder {save_folder} already exists. Exit.')
        sys.exit(1)
    # Scan all images, then crop them in a worker pool.
    img_list = list(scandir(input_folder, full_path=True))
    pbar = tqdm(total=len(img_list), unit='image', desc='Extract')
    pool = Pool(opt['n_thread'])
    for path in img_list:
        pool.apply_async(worker, args=(path, opt), callback=lambda arg: pbar.update(1))
    pool.close()
    pool.join()
    pbar.close()
    print('All processes done.')
8 | import cv2
import numpy as np
from PIL import Image
def rotate_array(image: np.ndarray, angle: float) -> np.ndarray:
    """Rotate *image* by *angle* degrees about its center; output keeps the same size."""
    if angle == 0:
        # Fast path: nothing to do.
        return image
    height, width = image.shape[:2]
    rotation = cv2.getRotationMatrix2D((width // 2, height // 2), angle, 1.0)
    return cv2.warpAffine(image, rotation, (width, height))
def rotate_image(image: Image, angle: float) -> Image:
    """Rotate a PIL image by *angle* degrees via the array-based helper."""
    if angle == 0:
        return image
    return Image.fromarray(rotate_array(np.array(image), angle))
9 | import traceback
from typing import Dict
from scripts.io.util import load_classes_from_directory
from scripts.use_cases.face_detector import FaceDetector
from scripts.use_cases.face_processor import FaceProcessor
from scripts.use_cases.mask_generator import MaskGenerator
def create(all_classes, type: str) -> Dict:
    """Instantiate each class and return the instances indexed by lowercased name.

    Classes that fail to construct (or whose name() raises) are logged and skipped.
    """
    registry = {}
    for klass in all_classes:
        try:
            instance = klass()
            registry[instance.name().lower()] = instance
        except Exception as e:
            print(traceback.format_exc())
            print(f"Face Editor: {klass}, Error: {e}")
    return registry
def load_classes_from_directory(base_class: Type, installer: bool = False) -> List[Type]:
    """Collect implementations of *base_class* from the inferencers directory.

    When *installer* is set, only the additional components enabled in the
    Face Editor settings are scanned.
    """
    if not installer:
        return load_classes_from_directory_(base_class, inferencers_dir, False)
    all_classes: List[Type] = []
    for component in shared.opts.data.get("face_editor_additional_components", []):
        component_dir = os.path.join(inferencers_dir, component)
        all_classes.extend(load_classes_from_directory_(base_class, component_dir, installer))
    return all_classes
class FaceDetector(ABC):
    """Interface for face-detection backends discovered by the class loader."""
    def name(self) -> str:
        """Return the detector's display name (used, lowercased, as registry key)."""
        pass
    def detect_faces(self, image: Image, **kwargs) -> List[Rect]:
        """Detect faces in *image* and return their bounding rectangles."""
        pass
def load_face_detector() -> Dict[str, FaceDetector]:
    """Instantiate all discovered FaceDetector implementations, keyed by lowercased name."""
    return create(load_classes_from_directory(FaceDetector), "FaceDetector")
10 | import traceback
from typing import Dict
from scripts.io.util import load_classes_from_directory
from scripts.use_cases.face_detector import FaceDetector
from scripts.use_cases.face_processor import FaceProcessor
from scripts.use_cases.mask_generator import MaskGenerator
def create(all_classes, type: str) -> Dict:
def load_classes_from_directory(base_class: Type, installer: bool = False) -> List[Type]:
class FaceProcessor(ABC):
def name(self) -> str:
def process(self, face: Face, p: StableDiffusionProcessingImg2Img, **kwargs) -> Image:
def load_face_processor() -> Dict[str, FaceProcessor]:
return create(load_classes_from_directory(FaceProcessor), "FaceProcessor") | null |
11 | import traceback
from typing import Dict
from scripts.io.util import load_classes_from_directory
from scripts.use_cases.face_detector import FaceDetector
from scripts.use_cases.face_processor import FaceProcessor
from scripts.use_cases.mask_generator import MaskGenerator
def create(all_classes, type: str) -> Dict:
    """Instantiate each class in *all_classes*, indexing instances by lowercased name.

    Classes that fail to construct are logged and skipped.
    """
    d = {}
    for cls in all_classes:
        try:
            c = cls()
            d[c.name().lower()] = c
        except Exception as e:
            print(traceback.format_exc())
            print(f"Face Editor: {cls}, Error: {e}")
    return d
def load_classes_from_directory(base_class: Type, installer: bool = False) -> List[Type]:
    """Collect implementations of *base_class* from the inferencers directory.

    When *installer* is True, only the components enabled via the
    "face_editor_additional_components" option are scanned.
    """
    if not installer:
        all_classes = load_classes_from_directory_(base_class, inferencers_dir, False)
    else:
        all_classes = []
        for component in shared.opts.data.get("face_editor_additional_components", []):
            all_classes.extend(
                load_classes_from_directory_(base_class, os.path.join(inferencers_dir, component), installer)
            )
    return all_classes
class MaskGenerator(ABC):
    """Interface for mask-generation backends discovered by the class loader."""
    def name(self) -> str:
        """Return the generator's display name (used, lowercased, as registry key)."""
        pass
    def generate_mask(
        self,
        face_image: np.ndarray,
        face_area_on_image: Tuple[int, int, int, int],
        **kwargs,
    ) -> np.ndarray:
        """Produce a mask for *face_image*; the box is (left, top, right, bottom)."""
        pass
def mask_non_face_areas(image: np.ndarray, face_area_on_image: Tuple[int, int, int, int]) -> np.ndarray:
    """Return a copy of *image* with everything outside the face rectangle zeroed.

    The rectangle is given as (left, top, right, bottom); the input is untouched.
    """
    left, top, right, bottom = face_area_on_image
    masked = image.copy()
    # Zero the bands above, below, left of, and right of the face box.
    masked[:top, :] = 0
    masked[bottom:, :] = 0
    masked[:, :left] = 0
    masked[:, right:] = 0
    return masked
def calculate_mask_coverage(mask: np.ndarray):
    """Return the fraction of *mask* pixels that are non-black (expects RGB input)."""
    gray = cv2.cvtColor(mask, cv2.COLOR_RGB2GRAY)
    return np.count_nonzero(gray) / gray.size
def load_mask_generator() -> Dict[str, MaskGenerator]:
    """Instantiate all discovered MaskGenerator implementations, keyed by lowercased name."""
    return create(load_classes_from_directory(MaskGenerator), "MaskGenerator")
12 | import operator
from typing import Dict
from lark import Lark, Tree
def starts_with(a, b):
    """Return True if string *a* begins with prefix *b*."""
    return a.startswith(b)
13 | import operator
from typing import Dict
from lark import Lark, Tree
def ends_with(a, b):
    """Return True if string *a* ends with suffix *b*."""
    return a.endswith(b)
14 | import operator
from typing import Dict
from lark import Lark, Tree
def contains(a, b):
    """Return True if *b* occurs inside *a*."""
    return b in a
15 | import operator
from typing import Dict
from lark import Lark, Tree
def not_contains(a, b):
    """Return True if *b* does not occur inside *a*."""
    return b not in a
16 | import operator
from typing import Dict
from lark import Lark, Tree
def evaluate(query: str, attributes: Dict[str, str]) -> bool:
def validate(query: str):
    """Check *query* syntax by evaluating it against an empty attribute set."""
    return evaluate(query, {})
17 | from typing import Dict, List, Optional, Union
from pydantic import BaseModel, root_validator, validator
class Worker(BaseModel):
    """A named workflow worker with optional parameters.

    NOTE(review): `default_params` and `lowercase_name` read like pydantic
    validators but carry no decorators here — presumably `@root_validator`
    and `@validator("name")` were stripped; confirm against the original file.
    """
    name: str
    params: Optional[Dict]
    # Presumably a root validator: substitutes an empty dict when params is missing/None.
    def default_params(cls, values):
        if "params" not in values or values["params"] is None:
            values["params"] = {}
        return values
    # Presumably a field validator: normalizes the worker name to lowercase.
    def lowercase_name(cls, v):
        return v.lower()
def parse_worker_field(value: Union[str, Dict, Worker]) -> Worker:
    """Coerce a workflow field (name string, dict, or Worker) into a Worker."""
    if isinstance(value, Dict):
        return Worker(**value)
    if isinstance(value, str):
        # A bare string is shorthand for a worker with default params.
        return Worker(name=value)
    return value
18 | import os
import gradio as gr
from modules import script_callbacks, shared
from scripts.entities.option import Option
from scripts.io.util import inferencers_dir
from scripts.ui import workflow_editor
from scripts.ui.param_value_parser import ParamValueParser
inferencers_dir = os.path.join(get_path("scripts", "inferencers"))
def on_ui_settings():
    """Register the Face Editor options in the WebUI settings panel."""
    section = ("face_editor", "Face Editor")
    shared.opts.add_option(
        "face_editor_search_subdirectories",
        shared.OptionInfo(False, "Search workflows in subdirectories", gr.Checkbox, section=section),
    )
    # Offer each alphanumeric-named subdirectory of the inferencers dir as an
    # optional add-on component.
    additional_components = []
    with os.scandir(inferencers_dir) as entries:
        for entry in entries:
            if entry.is_dir() and entry.name[0].isalnum():
                additional_components.append(entry.name)
    shared.opts.add_option(
        "face_editor_additional_components",
        shared.OptionInfo(
            [], "Additional components", gr.CheckboxGroup, {"choices": additional_components}, section=section
        ),
    )
    shared.opts.add_option(
        "face_editor_save_original_on_detection_fail",
        shared.OptionInfo(True, "Save original image if face detection fails", gr.Checkbox, section=section),
    )
    shared.opts.add_option(
        "face_editor_correct_tilt",
        shared.OptionInfo(False, "Adjust tilt for detected faces", gr.Checkbox, section=section),
    )
    shared.opts.add_option(
        "face_editor_auto_face_size_by_model",
        shared.OptionInfo(False, "Auto face size adjustment by model", gr.Checkbox, section=section),
    )
    shared.opts.add_option(
        "face_editor_script_index",
        shared.OptionInfo(
            99,
            "The position in postprocess at which this script will be executed; "
            "0 means it will be executed before any scripts, 99 means it will probably be executed last.",
            gr.Slider,
            {"minimum": 0, "maximum": 99, "step": 1},
            section=section,
        ),
    )
19 | import json
import os
from typing import Any, Dict, List
import gradio as gr
from modules import shared
from pydantic import ValidationError
from scripts.io.util import workflows_dir
from scripts.use_cases.workflow_manager import WorkflowManager
def load_workflow(file: str) -> str:
    """Return the JSON text of the named workflow, or '' if it does not exist."""
    if file is None:
        return ""
    filepath = os.path.join(workflows_dir, file + ".json")
    if not os.path.isfile(filepath):
        return ""
    with open(filepath) as fp:
        return fp.read()
def get_filename(file: str) -> str:
    """Map the reserved 'default' workflow to an empty save-as name."""
    return "" if file == "default" else file
def sync_selection(file: str) -> str:
    """Propagate the selected workflow name unchanged to the paired dropdown."""
    return file
def save_workflow(name: str, workflow: str) -> str:
    """Write *workflow* to <workflows_dir>/<name>.json and return a status message."""
    if not name:
        # Nothing to save without a filename.
        return ""
    target = os.path.join(workflows_dir, name + ".json")
    with open(target, "w") as fp:
        fp.write(workflow)
    return f"Saved to {name}.json"
def get_files() -> List[str]:
    """List saved workflow names (extension-less paths relative to workflows_dir)."""
    include_subdirs = shared.opts.data.get("face_editor_search_subdirectories", False)
    found: List[str] = []
    for root, _, filenames in os.walk(workflows_dir):
        # Unless enabled, only the top-level workflows directory is scanned.
        if not include_subdirs and not os.path.samefile(root, workflows_dir):
            continue
        for filename in filenames:
            if not filename.endswith(".json"):
                continue
            rel = os.path.relpath(os.path.join(root, filename), workflows_dir)
            found.append(os.path.splitext(rel)[0])
    return found
def refresh_files(workflow: str, file: str) -> dict:
    """Rebuild the dropdown choices; reselect the file whose content matches *workflow*."""
    files = get_files()
    kwargs: Dict[str, Any] = {"choices": files}
    if workflow:
        matching = next((name for name in files if load_workflow(name) == workflow), None)
        if matching is not None:
            kwargs["value"] = matching
    return gr.update(**kwargs)
def validate_workflow(workflow: str) -> str:
    """Validate workflow JSON syntax and schema; return a human-readable verdict."""
    try:
        json.loads(workflow)
        WorkflowManager.get(workflow)
    except json.JSONDecodeError as e:
        return f"Error in JSON: {str(e)}"
    except ValidationError as e:
        errors = e.errors()
        if not errors:
            return f"{str(e)}"
        # Surface the innermost (last) schema error with its location path.
        last = errors[-1]
        location = " -> ".join(str(part) for part in last["loc"])
        return f"{location} {last['msg']}\n--\n{str(e)}"
    except Exception as e:
        return f"{str(e)}"
    return "No errors found in the Workflow."
def build(workflow_selector: gr.Dropdown):
    """Build the workflow-editor UI and wire its events; return the editor component."""
    with gr.Blocks(title="Workflow"):
        with gr.Row():
            filename_dropdown = gr.Dropdown(
                choices=get_files(),
                label="Choose a Workflow",
                value="default",
                scale=2,
                min_width=400,
                show_label=False,
            )
            refresh_button = gr.Button(value="🔄", scale=0, size="sm", elem_classes="tool")
        with gr.Row():
            filename_input = gr.Textbox(scale=2, show_label=False, placeholder="Save as")
            save_button = gr.Button(value="💾", scale=0, size="sm", elem_classes="tool")
        workflow_editor = gr.Code(language="json", label="Workflow", value=load_workflow("default"))
        with gr.Row():
            json_status = gr.Textbox(scale=2, show_label=False)
            validate_button = gr.Button(value="✅", scale=0, size="sm", elem_classes="tool")
        # Keep the local dropdown, the external selector, and the editor in sync.
        filename_dropdown.input(load_workflow, inputs=[filename_dropdown], outputs=[workflow_editor])
        filename_dropdown.change(get_filename, inputs=[filename_dropdown], outputs=[filename_input])
        filename_dropdown.input(sync_selection, inputs=[filename_dropdown], outputs=[workflow_selector])
        workflow_selector.input(load_workflow, inputs=[workflow_selector], outputs=[workflow_editor])
        workflow_selector.input(get_filename, inputs=[workflow_selector], outputs=[filename_input])
        workflow_selector.input(sync_selection, inputs=[workflow_selector], outputs=[filename_dropdown])
        # Saving also re-validates so the status box reflects what was written.
        save_button.click(validate_workflow, inputs=[workflow_editor], outputs=[json_status])
        save_button.click(save_workflow, inputs=[filename_input, workflow_editor])
        refresh_button.click(refresh_files, inputs=[workflow_editor, filename_dropdown], outputs=[filename_dropdown])
        refresh_button.click(refresh_files, inputs=[workflow_editor, filename_dropdown], outputs=[workflow_selector])
        validate_button.click(validate_workflow, inputs=[workflow_editor], outputs=[json_status])
    return workflow_editor
20 | import cv2
import numpy as np
from modules.processing import StableDiffusionProcessingImg2Img
from PIL import Image
from scripts.entities.face import Face
from scripts.use_cases.face_processor import FaceProcessor
def color_generator(colors):
    """Yield the given colors in order, cycling forever."""
    while True:
        for color in colors:
            yield color
21 | import importlib.util
import inspect
import os
from typing import List, Type
import modules.scripts as scripts
from modules import shared
def get_path(*p: str) -> str:
    """Resolve *p* under the script base dir, falling back to the extension install dir.

    Raises:
        RuntimeError: if neither candidate directory exists.
    """
    dir = os.path.join(scripts.basedir(), *p)
    if not os.path.isdir(dir):
        # Fallback: the extension may be installed under extensions/sd-face-editor.
        dir = os.path.join(scripts.basedir(), "extensions", "sd-face-editor", *p)
        if not os.path.isdir(dir):
            raise RuntimeError(f"not found:{dir}")
    return dir
22 | import seqio
import t5.data
from t5.data.glue_utils import get_glue_weight_mapping
from t5.data.glue_utils import get_super_glue_weight_mapping
from t5.data.glue_utils import get_super_glue_weight_mapping_sentinel
import t5.data.tasks
_GLUE_WEIGHT_MAPPING = get_glue_weight_mapping()
_SUPER_GLUE_WEIGHT_MAPPING = get_super_glue_weight_mapping()
def _dedupe(name):
    """Return the mixing rate for *name*, de-duplicating RTE across benchmarks.

    Falls back to rate_num_examples for tasks without an explicit weight.
    """
    rate = None
    if name in _GLUE_WEIGHT_MAPPING:
        rate = _GLUE_WEIGHT_MAPPING[name]
    elif name in _SUPER_GLUE_WEIGHT_MAPPING:
        rate = _SUPER_GLUE_WEIGHT_MAPPING[name]
    if rate is None:
        return t5.data.rate_num_examples
    # RTE appears in both GLUE and SuperGLUE; halve so it is not double counted.
    if "glue" in name and "rte" in name:
        rate *= 0.5
    return rate
23 | import seqio
import t5.data
from t5.data.glue_utils import get_glue_weight_mapping
from t5.data.glue_utils import get_super_glue_weight_mapping
from t5.data.glue_utils import get_super_glue_weight_mapping_sentinel
import t5.data.tasks
_GLUE_WEIGHT_MAPPING = get_glue_weight_mapping()
_SUPER_GLUE_WEIGHT_MAPPING = get_super_glue_weight_mapping()
def assign_weight_or_rate_num_examples(name):
    """Return the explicit GLUE/SuperGLUE weight for *name*, else the default rate fn."""
    if name in _GLUE_WEIGHT_MAPPING:
        return _GLUE_WEIGHT_MAPPING[name]
    if name in _SUPER_GLUE_WEIGHT_MAPPING:
        return _SUPER_GLUE_WEIGHT_MAPPING[name]
    return t5.data.rate_num_examples
24 | import gin
import seqio
# Default T5 SentencePiece model and the number of <extra_id_N> sentinel tokens.
DEFAULT_SPM_PATH = "gs://t5-data/vocabs/cc_all.32000/sentencepiece.model"
DEFAULT_EXTRA_IDS = 100
def get_default_vocabulary():
    """Return the default T5 SentencePiece vocabulary (32k tokens + 100 sentinels)."""
    return seqio.SentencePieceVocabulary(DEFAULT_SPM_PATH, DEFAULT_EXTRA_IDS)
25 | import gin
import seqio
The provided code snippet includes necessary dependencies for implementing the `rate_num_examples` function. Write a Python function `def rate_num_examples( task, maximum=None, temperature=1.0, scale=1.0, fallback_to_num_input_examples=True)` to solve the following problem:
Mixing rate equal to the number of examples for the task.
Here is the function:
def rate_num_examples(
        task, maximum=None, temperature=1.0, scale=1.0,
        fallback_to_num_input_examples=True):
    """Mixing rate equal to the number of examples for the task."""
    return seqio.mixing_rate_num_examples(
        task=task, maximum=maximum, scale=scale, temperature=temperature,
        fallback_to_num_input_examples=fallback_to_num_input_examples)
26 | import gin
import seqio
The provided code snippet includes necessary dependencies for implementing the `rate_unsupervised` function. Write a Python function `def rate_unsupervised(task, value=1e6)` to solve the following problem:
Gin-configurable mixing rate for the unsupervised co-training task.
Here is the function:
def rate_unsupervised(task, value=1e6):
    """Gin-configurable mixing rate for the unsupervised co-training task."""
    del task  # The rate is constant; the task argument is part of the rate-fn interface.
    return value
27 | import tensorflow.compat.v2 as tf
The provided code snippet includes necessary dependencies for implementing the `lower_text` function. Write a Python function `def lower_text(string, **unused_kwargs)` to solve the following problem:
Lowercases text.
Here is the function:
def lower_text(string, **unused_kwargs):
    """Lowercases text."""
    return string.lower()
28 | import tensorflow.compat.v2 as tf
The provided code snippet includes necessary dependencies for implementing the `qa` function. Write a Python function `def qa(answer, example=None, is_target=False)` to solve the following problem:
Returns answer, or all answers if the full example is provided.
Here is the function:
def qa(answer, example=None, is_target=False):
    """Returns answer, or all answers if the full example is provided."""
    if is_target:
        # Target path: decode every reference answer from the example.
        return [tf.compat.as_text(a) for a in example["answers"]]
    return answer
29 | import tensorflow.compat.v2 as tf
The provided code snippet includes necessary dependencies for implementing the `span_qa` function. Write a Python function `def span_qa(answer, example=None, is_target=False)` to solve the following problem:
Returns answer, or a dict with answers and context if the example is provided.
Here is the function:
def span_qa(answer, example=None, is_target=False):
    """Returns answer, or a dict with answers and context if the example is provided."""
    if is_target:
        # Target path: decode the reference answers together with their context.
        return {
            "answers": [tf.compat.as_text(a) for a in example["answers"]],
            "context": tf.compat.as_text(example["context"])
        }
    return answer
30 | import tensorflow.compat.v2 as tf
The provided code snippet includes necessary dependencies for implementing the `wsc_simple` function. Write a Python function `def wsc_simple(prediction, example=None, is_target=False)` to solve the following problem:
Sees whether we predicted the referent or not.
Here is the function:
def wsc_simple(prediction, example=None, is_target=False):
    """Sees whether we predicted the referent or not."""
    if is_target:
        return example["label"]
    determiners = {
        "a", "an", "few", "her", "his", "each", "every", "many", "much", "my",
        "our", "some", "that", "the", "their", "these", "this", "those", "which",
        "whose", "your"
    }
    def clean(s):
        """Ignore capitalization and determiners."""
        s = tf.compat.as_text(s).strip().lower()
        return " ".join([w for w in s.split(" ") if w not in determiners])
    prediction = clean(prediction)
    if not prediction:
        # We don't want an empty prediction to accidentally return 0 and spuriously
        # match the label.
        return -1
    # We aren't using the label but rather using the extracted referent so that we
    # can see if the prediction is equivalent to the referent.
    referent = clean(example["targets_pretokenized"])
    if ("'" in prediction) != ("'" in referent):
        # Make sure we don't mark cases where the prediction is "Bob" and the
        # referent is "Bob's hat" as predicting the referent.
        predicted_referent = False
    else:
        prediction_words = set(prediction.split(" "))
        referent_words = set(referent.split(" "))
        # Handle cases where the prediction is "fuzzy bunny" and the referent is
        # "bunny".
        predicted_referent = prediction_words.issubset(
            referent_words) or referent_words.issubset(prediction_words)
    return int(predicted_referent)
31 | import tensorflow.compat.v2 as tf
The provided code snippet includes necessary dependencies for implementing the `rank_classification` function. Write a Python function `def rank_classification(score, example=None, is_target=False, passthrough_feature_keys=None)` to solve the following problem:
A postprocessor for the `rank_classification` preprocessor and metric.
Here is the function:
def rank_classification(score,
                        example=None,
                        is_target=False,
                        passthrough_feature_keys=None):
    """A postprocessor for the `rank_classification` preprocessor and metric.

    For targets, packs (idx, is_correct, weight, num_targets[, passthrough...])
    into a tuple; otherwise passes the model score through unchanged.
    """
    if not is_target:
        return score
    outputs = [
        tuple(example["idx"]), example["is_correct"],
        example.get("weight", 1.0),
        len(example["targets"])
    ]
    if passthrough_feature_keys:
        for key in passthrough_feature_keys:
            outputs.append(example[key])
    return tuple(outputs)
32 | import collections
import functools
from t5.data import postprocessors
from t5.data import preprocessors
from t5.evaluation import metrics
# Per-task mixing weights for GLUE (training-set sizes; 0. = eval-only tasks).
GLUE_WEIGHT_MAPPING = {
    "glue_cola_v002": 8_551.,
    "glue_sst2_v002": 67_349.,
    "glue_mrpc_v002": 3_668.,
    "glue_qqp_v002": 363_849.,
    "glue_stsb_v002": 5_749.,
    "glue_mnli_v002": 392_702.,
    "glue_qnli_v002": 104_743.,
    "glue_rte_v002": 2_490.,
    "glue_mnli_mismatched_v002": 0.,
    "glue_mnli_matched_v002": 0.,
    "glue_ax_v002": 0.,
}
def get_glue_weight_mapping():
    """Return the GLUE task-name -> mixing-weight mapping."""
    return GLUE_WEIGHT_MAPPING
33 | import collections
import functools
from t5.data import postprocessors
from t5.data import preprocessors
from t5.evaluation import metrics
# Per-task mixing weights for SuperGLUE (training-set sizes; 0. = eval-only tasks).
SUPER_GLUE_WEIGHT_MAPPING = {
    "dpr_v001_simple": 1_322.,
    "super_glue_wsc_v102_simple_train": 259.,
    "super_glue_wsc_v102_simple_eval": 0.,
    "super_glue_boolq_v102": 9_427.,
    "super_glue_cb_v102": 250.,
    "super_glue_copa_v102": 400.,
    "super_glue_multirc_v102": 27_243.,
    "super_glue_record_v102": 138_854.,
    "super_glue_rte_v102": 2_490.,
    "super_glue_wic_v102": 5_428.,
    "super_glue_axb_v102": 0.,
    "super_glue_axg_v102": 0.,
}
def get_super_glue_weight_mapping():
    """Return the SuperGLUE task-name -> mixing-weight mapping."""
    return SUPER_GLUE_WEIGHT_MAPPING
34 | import collections
import functools
from t5.data import postprocessors
from t5.data import preprocessors
from t5.evaluation import metrics
# SuperGLUE mixing weights for the 1-sentinel task variants (same sizes as above).
SUPER_GLUE_WEIGHT_MAPPING_SENTINEL = {
    "dpr_v001_simple_1_sentinel": 1_322.,
    "super_glue_wsc_v102_simple_1_sentinel_train": 259.,
    "super_glue_wsc_v102_simple_1_sentinel_eval": 0.,
    "super_glue_boolq_v102_1_sentinel": 9_427.,
    "super_glue_cb_v102_1_sentinel": 250.,
    "super_glue_copa_v102_1_sentinel": 400.,
    "super_glue_multirc_v102_1_sentinel": 27_243.,
    "super_glue_record_v102_1_sentinel": 138_854.,
    "super_glue_rte_v102_1_sentinel": 2_490.,
    "super_glue_wic_v102_1_sentinel": 5_428.,
    "super_glue_axb_v102_1_sentinel": 0.,
    "super_glue_axg_v102_1_sentinel": 0.,
}
def get_super_glue_weight_mapping_sentinel():
    """Return the SuperGLUE 1-sentinel task-name -> mixing-weight mapping."""
    return SUPER_GLUE_WEIGHT_MAPPING_SENTINEL
35 | import collections
import functools
from t5.data import postprocessors
from t5.data import preprocessors
from t5.evaluation import metrics
The provided code snippet includes necessary dependencies for implementing the `get_glue_text_preprocessor` function. Write a Python function `def get_glue_text_preprocessor(builder_config)` to solve the following problem:
Return the glue preprocessor. Args: builder_config: a BuilderConfig Returns: a preprocessor function
Here is the function:
def get_glue_text_preprocessor(builder_config):
    """Return the glue preprocessor.

    Args:
        builder_config: a BuilderConfig

    Returns:
        a preprocessor function
    """
    # stsb uses a floating point target, so use special preprocessor
    if builder_config.name == "stsb":
        return preprocessors.stsb
    elif builder_config.name == "wsc.fixed":
        return preprocessors.wsc
    elif builder_config.name == "record":
        return preprocessors.record
    else:
        if "mnli" in builder_config.name or builder_config.name == "ax":
            # Cast the GLUE diagnostic task as MNLI.
            benchmark_name = "mnli"
        elif builder_config.name in ["axb", "axg"]:
            # Cast the SuperGLUE diagnostic tasks as RTE.
            benchmark_name = "rte"
        else:
            benchmark_name = builder_config.name
        if builder_config.name == "multirc":
            feature_names = ("question", "answer", "paragraph")
        elif builder_config.name == "wic":
            # This ignores the start/end indices which show where in each sentence the
            # word appears.
            # TODO(craffel): Investigate using those indices.
            feature_names = ("sentence1", "sentence2", "word")
        else:
            feature_names = None
        return functools.partial(
            preprocessors.glue,
            benchmark_name=benchmark_name,
            label_names=builder_config.label_classes,
            feature_names=feature_names)
36 | import collections
import functools
from t5.data import postprocessors
from t5.data import preprocessors
from t5.evaluation import metrics
def get_glue_postprocess_fn(builder_config):
    """Return the postprocessor matching a GLUE/SuperGLUE builder config."""
    if builder_config.name == "stsb":
        # stsb is a regression task: decode predictions back to floats.
        return postprocessors.string_to_float
    elif builder_config.name == "multirc":
        return postprocessors.multirc
    elif builder_config.name == "record":
        return postprocessors.record
    else:
        # Classification tasks map the predicted label string to its class id.
        return functools.partial(
            postprocessors.string_label_to_class_id,
            label_classes=builder_config.label_classes,
        )
37 | import collections
import functools
from t5.data import postprocessors
from t5.data import preprocessors
from t5.evaluation import metrics
# Mapping from GLUE task name to the list of metric callables used to
# evaluate it; tasks with multiple entries report every metric listed.
GLUE_METRICS = collections.OrderedDict([
    ("cola", [metrics.sklearn_metrics_wrapper(
        # Matthews correlation, rescaled to a percentage.
        "matthews_corrcoef", metric_post_process_fn=lambda x: 100 * x)]),
    ("sst2", [metrics.accuracy]),
    ("mrpc", [metrics.f1_score_with_invalid, metrics.accuracy]),
    ("stsb", [metrics.pearson_corrcoef, metrics.spearman_corrcoef]),
    ("qqp", [metrics.f1_score_with_invalid, metrics.accuracy]),
    ("mnli", [metrics.accuracy]),
    ("mnli_matched", [metrics.accuracy]),
    ("mnli_mismatched", [metrics.accuracy]),
    ("qnli", [metrics.accuracy]),
    ("rte", [metrics.accuracy]),
    ("wnli", [metrics.accuracy]),
    ("ax", []),  # Only test set available.
])
def get_glue_metric(task_name):
  """Return the list of metric fns for `task_name` (KeyError if unknown)."""
  return GLUE_METRICS[task_name]
38 | import collections
import functools
from t5.data import postprocessors
from t5.data import preprocessors
from t5.evaluation import metrics
# Mapping from SuperGLUE task name to the list of metric callables used to
# evaluate it; tasks with multiple entries report every metric listed.
SUPERGLUE_METRICS = collections.OrderedDict([
    ("boolq", [metrics.accuracy]),
    ("cb", [metrics.mean_multiclass_f1(num_classes=3), metrics.accuracy]),
    ("copa", [metrics.accuracy]),
    ("multirc", [
        metrics.multirc_f1_over_all_answers,
        # Exact-match averaged per question group.
        metrics.mean_group_metric(metrics.all_match)
    ]),
    ("record", [metrics.deduplicate_metric(metrics.squad)]),
    ("rte", [metrics.accuracy]),
    ("wic", [metrics.accuracy]),
    ("axb", []),  # Only test set available.
    ("axg", []),  # Only test set available.
])
def get_super_glue_metric(task_name):
  """Return the list of metric fns for `task_name` (KeyError if unknown)."""
  return SUPERGLUE_METRICS[task_name]
39 | import collections
import functools
import math
import re
from typing import Any, Callable, Mapping, Optional, Protocol, Sequence, Union
import uuid
from absl import logging
import babel
import gin
import seqio
import tensorflow.compat.v2 as tf
The provided code snippet includes necessary dependencies for implementing the `summarize` function. Write a Python function `def summarize(x, article_key, summary_key)` to solve the following problem:
Convert a summarization dataset to a text2text pair. For example, say the dataset returns examples of this format: {'article': <article>, 'highlights': <summary>} If article_key = 'article', summary_key = 'highlights', then the outputs will have the format: {'inputs': 'summarize': <article>, 'targets': <summary>} Args: x: an example to process. article_key: the feature key for the article to summarize. summary_key: the feature key for the target summary. Returns: A preprocessed example with the format listed above.
Here is the function:
def summarize(x, article_key, summary_key):
  """Convert a summarization dataset to a text2text pair.

  For example, say the dataset returns examples of this format:
    {'article': <article>, 'highlights': <summary>}
  If article_key = 'article', summary_key = 'highlights', then the outputs
  will have the format:
    {'inputs': 'summarize: <article>', 'targets': <summary>}

  Args:
    x: an example to process.
    article_key: the feature key for the article to summarize.
    summary_key: the feature key for the target summary.

  Returns:
    A preprocessed example with the format listed above.
  """
  # Prefix the article with the task label; the summary passes through.
  return {
      'inputs': tf.strings.join(['summarize:', x[article_key]], separator=' '),
      'targets': x[summary_key],
  }
40 | import collections
import functools
import math
import re
from typing import Any, Callable, Mapping, Optional, Protocol, Sequence, Union
import uuid
from absl import logging
import babel
import gin
import seqio
import tensorflow.compat.v2 as tf
# Unicode codepoint ranges (written as regex character-class fragments) for
# scripts that do not separate words with spaces. Concatenated into a single
# character class by pad_nonspaced_languages.
NON_SPACED_LANGUAGE_RANGES = (
    '\u1000-\u104f',  # Burmese
    '\u4e00-\u9fff',  # CJK Unified Ideographs
    '\u3400-\u4dbf',  # CJK Unified Ideographs Extension A
    '\uf900-\ufaff',  # CJK Compatibility Ideographs
    '\u2e80-\u2eff',  # CJK Radicals Supplement
    '\u31c0-\u31ef',  # CJK Strokes
    '\u3000-\u303f',  # CJK Symbols and Punctuation
    '\u3040-\u309f',  # Japanese Hiragana
    '\u30a0-\u30ff',  # Japanese Katakana
    '\ua980-\ua9df',  # Javanese
    '\u1780-\u17ff',  # Khmer
    '\u19e0-\u19ff',  # Khmer Symbols
    '\u0e80-\u0eff',  # Lao
    '\u1980-\u19df',  # Tai Lue
    '\u1a20-\u1aaf',  # Tai Tham
    '\u0e00-\u0e7f',  # Thai
    '\u0f00-\u0fff',  # Tibetan
)
The provided code snippet includes necessary dependencies for implementing the `pad_nonspaced_languages` function. Write a Python function `def pad_nonspaced_languages(x, text_key='text')` to solve the following problem:
Pad non-spaced languages with spaces around each character. Args: x: an example to process. text_key: a string, the key for the text feature to preprocess in the dataset examples. Returns: A preprocessed example.
Here is the function:
def pad_nonspaced_languages(x, text_key='text'):
  """Pad non-spaced languages with spaces around each character.

  Args:
    x: an example to process.
    text_key: a string, the key for the text feature to preprocess in the
      dataset examples.

  Returns:
    A preprocessed example.
  """
  output = dict(x)
  # Build one regex character class covering every non-spaced script, and
  # surround each matching character with spaces.
  char_class = u'([{}])'.format(''.join(NON_SPACED_LANGUAGE_RANGES))
  padded = tf.strings.regex_replace(output[text_key], char_class, r' \1 ')
  # Collapse the whitespace runs introduced above into single spaces.
  output[text_key] = tf.strings.regex_replace(padded, r'\s+', ' ')
  return output
41 | import collections
import functools
import math
import re
from typing import Any, Callable, Mapping, Optional, Protocol, Sequence, Union
import uuid
from absl import logging
import babel
import gin
import seqio
import tensorflow.compat.v2 as tf
AUTOTUNE = tf.data.experimental.AUTOTUNE
def _pad_punctuation(text):
  """Adds spaces around punctuation."""
  # Add space around punctuation. [[:punct:]] is the RE2 POSIX punctuation
  # character class.
  text = tf.strings.regex_replace(text, r'([[:punct:]])', r' \1 ')
  # Collapse consecutive whitespace into one space.
  text = tf.strings.regex_replace(text, r'\s+', ' ')
  return text
def _string_join(lst):
  """Join on space, but collapse consecutive spaces."""
  return tf.strings.regex_replace(
      tf.strings.join(lst, separator=' '), r'\s+', ' ')
The provided code snippet includes necessary dependencies for implementing the `trivia_qa` function. Write a Python function `def trivia_qa(dataset)` to solve the following problem:
Convert a TriviaQA example to multiple flattened examples. TriviaQA produces examples with this form: {'entity_pages': {dict of wiki entities}, 'search_results': <dict of web search results>, 'answer': {dict of all answers}, 'question': <question>, 'question_id': <question_id>, 'question_source': <question_source>} This function will return flattened examples of the format: {'inputs': 'question: <question> context: <article>' 'targets': 'answer: <sampled answer>'} Args: dataset: a tf.data.Dataset to process. Returns: A preprocessed tf.data.Dataset with the format listed above.
Here is the function:
def trivia_qa(dataset):
  """Convert a TriviaQA example to multiple flattened examples.

  TriviaQA produces examples with this form:
    {'entity_pages': {dict of wiki entities},
     'search_results': <dict of web search results>,
     'answer': {dict of all answers}, 'question': <question>,
     'question_id': <question_id>, 'question_source': <question_source>}
  This function will return flattened examples of the format:
    {'inputs': 'question: <question> context: <article>'
     'targets': 'answer: <sampled answer>'}

  Args:
    dataset: a tf.data.Dataset to process.

  Returns:
    A preprocessed tf.data.Dataset with the format listed above.
  """
  def triviaqa_question_answer_context(x):
    """Extracts matched contexts and answers.

    Returns all matched (question-context, answer) pairs.

    Args:
      x: A tfds sample.

    Returns:
      Flattened samples: (question-context, answer).
    """
    # Gather every available context: wiki articles and/or web search hits.
    contexts = []
    if 'entity_pages' in x:
      contexts.append(x['entity_pages']['wiki_context'])
    if 'search_results' in x:
      contexts.append(x['search_results']['search_context'])
    contexts = tf.concat(contexts, 0)
    q = _pad_punctuation(x['question'])
    answers = x['answer']['normalized_aliases']
    # Iterate over the full cross product of (context, answer) pairs; index i
    # below is decoded into a (context_idx, answer_idx) pair.
    combination_size = tf.size(answers)*tf.size(contexts)
    find_answers = tf.TensorArray(
        tf.bool, size=combination_size, dynamic_size=True)
    selected_answers = tf.TensorArray(
        tf.string, size=combination_size, dynamic_size=True)
    join_q_c = tf.TensorArray(
        tf.string, size=combination_size, dynamic_size=True)
    def cond_fn(i, find_answers, selected_answers, join_q_c):
      del find_answers, selected_answers, join_q_c # Unused
      return tf.less(i, combination_size)
    def body_fn(i, find_answers, selected_answers, join_q_c):
      """Find answers from contexts and join."""
      # Decode the flat loop index into a (context, answer) pair.
      context_idx = tf.math.floordiv(i, tf.size(answers))
      answer_idx = tf.math.mod(i, tf.size(answers))
      a = _pad_punctuation(answers[answer_idx])
      # Wrap the answer in '.*' so regex_full_match acts as a substring test.
      a_ = tf.strings.join(['.*', a, '.*'])
      c = _pad_punctuation(contexts[context_idx])
      # Case-insensitive check that the answer occurs in the context.
      find_a = tf.strings.regex_full_match(
          tf.strings.lower(c),
          tf.strings.lower(a_))
      find_answers = find_answers.write(i, find_a)
      selected_answers = selected_answers.write(i, a)
      join_q_c_str = _string_join(['question:', q, 'context:', c])
      join_q_c = join_q_c.write(i, join_q_c_str)
      return (i + 1, find_answers, selected_answers, join_q_c)
    _, find_answers, selected_answers, join_q_c = tf.while_loop(
        cond_fn,
        body_fn,
        loop_vars=[
            tf.constant(0), find_answers, selected_answers,
            join_q_c
        ])
    find_answers = find_answers.stack()
    selected_answers = selected_answers.stack()
    join_q_c = join_q_c.stack()
    # Keep only the pairs where the answer actually appears in the context.
    selected_answers = tf.boolean_mask(selected_answers, find_answers)
    selected_join_q_c = tf.boolean_mask(join_q_c, find_answers)
    return selected_join_q_c, selected_answers
  def my_fn(x):
    """Create TriviaQA example."""
    join_q_c, a = triviaqa_question_answer_context(x)
    return {
        'inputs': join_q_c,
        'targets': a
    }
  dataset = dataset.map(my_fn, num_parallel_calls=AUTOTUNE)
  # Each input example produced a batch of matched pairs; flatten them.
  return dataset.unbatch()
42 | import collections
import functools
import math
import re
from typing import Any, Callable, Mapping, Optional, Protocol, Sequence, Union
import uuid
from absl import logging
import babel
import gin
import seqio
import tensorflow.compat.v2 as tf
AUTOTUNE = tf.data.experimental.AUTOTUNE
def squad(x, include_context=True):
  """Convert SQuAD examples to a text2text pair.

  SQuAD produces examples with this form:
    {'id': <id>, context': <article>, 'question': <question>,
     'answers': { 'text': [<n answers>] }}
  This function will return examples of the format:
    {'inputs': 'question: <question> context: <article>',
     'targets': '<answer_0>',
     'id': <id>, 'question': <question>, 'context': <context>,
     'answers': [<n answers>]},

  Args:
    x: an example to process.
    include_context: a boolean

  Returns:
    A preprocessed example with the format listed above.
  """
  a = _pad_punctuation(x['answers']['text'])
  q = _pad_punctuation(x['question'])
  c = _pad_punctuation(x['context'])
  if include_context:
    inputs = _string_join(['question:', q, 'context:', c])
  else:
    # Closed-book variant: the model must answer from the question alone.
    inputs = _string_join(['squad trivia question:', q])
  return {
      'inputs': inputs,
      # First answer is the training target; the full list is kept for eval.
      'targets': a[0],
      'id': x['id'],
      'context': c,
      'question': q,
      'answers': a
  }
def _span_answer(context, answer_text):
  """Finds start/end indices of answer_text in context after space tokenization.

  If answer_tokens is not a sublist of context_tokens, returns empty string.

  Args:
    context: 0-d string tensor
    answer_text: 0-d string

  Returns:
    A string tensor of the form 'start: <i> end: <j>', or '' if not found.
  """
  def space_tok(s):
    """Replace non-word chars with space then split on space."""
    s = tf.strings.regex_replace(s, r'\W', ' ')
    return tf.strings.split(input=[s], sep=' ').values
  def find_subseq(n, h):
    """Finds index of needle subsequence inside haystack.

    Args:
      n: 1-d tensor
      h: 1-d tensor same type as n

    Returns:
      Index of start of n if found; otherwise -1.
    """
    l_n = tf.size(n)
    l_h = tf.size(h)
    found = -1
    # Iterate up to l_h - l_n inclusive so a needle ending exactly at the
    # end of the haystack is still matched (previously the final start
    # position was skipped, an off-by-one).
    for i in tf.range(0, l_h - l_n + 1):
      if tf.reduce_all(tf.equal(h[i:i+l_n], n)):
        found = i
        break
    return found
  answer_tokens = space_tok(answer_text)
  context_tokens = space_tok(context)
  start = find_subseq(answer_tokens, context_tokens)
  # Both indices are inclusive.
  end = start + tf.size(answer_tokens) - 1
  # Just take the first candidate that matches exactly.
  if tf.equal(start, -1):
    return ''
  return tf.strings.format('start: {} end: {}', [start, end])
The provided code snippet includes necessary dependencies for implementing the `squad_span_space_tokenized` function. Write a Python function `def squad_span_space_tokenized(dataset)` to solve the following problem:
Convert SQuAD examples to a text2text pair with span output. SQuAD produces examples with this form: {'context': <article>, 'question': <question>, 'answers': { 'text': [<all answers>] }} This function returns examples with the format {'inputs': 'context: <article> question: <question>', 'targets': 'start: <start_index> end: <end_index>'} where <start_index> and <end_index> specify the space-tokenized span start/end indices. Both <start_index> and <end_index> are included in the answer. In the case where the tokenized answer is not found in the tokenized context, the example is skipped. Args: dataset: a tf.data.Dataset to process. Returns: A preprocessed tf.data.Dataset with the format listed above.
Here is the function:
def squad_span_space_tokenized(dataset):
  """Convert SQuAD examples to a text2text pair with span output.

  SQuAD produces examples with this form:
    {'context': <article>, 'question': <question>,
     'answers': { 'text': [<all answers>] }}
  This function returns examples with the format
    {'inputs': 'context: <article> question: <question>',
     'targets': 'start: <start_index> end: <end_index>'}
  where <start_index> and <end_index> specify the space-tokenized span
  start/end indices. Both <start_index> and <end_index> are included in
  the answer. In the case where the tokenized answer is
  not found in the tokenized context, the example is skipped.

  Args:
    dataset: a tf.data.Dataset to process.

  Returns:
    A preprocessed tf.data.Dataset with the format listed above.
  """
  def my_fn(x):
    """Create squad example as in squad_span_char, but tokenized on spaces."""
    res = dict(x)
    # Replace the text answer with its 'start: i end: j' span encoding;
    # _span_answer returns '' when the answer is not found in the context.
    res['targets'] = _span_answer(x['context'], x['targets'])
    return res
  # NOTE(review): `squad` as defined above is a per-example function, yet it
  # is called here with a whole dataset — presumably it is wrapped with a
  # dataset-mapping decorator elsewhere; confirm against the full file.
  dataset = squad(dataset)
  dataset = dataset.map(my_fn, num_parallel_calls=AUTOTUNE)
  # Drop examples whose answer span could not be located.
  return dataset.filter(lambda x: tf.strings.length(x['targets']) > 0)
43 | import collections
import functools
import math
import re
from typing import Any, Callable, Mapping, Optional, Protocol, Sequence, Union
import uuid
from absl import logging
import babel
import gin
import seqio
import tensorflow.compat.v2 as tf
The provided code snippet includes necessary dependencies for implementing the `random_split_text` function. Write a Python function `def random_split_text(dataset, text_key='text', min_words_per_segment=16, max_words_per_segment=512, max_words_total=8192)` to solve the following problem:
Randomly split single-string examples into multiple examples each. Segment lengths are chosen according to a log-uniform distribution. Each incoming string is chopped into multiple equal-length examples with the last one possibly being shorter. If the input string is longer than max_words_total, then we use one random chunk and discard the rest. This may help with model stability. The intended use case is to break up long text examples for use in unsupervised transfer-learning. We don't really want to use this preprocessor for any dataset which has a well-defined evaluation procedure. If we apply this preprocessor e.g. in an MT component, then the evaluation job will randomly split text when evaluating and the BLEU will get funky. Args: dataset: a tf.data.Dataset with dictionaries containing the key text_key text_key: a string min_words_per_segment: an integer max_words_per_segment: an integer max_words_total: an integer Returns: a dataset
Here is the function:
def random_split_text(dataset,
                      text_key='text',
                      min_words_per_segment=16,
                      max_words_per_segment=512,
                      max_words_total=8192):
  """Randomly split single-string examples into multiple examples each.

  Segment lengths are chosen according to a log-uniform distribution.
  Each incoming string is chopped into multiple equal-length examples
  with the last one possibly being shorter.
  If the input string is longer than max_words_total, then we use one random
  chunk and discard the rest. This may help with model stability.
  The intended use case is to break up long text examples for use in
  unsupervised transfer-learning.
  We don't really want to use this preprocessor for any dataset which has a
  well-defined evaluation procedure. If we apply this preprocessor e.g. in an
  MT component, then the evaluation job will randomly split text when
  evaluating and the BLEU will get funky.

  Args:
    dataset: a tf.data.Dataset with dictionaries containing the key text_key
    text_key: a string
    min_words_per_segment: an integer
    max_words_per_segment: an integer
    max_words_total: an integer

  Returns:
    a dataset
  """
  def random_chunk(x, chunk_size, seed):
    """Pick a random chunk of a 1d Tensor.

    The tensor is divided into chunks of length chunk_size, with the last
    chunk being potentially smaller. A random chunk is returned.

    Args:
      x: a 1d tf.Tensor.
      chunk_size: an integer.
      seed: int32 [2]-Tensor, the random seed.

    Returns:
      a 1d tf.Tensor with length <= chunk_size.
    """
    size = tf.size(x)
    # Ceiling division, with a minimum of one chunk for empty/short input.
    num_chunks = tf.maximum(1, (size - 1) // chunk_size + 1)
    chunk_num = tf.random.stateless_uniform(
        [],
        seed=seed,
        minval=0,
        maxval=num_chunks,
        dtype=tf.int32)
    return x[chunk_size * chunk_num:chunk_size * (chunk_num + 1)]
  @seqio.map_over_dataset(num_seeds=2)
  def my_fn(x, seeds):
    """Split one string into multiple strings.

    Args:
      x: a feature dictionary
      seeds: an int32 Tensor, shaped (2, 2), the random seeds.

    Returns:
      a feature dictionary
    """
    text = x[text_key]
    words = tf.strings.split([text]).values
    if max_words_total:
      # Cap total length by keeping a single random chunk of the text.
      words = random_chunk(words, max_words_total, seed=seeds[0])
    n_words = tf.size(words)
    # first pick a length (number of words per segment); exp of a uniform
    # sample over log-space gives the log-uniform distribution.
    length = tf.cast(
        tf.exp(
            tf.random.stateless_uniform(
                [],
                minval=math.log(min_words_per_segment),
                maxval=math.log(max_words_per_segment),
                seed=seeds[1],
            )
        ),
        tf.int32)
    # Pad to a multiple of length, then use tf.reshape to split up the words
    # into num_segments segments each of the given length.
    num_segments = tf.cast(
        tf.math.ceil(
            tf.cast(n_words, tf.float32) / tf.cast(length, tf.float32)
        ),
        tf.int32)
    padding = num_segments * length - n_words
    words = tf.pad(words, [[0, padding]])
    words = tf.reshape(words, [-1, length])
    # Finally, join with spaces and strip. The padding turns into a bunch of
    # spaces that get stripped out.
    words = tf.strings.reduce_join(words, axis=1, separator=' ')
    return {text_key: tf.strings.strip(words)}
  # my_fn emits a batch of segments per input; unbatch to one per example.
  return my_fn(dataset).unbatch()
44 | import collections
import functools
import math
import re
from typing import Any, Callable, Mapping, Optional, Protocol, Sequence, Union
import uuid
from absl import logging
import babel
import gin
import seqio
import tensorflow.compat.v2 as tf
def split_text_to_words(dataset, text_key='text', min_num_words=2):
  """Split text to words and filter out examples with too few words."""
  def add_words_feature(example):
    # Copy so the input example dict is not mutated.
    out = dict(example)
    out['words'] = tf.strings.split([example[text_key]]).values
    return out
  has_enough_words = lambda ex: tf.size(ex['words']) >= min_num_words
  return dataset.map(
      add_words_feature, num_parallel_calls=AUTOTUNE).filter(has_enough_words)
The provided code snippet includes necessary dependencies for implementing the `fill_in_the_blank` function. Write a Python function `def fill_in_the_blank(dataset, text_key='text', label='fill: ')` to solve the following problem:
Create a dataset consisting of fill-in-the-blank text examples. The input examples should have a key text_key associated with a tf.string value. The output examples have keys 'inputs' and 'targets'. The input string is split on whitespace to form a sequence of words. This sequence is chopped randomly into segments of one or more words. Alternate segments are included in the inputs and targets, with a special word 'X' marking a missing segment. The given label is prepended to the inputs. Each input string produces two examples - one the inverse of the other. Inputs with less than two words are dropped. EXAMPLE: input: { 'text': 'The fat cat sat on the mat.' } outputs: { 'inputs': 'fill: The fat X the X' 'targets': 'X cat sat on X mat.' } { 'inputs': 'fill: X cat sat on X mat.' 'targets': 'The fat X the X' } Args: dataset: a tf.data.Dataset text_key: a string, the key for the text feature to preprocess in the dataset examples. label: a string, the label to prepend to the inputs. Returns: a tf.data.Dataset
Here is the function:
def fill_in_the_blank(dataset,
                      text_key='text',
                      label='fill: '):
  """Create a dataset consisting of fill-in-the-blank text examples.

  The input examples should have a key text_key associated with a tf.string
  value.
  The output examples have keys 'inputs' and 'targets'.
  The input string is split on whitespace to form a sequence of words.
  This sequence is chopped randomly into segments of one or more words.
  Alternate segments are included in the inputs and targets, with a special
  word 'X' marking a missing segment.
  The given label is prepended to the inputs. Each input string produces two
  examples - one the inverse of the other. Inputs with fewer than two words
  are dropped.

  EXAMPLE:
  input:
    {
      'text': 'The fat cat sat on the mat.'
    }
  outputs:
    {
      'inputs': 'fill: The fat X the X'
      'targets': 'X cat sat on X mat.'
    }
    {
      'inputs': 'fill: X cat sat on X mat.'
      'targets': 'The fat X the X'
    }

  Args:
    dataset: a tf.data.Dataset
    text_key: a string, the key for the text feature to preprocess in the
      dataset examples.
    label: a string, the label to prepend to the inputs.

  Returns:
    a tf.data.Dataset
  """
  @seqio.map_over_dataset(num_seeds=3)
  def my_fn(x, seeds):
    """Generates two preprocessed examples that are roughly inverses.

    Args:
      x: an example dict with text pre-split in `words` feature.
      seeds: an int32 Tensor, shaped (3, 2), the random seeds.

    Returns:
      an example dict with two inputs and two targets, one for each resulting
      preprocessed example.
    """
    words = x['words']
    n_words = tf.size(words)
    # First select the break probability. We pick this on a log-uniform
    # distribution between 1/(n_words + 1) and 1/2. This means that some
    # sequences will be chopped roughly and others finely.
    min_log_p_break = -tf.math.log(tf.cast(n_words, tf.float32) + 2.0)
    max_log_p_break = -tf.math.log(2.0)
    p_break = tf.exp(
        tf.random.stateless_uniform(
            [],
            minval=min_log_p_break,
            maxval=max_log_p_break,
            seed=seeds[0])
    )
    # craffel@ says that there may be bugs in random.uniform making it not
    # really uniform. This doesn't seem horribly important here, but may
    # need another look.
    # breaks[i] is True iff a segment boundary falls between word i and i+1.
    breaks = tf.less(
        tf.random.stateless_uniform([n_words - 1], seed=seeds[1]),
        p_break)
    def one_random_break():
      # Fallback: force exactly one break so both segments are non-empty.
      pos = tf.random.stateless_uniform(
          [],
          minval=0,
          maxval=n_words - 1,
          dtype=tf.int32,
          seed=seeds[2])
      return tf.one_hot(pos, n_words - 1,
                        dtype=tf.bool, on_value=True, off_value=False)
    breaks = tf.cond(
        tf.math.reduce_any(breaks), lambda: breaks, one_random_break)
    # Prepend True so the first word starts a segment, then assign each word
    # to segment 0 or 1 by the parity of how many breaks precede it.
    breaks = tf.concat([[True], breaks], axis=0)
    word_to_seq_id = tf.math.mod(tf.math.cumsum(tf.cast(breaks, tf.int32)), 2)
    # separators:
    #   if in your segment: ' '
    #   if break to other segment: ' X'
    #   else: ''
    results = []
    for seq_id in [0, 1]:
      in_my_seq = tf.equal(word_to_seq_id, seq_id)
      separator_strings = tf.where(
          in_my_seq,
          ' ',
          tf.where(breaks, ' X', '')
      )
      word_strings = tf.where(in_my_seq, words, '')
      # Interleave separators and words, then flatten into one string.
      all_strings = tf.stack([separator_strings, word_strings], axis=1)
      # substr(..., 1, max) drops the leading separator character.
      results.append(tf.strings.substr(
          tf.strings.reduce_join(all_strings), 1, tf.int32.max))
    # Emit both directions: each segment serves once as input, once as target.
    inputs = tf.stack([tf.strings.join([label, results[0]]),
                       tf.strings.join([label, results[1]])])
    targets = tf.stack([results[1], results[0]])
    return {'inputs': inputs, 'targets': targets}
  dataset = split_text_to_words(dataset, text_key, min_num_words=2)
  return my_fn(dataset).unbatch()
45 | import collections
import functools
import math
import re
from typing import Any, Callable, Mapping, Optional, Protocol, Sequence, Union
import uuid
from absl import logging
import babel
import gin
import seqio
import tensorflow.compat.v2 as tf
def split_text_to_words(dataset, text_key='text', min_num_words=2):
  """Split text to words and filter out examples with too few words."""
  def split(x):
    # Copy so the input example dict is not mutated.
    res = dict(x)
    # Whitespace-tokenize; .values flattens the ragged result to a 1-d tensor.
    res['words'] = tf.strings.split([x[text_key]]).values
    return res
  dataset = dataset.map(split, num_parallel_calls=AUTOTUNE)
  return dataset.filter(lambda x: tf.size(x['words']) >= min_num_words)
The provided code snippet includes necessary dependencies for implementing the `fill_in_the_blank_sized` function. Write a Python function `def fill_in_the_blank_sized( dataset, size_bins=(1, 2, 4, 8, 16, 32, 64, 128, 256, 512), text_key='text', label='fill: ')` to solve the following problem:
Fill in the blank preprocessor that labels blank with a binned size. The actual blank size is sampled uniformly from the inclusive range of the min and max bin. The blank is then filled in with the closest bin size to the actual blank size. Args: dataset: a tf.data.Dataset, the dataset to preprocess. size_bins: a list, a list of blank sizes to select from when labelling the blank. text_key: a string, the key for the text feature to preprocess in the dataset examples. label: a string, the label to prepend to the inputs. Returns: a tf.data.Dataset
Here is the function:
def fill_in_the_blank_sized(
    dataset,
    size_bins=(1, 2, 4, 8, 16, 32, 64, 128, 256, 512),
    text_key='text',
    label='fill: '):
  """Fill in the blank preprocessor that labels blank with a binned size.

  The actual blank size is sampled uniformly from the inclusive range of the
  min and max bin. The blank is then filled in with the closest bin size to
  the actual blank size.

  Args:
    dataset: a tf.data.Dataset, the dataset to preprocess.
    size_bins: a list, a list of blank sizes to select from when labelling the
      blank.
    text_key: a string, the key for the text feature to preprocess in the
      dataset examples.
    label: a string, the label to prepend to the inputs.

  Returns:
    a tf.data.Dataset
  """
  bins = sorted(size_bins)
  @seqio.map_over_dataset(num_seeds=2)
  def my_fn(x, seeds):
    """Apply transformation."""
    words = x['words']
    n_words = tf.size(words)
    # NOTE(review): stateless_uniform's maxval is exclusive for int dtypes,
    # so the max bin itself looks unreachable despite the docstring saying
    # 'inclusive' — confirm intended behavior.
    blank_size = tf.random.stateless_uniform(
        [],
        minval=bins[0],
        maxval=tf.math.minimum(n_words, bins[-1]),
        dtype=tf.dtypes.int32,
        seed=seeds[0])
    # Snap the sampled size to the nearest bin for the blank's label.
    bin_delta = tf.math.abs(bins - blank_size)
    bin_ = tf.gather(bins, tf.argmin(bin_delta))
    # Choose a uniformly random start position for the blank.
    blank_start = tf.random.stateless_uniform(
        [],
        minval=0,
        maxval=tf.math.maximum(0, n_words-blank_size) + 1,
        dtype=tf.dtypes.int32,
        seed=seeds[1])
    pre_blank = tf.strings.reduce_join(words[0:blank_start], separator=' ')
    post_blank = tf.strings.reduce_join(
        words[blank_start+blank_size:], separator=' ')
    # The blank placeholder carries its binned size, e.g. '_4_'.
    blank = tf.strings.format('_{}_', bin_)
    # We strip to handle cases where blank is at beginning or end.
    input_ = tf.strings.strip(
        tf.strings.join([pre_blank, blank, post_blank], ' '))
    input_ = tf.strings.join([label, input_])
    target = tf.strings.reduce_join(
        words[blank_start:blank_start+blank_size], separator=' ')
    return {
        'inputs': tf.strings.strip(input_),
        'targets': tf.strings.strip(target)}
  dataset = split_text_to_words(dataset, text_key, min_num_words=2)
  # Filter out examples with fewer words than the minimum.
  dataset = dataset.filter(lambda x: tf.size(x['words']) >= bins[0])
  return my_fn(dataset)
46 | import collections
import functools
import math
import re
from typing import Any, Callable, Mapping, Optional, Protocol, Sequence, Union
import uuid
from absl import logging
import babel
import gin
import seqio
import tensorflow.compat.v2 as tf
AUTOTUNE = tf.data.experimental.AUTOTUNE
def translate(x, source_language, target_language):
  """Convert a translation dataset example to a text2text pair.

  For example, say the dataset returns examples of this format:
    {'de': 'Das ist gut.', 'en': 'That is good.'}
  If source_language = 'de', target_language = 'en', then the outputs will have
  the format:
    {'inputs': 'translate German to English: Das ist gut.',
     'targets': 'That is good.'}

  Args:
    x: an example to process.
    source_language: source language code (e.g. 'en') to translate from.
    target_language: target language code (e.g. 'de') to translate to.

  Returns:
    A preprocessed example with the format listed above.
  """
  # Only the primary 2-letter subtag is supported (e.g. 'zh-cn' -> 'zh');
  # warn whenever an extended code gets truncated.
  for code in (source_language, target_language):
    if len(code) > 2:
      logging.warning(
          'Extended language code %s not supported. Falling back on %s.',
          code, code[:2]
      )
  source_name = babel.Locale(source_language[:2]).english_name
  target_name = babel.Locale(target_language[:2]).english_name
  # e.g. 'translate German to English: '
  prefix = 'translate {} to {}: '.format(source_name, target_name)
  return {
      'inputs': tf.strings.join([prefix, x[source_language]]),
      'targets': x[target_language],
  }
The provided code snippet includes necessary dependencies for implementing the `multi_translate` function. Write a Python function `def multi_translate(dataset, source_language, target_language)` to solve the following problem:
Convert a multi-translate dataset to a text2text pair. For example, say the dataset returns examples which have a 'translations' feature key so that examples have the following format: { ... 'translations': { 'language': ['de', 'fr', 'en'], 'translation': ['Das ist gut.', 'Ca c'est bon', 'That is good.'] }, ... } If source_language = 'de', target_language = 'en', then this function will return examples of the format: {'inputs': 'translate German to English: Das ist gut.', 'targets': 'That is good.'} Any other languages present in the dataset will be filtered out. Args: dataset: a tf.data.Dataset to process. source_language: source language code (e.g. 'en') to translate from. target_language: target language code (e.g. 'de') to translate to. Returns: A preprocessed tf.data.Dataset with the format listed above.
Here is the function:
def multi_translate(dataset, source_language, target_language):
  """Convert a multi-translate dataset to a text2text pair.

  For example, say the dataset returns examples which have a 'translations'
  feature key so that examples have the following format:
    {
      ...
      'translations': {
          'language': ['de', 'fr', 'en'],
          'translation': ['Das ist gut.', "Ca c'est bon", 'That is good.']
      },
      ...
    }
  If source_language = 'de', target_language = 'en', then this function will
  return examples of the format:
    {'inputs': 'translate German to English: Das ist gut.',
     'targets': 'That is good.'}
  Any other languages present in the dataset will be filtered out.

  Args:
    dataset: a tf.data.Dataset to process.
    source_language: source language code (e.g. 'en') to translate from.
    target_language: target language code (e.g. 'de') to translate to.

  Returns:
    A preprocessed tf.data.Dataset with the format listed above.
  """

  def has_both_languages(ex):
    """True iff both the source and target language are present."""
    langs = ex['translations']['language']
    source_present = tf.reduce_any(tf.equal(langs, source_language))
    target_present = tf.reduce_any(tf.equal(langs, target_language))
    return tf.logical_and(source_present, target_present)

  def extract_pair(ex):
    """Pulls out just the source/target translations, keyed by language."""
    langs = ex['translations']['language']
    texts = ex['translations']['translation']
    # tf.where gives the position of each language in the parallel lists.
    source_pos = tf.squeeze(tf.where(tf.equal(langs, source_language)))
    target_pos = tf.squeeze(tf.where(tf.equal(langs, target_language)))
    return {
        source_language: texts[source_pos],
        target_language: texts[target_pos],
    }

  dataset = dataset.filter(has_both_languages)
  dataset = dataset.map(extract_pair, num_parallel_calls=AUTOTUNE)
  return translate(dataset, source_language, target_language)
47 | import collections
import functools
import math
import re
from typing import Any, Callable, Mapping, Optional, Protocol, Sequence, Union
import uuid
from absl import logging
import babel
import gin
import seqio
import tensorflow.compat.v2 as tf
The provided code snippet includes necessary dependencies for implementing the `definite_pronoun_resolution_simple` function. Write a Python function `def definite_pronoun_resolution_simple(x, label='wsc:')` to solve the following problem:
Converts DPR examples to a simple text to text format. A typical example from the definite pronoun resolution dataset might look like { 'sentence': 'Bob asked Tom if he can lend some money.', 'pronoun': 'he', 'candidates': ['Bob', 'Tom'], 'label': 1, } This will be transformed to { 'inputs': 'wsc: Bob asked Tom if *he* can lend some money.' 'targets': 'Tom', } Args: x: an example to process. label: a string, the label to prepend to the inputs. Returns: A preprocessed example.
Here is the function:
def definite_pronoun_resolution_simple(x, label='wsc:'):
  """Converts DPR examples to a simple text to text format.

  A typical example from the definite pronoun resolution dataset might look
  like
    {
      'sentence': 'Bob asked Tom if he can lend some money.',
      'pronoun': 'he',
      'candidates': ['Bob', 'Tom'],
      'label': 1,
    }
  This will be transformed to
    {
      'inputs': 'wsc: Bob asked Tom if *he* can lend some money.',
      'targets': 'Tom',
    }

  Args:
    x: an example to process.
    label: a string, the label to prepend to the inputs.

  Returns:
    A preprocessed example.
  """
  # Only the first occurrence of the pronoun is highlighted: when the pronoun
  # appears multiple times, the first one is the one to be resolved.
  pattern = tf.strings.join([r' (', x['pronoun'], r')( |\.|,)'])
  highlighted_sentence = tf.strings.regex_replace(
      x['sentence'], pattern, r' *\1*\2', replace_global=False)
  return {
      'inputs': tf.strings.join(
          [label, highlighted_sentence], separator=' '),
      'targets': x['candidates'][x['label']],
  }
48 | import collections
import functools
import math
import re
from typing import Any, Callable, Mapping, Optional, Protocol, Sequence, Union
import uuid
from absl import logging
import babel
import gin
import seqio
import tensorflow.compat.v2 as tf
def neighboring_pairs(dataset, text_key='text', reuse_sentences=True):
  """Create a dataset consisting of neighboring sentence pairs.

  The input examples should have a key text_key associated with a tf.string
  value. The output examples have keys 'first' and 'second'.

  We only take sentence pairs from within the same line since lines seem to
  represent paragraph-like structures in our text datasets. Empty lines and
  1-sentence lines will thus be ignored.

  The argument reuse_sentences determines whether a sentence can be used as
  both the first and last element in the pair. For example, the input with
  sentences A,B,C,D will return (A,B),(B,C),(C,D) if reuse_sentences is True
  and (A,B),(C,D) if reuse_sentences is False.

  Args:
    dataset: a tf.data.Dataset
    text_key: a string, the key for the text feature to preprocess in the
      dataset examples.
    reuse_sentences: a boolean

  Returns:
    a tf.data.Dataset
  """

  def to_nonempty_lines(ds):
    """Flattens each text example into its stripped, non-empty lines."""

    def split_lines(text):
      return tf.strings.strip(tf.strings.split([text], sep='\n').values)

    ds = ds.map(split_lines, num_parallel_calls=AUTOTUNE)
    ds = ds.unbatch()
    return ds.filter(lambda line: tf.strings.length(line) > 0)

  def pair_up_sentences(line):
    """Splits a line into sentences and pairs up neighbors."""
    # TODO(mmatena): Use better sentence segmentation.
    # A random marker is injected after sentence-ending punctuation and then
    # used as the split delimiter, so the punctuation itself is preserved.
    marker = str(uuid.uuid4())
    marked = tf.strings.regex_replace(
        line, r'((?:\.|\!|\?)+)', r'\1' + marker)
    sentences = tf.strings.strip(tf.strings.split([marked], marker).values)
    if reuse_sentences:
      # Overlapping pairs: (s0,s1), (s1,s2), ...
      return {'first': sentences[:-1], 'second': sentences[1:]}
    # Disjoint pairs: (s0,s1), (s2,s3), ...
    return {'first': sentences[:-1:2], 'second': sentences[1::2]}

  def shorter_length(pair):
    return tf.math.minimum(
        tf.strings.length(pair['first']), tf.strings.length(pair['second']))

  dataset = dataset.map(lambda x: x[text_key], num_parallel_calls=AUTOTUNE)
  dataset = to_nonempty_lines(dataset)
  dataset = dataset.map(pair_up_sentences, num_parallel_calls=AUTOTUNE)
  dataset = dataset.unbatch()
  # Drop pairs in which either sentence is empty.
  return dataset.filter(lambda pair: shorter_length(pair) > 0)
The provided code snippet includes necessary dependencies for implementing the `next_sentence_prediction` function. Write a Python function `def next_sentence_prediction(dataset, text_key='text', reuse_sentences=True, label_sentences=False, p_neighbors=0.5, label='nsp: ', buffer_size=50000)` to solve the following problem:
Create a dataset containing a next sentence prediction objective. The input examples should have a key text_key associated with a tf.string value. The output examples have keys 'inputs' and 'targets'. EXAMPLE OUTPUTS: { input: "nsp: sentence1: The man went to the store. sentence2: Penguins are " "flightless birds.", target: "not_next" } The "sentence1:" and "sentence2:" labels will be omitted if label_sentences is False. Args: dataset: a tf.data.Dataset text_key: a string, the key for the text feature to preprocess in the dataset examples. reuse_sentences: a boolean, see docs for `neighboring_pairs` for more info. label_sentences: a boolean p_neighbors: a float between 0 and 1, the probability that a sentence pair will be neighbors. label: a string, the label to prepend to the inputs. buffer_size: an int, the size of the shuffle buffer used to get non-neighboring sentences. Returns: a tf.data.Dataset
Here is the function:
def next_sentence_prediction(dataset,
                             text_key='text',
                             reuse_sentences=True,
                             label_sentences=False,
                             p_neighbors=0.5,
                             label='nsp: ',
                             buffer_size=50000):
  """Create a dataset containing a next sentence prediction objective.

  The input examples should have a key text_key associated with a tf.string
  value.
  The output examples have keys 'inputs' and 'targets'.

  EXAMPLE OUTPUTS:
  {
    input: "nsp: sentence1: The man went to the store. sentence2: Penguins are "
           "flightless birds.",
    target: "not_next"
  }

  The "sentence1:" and "sentence2:" labels will be omitted if label_sentences
  is False.

  Args:
    dataset: a tf.data.Dataset
    text_key: a string, the key for the text feature to preprocess in the
      dataset examples.
    reuse_sentences: a boolean, see docs for `neighboring_pairs` for more info.
    label_sentences: a boolean
    p_neighbors: a float between 0 and 1, the probability that a sentence pair
      will be neighbors.
    label: a string, the label to prepend to the inputs.
    buffer_size: an int, the size of the shuffle buffer used to get
      non-neighboring sentences.

  Returns:
    a tf.data.Dataset
  """
  sentence1_label, sentence2_label = '', ''
  if label_sentences:
    sentence1_label, sentence2_label = 'sentence1: ', 'sentence2: '
  # Sentinel emitted for degenerate pairs; such examples are filtered at the
  # end (see `example_len` below).
  empty = tf.constant('', dtype=tf.string, shape=[1])
  dataset = neighboring_pairs(
      dataset, text_key=text_key, reuse_sentences=reuse_sentences)
  # Batch in (shuffled) groups of 2 so that a "not_next" example can be
  # fabricated by swapping the 'second' sentences of the two pairs.
  dataset = dataset.shuffle(buffer_size).batch(2, drop_remainder=True)
  def some_are_empty(*tensors):
    """See if at least one tensor has shape [0]."""
    empty = [tf.equal(tf.size(t), 0) for t in tensors]
    return tf.reduce_any(empty)
  @seqio.map_over_dataset(num_seeds=1)
  def my_fn(x, seed):
    """Function to be applied to each example in dataset."""
    # Decide (deterministically per seed) whether this batch of 2 pairs keeps
    # the true neighboring sentences or swaps them to create negatives.
    use_neighbors = (
        tf.random.stateless_uniform(shape=[], seed=seed) < p_neighbors
    )
    firsts, seconds = tf.cond(
        use_neighbors,
        lambda: (x['first'], x['second']),
        lambda: (x['first'], tf.stack([x['second'][1], x['second'][0]])),
    )
    relation_label = tf.cond(
        use_neighbors,
        lambda: 'next',
        lambda: 'not_next',
    )
    inputs = []
    for i in range(2):
      first_inputs = firsts[i]
      second_inputs = seconds[i]
      # Default arguments bind the current loop values at definition time,
      # avoiding the late-binding closure pitfall inside tf.cond.
      def create_examples(first_i=first_inputs, second_i=second_inputs):
        return tf.strings.join([
            label,
            sentence1_label,
            first_i,
            ' ',
            sentence2_label,
            second_i,
        ])
      # Emit the empty sentinel when either sentence is missing.
      inpt = tf.cond(
          some_are_empty(first_inputs, second_inputs),
          lambda: empty,
          create_examples,
      )
      inputs.append(tf.strings.strip(inpt))
    inputs = tf.reshape(inputs, [-1])
    # Both examples in the batch share the same next/not_next label.
    targets = tf.reshape(2 * [relation_label], [-1])
    return {'inputs': inputs, 'targets': targets}
  dataset = my_fn(dataset).unbatch()
  def example_len(x):
    return tf.math.minimum(
        tf.strings.length(x['inputs']), tf.strings.length(x['targets']))
  # Remove examples with empty strings.
  return dataset.filter(lambda x: example_len(x) > 0)
49 | import collections
import functools
import math
import re
from typing import Any, Callable, Mapping, Optional, Protocol, Sequence, Union
import uuid
from absl import logging
import babel
import gin
import seqio
import tensorflow.compat.v2 as tf
The provided code snippet includes necessary dependencies for implementing the `lm` function. Write a Python function `def lm(x)` to solve the following problem:
Basic language modeling objective for text - empty inputs. Given inputs with the format: {"text": "Here is some text."} This preprocess produces examples with the format {"inputs": "", "targets": "Here is some text."} Args: x: an example to process. Returns: A preprocessed example.
Here is the function:
def lm(x):
  """Prefix-free language modeling objective for text.

  Given inputs with the format:
    {"text": "Here is some text."}
  this produces examples with the format:
    {"inputs": "", "targets": "Here is some text."}

  Args:
    x: an example to process.

  Returns:
    A preprocessed example with an empty 'inputs' field and the original
    text as 'targets'.
  """
  example = {'inputs': ''}
  example['targets'] = x['text']
  return example
50 | import collections
import functools
import math
import re
from typing import Any, Callable, Mapping, Optional, Protocol, Sequence, Union
import uuid
from absl import logging
import babel
import gin
import seqio
import tensorflow.compat.v2 as tf
AUTOTUNE = tf.data.experimental.AUTOTUNE
def _wsc_inputs(x):
  """Given an example from SuperGLUE WSC, compute the 'inputs' value.

  The output will look like a fill in the blank with the pronoun blanked out.
  For example, the text
    'Mitchell asked Tom if he could lend some money.'
  would be transformed to
    'Mitchell asked Tom if X could lend some money.'

  Args:
    x: A dict that is an example from the WSC task of SuperGLUE.

  Returns:
    A scalar string tensor.
  """
  words = tf.strings.split([x['text']], sep=' ').values
  # We would need some special logic to handle the case where the pronoun is the
  # first or last word in the text. None of the examples in WSC seem to have
  # this, so we are ignoring these cases.
  # The assertions below enforce that assumption at graph-execution time.
  with tf.control_dependencies([
      tf.assert_greater(x['span2_index'], 0),
      tf.assert_less(x['span2_index'], tf.size(words)),
  ]):
    pronoun_index = tf.identity(x['span2_index'])
  def create_input():
    # Sanity-check that the word at span2_index really is the pronoun before
    # splicing 'X' in its place.
    with tf.control_dependencies(
        [tf.assert_equal(words[pronoun_index], x['span2_text'])]):
      return tf.strings.join(
          [
              tf.strings.reduce_join(words[:pronoun_index], separator=' '),
              'X',
              tf.strings.reduce_join(
                  words[pronoun_index + 1:], separator=' '),
          ],
          separator=' ',
      )
  # Handle some special cases.
  # These two texts tokenize in a way that makes span2_index point at the
  # wrong word, so the generic splice above would mis-target; return the
  # hand-corrected strings instead.
  if tf.equal(
      x['text'],
      'The boy continued to whip the pony , and eventually the pony threw him over. John laughed out quite loud. "Good for him," he said. '
  ):
    return (
        'The boy continued to whip the pony , and eventually the pony threw '
        'him over. John laughed out quite loud. "Good for X ," he said.'
    )
  # Using the span2_index, we get 'use' instead of 'it'.
  if tf.equal(
      x['text'],
      'When they had eventually calmed down a bit , and had gotten home, Mr. Farley put the magic pebble in an iron safe . Some day they might want to use it , but really for now, what more could they wish for?'
  ):
    return (
        'When they had eventually calmed down a bit , and had gotten home, '
        'Mr. Farley put the magic pebble in an iron safe . Some day they might '
        'want to use X , but really for now, what more could they wish for?'
    )
  return create_input()
The provided code snippet includes necessary dependencies for implementing the `wsc_simple` function. Write a Python function `def wsc_simple(dataset, label='wsc:', correct_referent_only=False)` to solve the following problem:
Converts SuperGLUE WSC examples to a simple text to text format. A typical example from SuperGLUE WSC might look like { 'text': 'Mitchell asked Tom if he could lend some money.', 'span1_text': 'Tom', 'span2_text': 'he', 'span2_index': 4, } This will be transformed to { 'inputs': 'wsc: Bob asked Tom if *he* can lend some money.' 'targets': 'Tom', } The targets will always be the text of the referent regardless of whether it is the correct referrent of the pronoun. Thus for training purposes, please set `correct_referent_only` to be True. Args: dataset: a tf.data.Dataset label: a string, the label to prepend to the inputs. correct_referent_only: a bool, whether to filter out examples for which the targets is not the correct referent of the pronoun. Returns: a tf.data.Dataset
Here is the function:
def wsc_simple(dataset,
               label='wsc:',
               correct_referent_only=False):
  """Converts SuperGLUE WSC examples to a simple text to text format.

  A typical example from SuperGLUE WSC might look like
    {
      'text': 'Mitchell asked Tom if he could lend some money.',
      'span1_text': 'Tom',
      'span2_text': 'he',
      'span2_index': 4,
    }
  This will be transformed to
    {
      'inputs': 'wsc: Mitchell asked Tom if *he* could lend some money.',
      'targets': 'Tom',
    }

  The targets will always be the text of the referent regardless of whether it
  is the correct referent of the pronoun. Thus for training purposes, please
  set `correct_referent_only` to be True.

  Args:
    dataset: a tf.data.Dataset
    label: a string, the label to prepend to the inputs.
    correct_referent_only: a bool, whether to filter out examples for which
      the targets is not the correct referent of the pronoun.

  Returns:
    a tf.data.Dataset
  """
  if correct_referent_only:
    # Keep only examples where span1_text is the true referent; missing
    # labels are treated as False (incorrect).
    dataset = dataset.filter(
        lambda ex: tf.cast(ex.get('label', False), tf.bool))

  def to_text2text(ex):
    """Formats a single WSC example."""
    # _wsc_inputs blanks the pronoun with ' X '; re-insert the pronoun
    # wrapped in asterisks to highlight it.
    highlighted = tf.strings.regex_replace(
        _wsc_inputs(ex), r' X ', ' *' + ex['span2_text'] + '* ')
    return {
        'inputs': tf.strings.join([label, highlighted], separator=' '),
        # The reshape is necessary as otherwise the tensor has unknown rank.
        'targets': tf.reshape(ex['span1_text'], shape=[]),
        'label': ex.get('label', 0),
        'idx': ex['idx'],
    }

  return dataset.map(to_text2text, num_parallel_calls=AUTOTUNE)
51 | import collections
import functools
import math
import re
from typing import Any, Callable, Mapping, Optional, Protocol, Sequence, Union
import uuid
from absl import logging
import babel
import gin
import seqio
import tensorflow.compat.v2 as tf
The provided code snippet includes necessary dependencies for implementing the `wnli_simple` function. Write a Python function `def wnli_simple(x, label='wsc:')` to solve the following problem:
Converts GLUE WNLI examples to a simple text to text format. A typical example from WNLI might look like: { 'sentence1': 'The fish ate the worm. It was tasty.', 'sentence2': 'The worm was tasty.', 'label': 1, } This will be transformed to: { 'inputs': 'wsc: The fish ate the worm. *It* was tasty.', 'targets': 'The worm', 'premise': 'The fish ate the worm. It was tasty., 'hypothesis': 'The worm was tasty.', 'label': 1, } This preprocessor has been manually verified to produce reasonable WSC examples for the dev and test sets. Tasks using this preprocessor should only be used eval and not train. Args: x: an example to process. label: a string, the label to prepend to the inputs. Returns: A preprocessed example.
Here is the function:
def wnli_simple(x, label='wsc:'):
  """Converts GLUE WNLI examples to a simple text to text format.

  A typical example from WNLI might look like:
  {
    'sentence1': 'The fish ate the worm. It was tasty.',
    'sentence2': 'The worm was tasty.',
    'label': 1,
  }

  This will be transformed to:
  {
    'inputs': 'wsc: The fish ate the worm. *It* was tasty.',
    'targets': 'The worm',
    'premise': 'The fish ate the worm. It was tasty.',
    'hypothesis': 'The worm was tasty.',
    'label': 1,
  }

  This preprocessor has been manually verified to produce reasonable WSC
  examples for the dev and test sets. Tasks using this preprocessor should
  only be used for eval and not train.

  Args:
    x: an example to process.
    label: a string, the label to prepend to the inputs.

  Returns:
    A preprocessed example.
  """
  # Pronoun surface forms considered as disambiguation targets.
  pronouns = ['he', 'she', 'they', 'it', 'her', 'his', 'their', 'them', 'him']
  PronounMatch = collections.namedtuple(  # pylint: disable=invalid-name
      'PronounMatch', ['score', 'index_in_premise', 'candidate'])
  def split_clean(s):
    """Returns array of words with punctuation and capitalization removed."""
    words = [
        re.sub(r'(\.|,|\?|\!)$', '', w) for w in s.strip().lower().split(' ')
    ]
    return [w for w in words if w]
  def get_all_pronoun_indices(s):
    # Indices (into the cleaned word list) of every pronoun occurrence.
    return [i for i, w in enumerate(s) if w in pronouns]
  def get_post_match_size(hypothesis, words):
    """Returns len of largest prefix of words that is substr of hypothesis."""
    hypothesis = ' '.join(hypothesis)
    for i in range(len(words)):
      if ' '.join(words[:i + 1]) not in hypothesis:
        return i
    return len(words)
  def get_pre_match_size(hypothesis, words):
    """Returns len of largest suffix of words that is substr of hypothesis."""
    # Reversing both sequences turns the suffix question into the prefix
    # question answered by get_post_match_size.
    return get_post_match_size(hypothesis[::-1], words[::-1])
  def get_pronoun_match(premise, hypothesis, index):
    """Return the PronounMatch for the pronoun at `index` in premise."""
    pre, post = premise[:index], premise[index + 1:]
    pre_match_size = get_pre_match_size(hypothesis, pre)
    post_match_size = get_post_match_size(hypothesis, post)
    # Score is how much of the premise context surrounding the pronoun
    # reappears in the hypothesis; the candidate referent is whatever text
    # fills the gap between the matched context pieces.
    score = pre_match_size + post_match_size
    candidate = ''
    if score:
      # When pre_match_size == 0, `-0 or len(pre)` evaluates to len(pre),
      # giving an empty slice (pre[-0:] would wrongly return the whole list).
      pre_match = pre[-pre_match_size or len(pre):]
      post_match = post[:post_match_size]
      m = re.search(' '.join(pre_match + [r'(.+)'] + post_match),
                    ' '.join(hypothesis))
      if not m:
        # Handle cases where the candidate is at the start of the hypothesis.
        m = re.search(' '.join([r'^(.+)'] + post_match), ' '.join(hypothesis))
      if not m:
        # Handle cases where the candidate is at the end of the hypothesis.
        m = re.search(' '.join(pre_match + [r'(.+)$']), ' '.join(hypothesis))
      if m:
        candidate = m.group(1)
    return PronounMatch(
        score=score, index_in_premise=index, candidate=candidate)
  def get_best_pronoun_match(premise, hypothesis):
    """Returns the match for the pronoun in the premise to disambiguate."""
    pronoun_indices = get_all_pronoun_indices(premise)
    scoredpronouns = [
        get_pronoun_match(premise, hypothesis, index)
        for index in pronoun_indices
    ]
    return max(scoredpronouns, key=lambda x: x.score)
  def highlight(sentence, index):
    # Wraps the word at `index` in asterisks, keeping any trailing
    # punctuation outside the highlight.
    words = sentence.split(' ')
    word = words[index]
    if word[-1] in ['.', ',', '!', '?']:
      highlighted = '*{}* {}'.format(word[:-1], word[-1])
    else:
      highlighted = '*{}*'.format(word)
    return ' '.join(words[:index] + [highlighted] + words[index + 1:])
  def make_nonpossessive(word):
    # WSC simple targets will never be possessive, even when the pronoun is
    # possessive.
    if word.endswith("'"):
      return word[:-1]
    elif word.endswith("'s"):
      return word[:-2]
    else:
      return word
  def clean_up(candidate):
    words = candidate.split(' ')
    # Sometimes the candidate extraction messes up, and the candidate will
    # start with the start of the hypothesis and extend to the correct
    # candidate. We can try to clean up the candidate in some cases by
    # removing everything up to the last article in the sentence.
    article_index = max(
        [words.index(art) for art in {'a', 'an', 'the'} if art in words] or [0])
    return ' '.join(words[article_index:])
  def process_candidate(candidate, hypothesis):
    """Handles special cases and adds proper punctuation/capitalization."""
    candidate = clean_up(candidate)
    # Re-locate the lowercased, punctuation-stripped candidate in the raw
    # hypothesis to recover its original casing and punctuation.
    pattern = '({})'.format(' '.join([
        r'{}(?:\.|,|\?|\!)?'.format(re.escape(c)) for c in candidate.split(' ')
    ]))
    m = re.search(pattern, hypothesis, re.IGNORECASE)
    if not m:
      raise ValueError(
          'Unable to find candidate "{}" in hypothesis "{}".'.format(
              candidate, hypothesis))
    candidate = m.group(1)
    if candidate and candidate[-1] in ['.', ',', '!', '?']:
      candidate = candidate[:-1]
    return make_nonpossessive(candidate)
  def compute_inputs_and_targets(premise, hypothesis):
    """Compute inputs and targets for WNLI simple."""
    # Runs eagerly via tf.py_function, so plain Python string ops are fine.
    premise = tf.compat.as_text(premise.numpy())
    hypothesis = tf.compat.as_text(hypothesis.numpy())
    match = get_best_pronoun_match(
        split_clean(premise), split_clean(hypothesis))
    targets = process_candidate(match.candidate, hypothesis)
    inputs = '{} {}'.format(label, highlight(premise, match.index_in_premise))
    return inputs, targets
  inputs, targets = tf.py_function(
      compute_inputs_and_targets,
      inp=[x['sentence1'], x['sentence2']],
      Tout=[tf.string, tf.string])
  return {
      # The reshape is necessary as otherwise the tensor has unknown rank.
      'inputs': tf.reshape(inputs, shape=[]),
      'targets': tf.reshape(targets, shape=[]),
      'premise': x['sentence1'],
      'hypothesis': x['sentence2'],
      'label': x.get('label', 0),
      'idx': x['idx'],
  }
52 | import collections
import functools
import math
import re
from typing import Any, Callable, Mapping, Optional, Protocol, Sequence, Union
import uuid
from absl import logging
import babel
import gin
import seqio
import tensorflow.compat.v2 as tf
def rank_classification(
    ds: tf.data.Dataset,
    inputs_fn: Callable[[FeatureType], tf.Tensor],
    targets_fn: Callable[[FeatureType], tf.Tensor],
    is_correct_fn: Callable[[FeatureType], tf.Tensor],
    weight_fn: Optional[Callable[[FeatureType], tf.Tensor]] = None,
    mode: str = 'eval',
    passthrough_feature_keys: Optional[Sequence[str]] = None,
) -> tf.data.Dataset:
  """Prepare dataset for rank classification scoring.

  Intended to be used with `rank_classification` postprocessor and metric.

  `inputs_fn` and `targets_fn` must return the 'inputs' and 'targets' features,
  respectively, for each possible class label given the raw example features.
  'is_correct_fn' must return the 'is_correct' feature, a boolean for whether
  each label is matching with the ground truth target before the examples are
  expanded.

  In 'train' mode, only the inputs / targets marked correct will be produced.
  In 'eval' mode, all inputs / targets will be produced.
  In 'fewshot_eval', all inputs / targets will be produced as a single batch.

  Each output example will also be given a unique 'idx' feature. The first dim
  is a sequential index for the input example and the second is the index of
  the generated output for it. E.g., the second output example from the fourth
  input example would be `[3, 1]`.

  To be clear, consider the following arguments:

  inputs_fn=lambda ex: ex['prefix'],
  targets_fn=lambda ex: ex['suffix'],
  is_correct_fn=lambda ex: tf.one_hot(ex['label'], num_classes)
  weight_fn=lambda ex: ex['weight']

  Given the following example:

  {
    'prefix': ['The farmland needed ', 'The farmland wanted '],
    'suffix': ['water', 'cows'],
    'label': 0,
    'weight': 1.0,
  }

  the preprocessor would return:

  [{
     'idx': [0, 0],
     'inputs': 'The farmland needed ',
     'targets': 'water',
     'is_correct': True,
     'weight': 1.0
   },
   {
     'idx': [0, 1],
     'inputs': 'The farmland wanted ',
     'targets': 'cows',
     'is_correct': False,
     'weight': 1.0
  }]

  With mode set to 'train', it would return only the first example,
  since it uses the correct label. With mode set to 'fewshot_eval', it would
  return both examples in a single batch.

  Args:
    ds: a tf.data.Dataset to preprocess.
    inputs_fn: a callable that returns the 'inputs' features for each label
      given the input example.
    targets_fn: a callable that returns the 'targets' features for each label
      given the input example.
    is_correct_fn: a callable that returns the 'label' feature. May be an int32
      scalar or 1-D Tensor.
    weight_fn: a callable that returns the 'weight' feature (float32 scalar).
    mode: A string, one of 'train', 'eval', or 'fewshot_eval'. 'train' produces
      only the correct example(s) based on the label value(s). 'eval' produces
      an example for every possible class value, sequentially. 'fewshot_eval'
      produces an example for every possible class value, batched together for
      each input example.
    passthrough_feature_keys: a sequence of feature names that should be passed
      through to the output of this preprocessor. eg: ["starburst", "tokens"]

  Returns:
    A tf.data.Dataset containing 'idx', inputs', 'targets', and 'is_correct'.

  Raises:
    ValueError: if `mode` is not one of the supported values, or if a
      passthrough feature name collides with a generated output feature.
  """
  if mode not in ('train', 'eval', 'fewshot_eval'):
    raise ValueError(
        "Mode must be one of 'train', 'eval', or 'fewshot_eval'. "
        f"Got '{mode}'.")

  def make_examples(idx, ex):
    """Expands one raw example into per-class 'inputs'/'targets' rows."""
    inputs = inputs_fn(ex)
    targets = targets_fn(ex)
    is_correct = tf.cast(is_correct_fn(ex), tf.bool)

    # All three per-class tensors must align element-wise.
    tf.debugging.assert_equal(
        tf.size(is_correct), [tf.size(inputs), tf.size(targets)],
        '`inputs_fn`, `targets_fn`, and `is_correct_fn` must return the same '
        'size tensors.')

    num_out = tf.size(is_correct)
    # idx pairs: (input example index, generated output index).
    in_idx = tf.fill([num_out], tf.cast(idx, tf.int32))
    out_idx = tf.range(num_out)

    output = {
        'idx': tf.stack([in_idx, out_idx], 1),
        'inputs': inputs,
        'targets': targets,
        'is_correct': is_correct,
    }

    if weight_fn is not None:
      # Broadcast the scalar weight to one value per generated example.
      output['weight'] = tf.fill(tf.shape(is_correct), weight_fn(ex))
      output['weight'] = tf.cast(output['weight'], tf.float32)

    for feature_name in passthrough_feature_keys or []:
      if feature_name in output:
        # Fixed: the two string literals previously concatenated without a
        # separating space ("...already existsin the preprocessed output...").
        raise ValueError(
            f'The feature {feature_name} to pass through, already exists '
            'in the preprocessed output. Try renaming it to something else.'
        )
      # Tile the passthrough feature once per generated example:
      # new shape is [num_targets, *original_shape].
      tiled_shape = tf.concat(
          [
              tf.expand_dims(tf.shape(targets)[0], axis=0),
              tf.ones(len(ex[feature_name].shape), dtype=tf.int32),
          ],
          axis=0,
      )
      output[feature_name] = tf.tile(
          tf.expand_dims(ex[feature_name], axis=0), tiled_shape
      )

    return output

  ds = ds.enumerate()
  ds = ds.map(make_examples, num_parallel_calls=AUTOTUNE)
  if mode != 'fewshot_eval':
    ds = ds.unbatch()
  if mode == 'train':
    ds = ds.filter(lambda ex: ex['is_correct'])
  return ds
The provided code snippet includes necessary dependencies for implementing the `rank_classification_formatter` function. Write a Python function `def rank_classification_formatter( ds: tf.data.Dataset, inputs_formats: Union[str, Sequence[str]], targets_formats: Union[str, Sequence[str]], mode: str = 'eval', label_key: str = 'label', weight_key: Optional[str] = None) -> tf.data.Dataset` to solve the following problem:
Create 'inputs' and 'targets' strings for ranking classification. Intended to be used with `rank_classification` postprocessor and metric. Inputs will be formatted by filling in the feature values in the `inputs_formats` and `targets_formats` strings. Nested features can be accessed by concatenating the features using forward slash. For eg: if sub-sub-key is nested under sub-key, which is nested under key, then sub-sub-key can be accessed using key/sub-key/sub-sub-key. In 'eval' mode, a separate example will be produced for each targets / inputs format string. These can then be scored to find the one with the highest likelihood. The `rank_classification` postprocessor and metric allow you to evaluate with this technique. In 'train' mode, only the targets / inputs format string indexed by the label(s) will be produced. In 'eval' mode, all inputs / targets will be produced. Each input example will also be given a unique, sequential index called 'idx'. For example, with arguments: ``` inputs_format='{premise} What is the {question}? X', targets_formats=[ 'I think {choice1}.', 'I think {choice2}.' ], mode='eval' ``` given the input: { 'premise': 'The farmland needed irrigation.', 'question': 'effect', 'choice1' : 'a canal was constructed', 'choice2': 'the crops grew tall', 'label': 0, } the preprocessor would return: [{ 'idx': 0, 'inputs': 'The farmland needed irrigation. What is the effect? X', 'targets': 'I think a canal was constructed.', 'is_correct': True }, { 'idx': 0, 'inputs': 'The farmland needed irrigation. What is the effect? X', 'targets': 'I think the crops grew tall.', 'is_correct': False }] With `mode='train'`, it would return only the first example, since it uses the correct label. With `mode='fewshot_eval'`, it would return both examples in a single batch. Args: ds: a tf.data.Dataset to preprocess. inputs_formats: A string or a list of strings to format with feature values to produce 'inputs'. 
Feature keys should be surrounded by curly braces to be replaced. targets_formats: A string or a list of strings to format with feature values to produce 'targets', one for each possible class value. Feature keys should be surrounded by curly braces to be replaced. mode: A string, one of 'train', 'eval', or 'fewshot_train') 'train' produces only the correct example(s) based on the label value(s). 'eval' produces an example for every possible class value, sequentially. 'fewshot_eval': produces an example for every possible class value, batched together for each input example. label_key: A string, the feature key for the integer label value(s). weight_key: A string, the feature key for the float example weight. Returns: A tf.data.Dataset containing 'idx', inputs', 'targets', and 'is_correct'.
Here is the function:
def rank_classification_formatter(
    ds: tf.data.Dataset,
    inputs_formats: Union[str, Sequence[str]],
    targets_formats: Union[str, Sequence[str]],
    mode: str = 'eval',
    label_key: str = 'label',
    weight_key: Optional[str] = None) -> tf.data.Dataset:
  """Create 'inputs' and 'targets' strings for ranking classification.

  Intended to be used with `rank_classification` postprocessor and metric.

  Inputs will be formatted by filling in the feature values in the
  `inputs_formats` and `targets_formats` strings.

  Nested features can be accessed by concatenating the features using forward
  slash. For eg: if sub-sub-key is nested under sub-key, which is nested under
  key, then sub-sub-key can be accessed using key/sub-key/sub-sub-key.

  In 'eval' mode, a separate example will be produced for each targets / inputs
  format string. These can then be scored to find the one with the highest
  likelihood. The `rank_classification` postprocessor and metric allow you to
  evaluate with this technique.

  In 'train' mode, only the targets / inputs format string indexed by the
  label(s) will be produced. In 'eval' mode, all inputs / targets will be
  produced.

  Each input example will also be given a unique, sequential index called 'idx'.

  For example, with arguments:

  ```
  inputs_format='{premise} What is the {question}? X',
  targets_formats=[
    'I think {choice1}.',
    'I think {choice2}.'
  ],
  mode='eval'
  ```

  given the input:

  {
    'premise': 'The farmland needed irrigation.',
    'question': 'effect',
    'choice1' : 'a canal was constructed',
    'choice2': 'the crops grew tall',
    'label': 0,
  }

  the preprocessor would return:

  [{
     'idx': 0,
     'inputs': 'The farmland needed irrigation. What is the effect? X',
     'targets': 'I think a canal was constructed.',
     'is_correct': True
   },
   {
     'idx': 0,
     'inputs': 'The farmland needed irrigation. What is the effect? X',
     'targets': 'I think the crops grew tall.',
     'is_correct': False
  }]

  With `mode='train'`, it would return only the first example,
  since it uses the correct label.

  With `mode='fewshot_eval'`, it would return both examples in a single batch.

  Args:
    ds: a tf.data.Dataset to preprocess.
    inputs_formats: A string or a list of strings to format with feature values
      to produce 'inputs'. Feature keys should be surrounded by curly braces to
      be replaced.
    targets_formats: A string or a list of strings to format with feature values
      to produce 'targets', one for each possible class value. Feature keys
      should be surrounded by curly braces to be replaced.
    mode: A string, one of 'train', 'eval', or 'fewshot_eval'. 'train' produces
      only the correct example(s) based on the label value(s). 'eval' produces
      an example for every possible class value, sequentially.
      'fewshot_eval': produces an example for every possible class value,
      batched together for each input example.
    label_key: A string, the feature key for the integer label value(s).
    weight_key: A string, the feature key for the float example weight.

  Returns:
    A tf.data.Dataset containing 'idx', 'inputs', 'targets', and 'is_correct'.
  """
  if (isinstance(inputs_formats, (list, tuple)) and
      isinstance(targets_formats, (list, tuple))):
    if len(inputs_formats) != len(targets_formats):
      raise ValueError(
          f'The inputs_formats ({len(inputs_formats)}) and '
          f'targets_formats ({len(targets_formats)}) are both instances '
          'of list or tuple, but do not have matching lengths.')
    # BUG FIX: `num_classes` was never assigned on this branch, which caused a
    # NameError inside `_is_correct_fn` whenever both formats were sequences.
    num_classes = len(inputs_formats)
  elif isinstance(inputs_formats, (list, tuple)):
    # Single targets format shared across all classes.
    num_classes = len(inputs_formats)
    targets_formats = [targets_formats] * num_classes
  elif isinstance(targets_formats, (list, tuple)):
    # Single inputs format shared across all classes.
    num_classes = len(targets_formats)
    inputs_formats = [inputs_formats] * num_classes
  else:
    raise ValueError(
        'One of the inputs_formats and targets_formats has to '
        f'be a list or tuple, inputs_formats: {inputs_formats}, '
        f'target_formats: {targets_formats}.')

  def _format_str(features, fmt):
    """Fills `{key}` placeholders in `fmt` with (possibly nested) features."""
    keys = set(re.findall(r'{(\S+)}', fmt))
    s = fmt
    for k in keys:
      value = features
      # '/'-separated keys index into nested feature dicts.
      for subkey in k.split('/'):
        value = value[subkey]
      if not isinstance(value, tf.Tensor):
        raise ValueError(
            f'Final value of key \'{k}\' must be a tf.string. '
            f'Got: {type(value).__name__}')
      tf.debugging.assert_type(
          value, tf.string,
          f'Final value of key \'{k}\' must be a tf.string. '
          f'Got: {value.dtype.name}')
      s = tf.strings.regex_replace(s, '{%s}' % k, value)
    return s

  def _apply_formats(features, fmts):
    """Formats one string per class from the per-class format list."""
    return [_format_str(features, fmt) for fmt in fmts]

  def _is_correct_fn(ex):
    """One-hot (bool) correctness vector over the `num_classes` classes."""
    labels = ex[label_key]
    is_correct = tf.one_hot(labels, num_classes, on_value=True, off_value=False)
    if labels.shape.rank:
      # Multi-label case: any listed label marks the class correct.
      is_correct = tf.math.reduce_any(is_correct, axis=0)
    return is_correct

  def _weight_fn(ex):
    return ex[weight_key]

  return rank_classification(
      ds,
      inputs_fn=functools.partial(_apply_formats, fmts=inputs_formats),
      targets_fn=functools.partial(_apply_formats, fmts=targets_formats),
      is_correct_fn=_is_correct_fn,
      weight_fn=None if weight_key is None else _weight_fn,
      mode=mode)
Feature keys should be surrounded by curly braces to be replaced. targets_formats: A string or a list of strings to format with feature values to produce 'targets', one for each possible class value. Feature keys should be surrounded by curly braces to be replaced. mode: A string, one of 'train', 'eval', or 'fewshot_train') 'train' produces only the correct example(s) based on the label value(s). 'eval' produces an example for every possible class value, sequentially. 'fewshot_eval': produces an example for every possible class value, batched together for each input example. label_key: A string, the feature key for the integer label value(s). weight_key: A string, the feature key for the float example weight. Returns: A tf.data.Dataset containing 'idx', inputs', 'targets', and 'is_correct'. |
53 | import collections
import functools
import math
import re
from typing import Any, Callable, Mapping, Optional, Protocol, Sequence, Union
import uuid
from absl import logging
import babel
import gin
import seqio
import tensorflow.compat.v2 as tf
The provided code snippet includes necessary dependencies for implementing the `parse_tsv` function. Write a Python function `def parse_tsv(line, field_names=None, field_delim='\t', field_columns=None)` to solve the following problem:
Splits TSV lines into dict examples mapping field name to string value. Args: line: an example containing a comma/tab-delimited string. field_names: a list of strings, the ordered names of the TSV fields. Defaults to "inputs" and "targets". field_delim: a string, the delimiter to split on e.g. ',' for csv. field_columns: a list of column indices for each field. Defaults to consecutive numbering of the provided `field_names`. Returns: A feature dict mapping field name to string value.
Here is the function:
def parse_tsv(line, field_names=None, field_delim='\t', field_columns=None):
  """Turns one delimited line into a dict mapping field name to string value.

  Args:
    line: an example containing a comma/tab-delimited string.
    field_names: a list of strings, the ordered names of the TSV fields.
      Defaults to "inputs" and "targets".
    field_delim: a string, the delimiter to split on e.g. ',' for csv.
    field_columns: a list of column indices for each field. Defaults to
      consecutive numbering of the provided `field_names`.

  Returns:
    A feature dict mapping field name to string value.
  """
  if not field_names:
    field_names = ['inputs', 'targets']
  if not field_columns:
    field_columns = list(range(len(field_names)))
  # Every column defaults to the empty string when missing.
  decoded = tf.io.decode_csv(
      line,
      record_defaults=[''] * len(field_names),
      field_delim=field_delim,
      use_quote_delim=False,
      select_cols=field_columns)
  return {name: value for name, value in zip(field_names, decoded)}
54 | import collections
import functools
import math
import re
from typing import Any, Callable, Mapping, Optional, Protocol, Sequence, Union
import uuid
from absl import logging
import babel
import gin
import seqio
import tensorflow.compat.v2 as tf
The provided code snippet includes necessary dependencies for implementing the `preprocess_tsv` function. Write a Python function `def preprocess_tsv(line, field_delim='\t', num_fields=2, inputs_format='{0}', targets_format='{1}', field_names=None, use_quote_delim=False)` to solve the following problem:
r"""Parse tab-delimited strings into inputs and targets. This function takes a tf.data.Dataset of strings, each of which contains tab-delimited fields. The function returns a tf.data.Dataset of feature dictionaries of the form {"inputs": string, "targets": string}. inputs_format contains a template string and field numbers or names used to produce the "inputs" string. targets_format contains a template string and field numbers or names used to produce the "targets" string. Example (field numbers): The input dataset contains the lines: "6,7,42" "2,9,18" preprocess_tsv(dataset, field_delim=',', inputs_format='numerator: {2} denominator: {1}', targets_format='quotient: {0}' would produce a dataset containing the dictionaries: {"inputs": "numerator: 42 denominator: 7", "targets": "quotient: 6"} {"inputs": "numerator: 18 denominator: 9", "targets": "quotient: 2"} Example (field names): The input dataset contains the lines: "6,7,42" "2,9,18" preprocess_tsv(dataset, field_delim=',', field_names=['quot', 'denom', 'numer'], inputs_format='numerator: {numer} denominator: {denom}', targets_format='quotient: {quot}' would produce a dataset containing the dictionaries: {"inputs": "numerator: 42 denominator: 7", "targets": "quotient: 6"} {"inputs": "numerator: 18 denominator: 9", "targets": "quotient: 2"} Args: line: an example containing comma/tab-delimited string. field_delim: a string, the delimiter to split on e.g. ',' for csv. num_fields: an integer inputs_format: a string, the desired output format with placeholders for field values. targets_format: a string, the desired output format with placeholders for field values. field_names: a list of strings, the ordered names of the TSV fields. defaults to None (i.e. use field number in *_format) use_quote_delim: If false, treats double quotation marks as regular characters inside of the string fields (ignoring RFC 4180, Section 2, Bullet 5). Returns: A feature dict with 'inputs' and 'targets' features.
Here is the function:
def preprocess_tsv(line,
                   field_delim='\t',
                   num_fields=2,
                   inputs_format='{0}',
                   targets_format='{1}',
                   field_names=None,
                   use_quote_delim=False):
  r"""Parse tab-delimited strings into inputs and targets.

  Each line holds `field_delim`-separated fields. The fields are substituted
  into `inputs_format` / `targets_format` templates, whose placeholders are
  either column numbers ('{0}', '{1}', ...) or, when `field_names` is given,
  field names ('{name}').

  Example (field numbers), for the line "6,7,42" with field_delim=',',
  inputs_format='numerator: {2} denominator: {1}' and
  targets_format='quotient: {0}', the output is:
    {"inputs": "numerator: 42 denominator: 7", "targets": "quotient: 6"}

  Example (field names), same line with field_names=['quot', 'denom', 'numer'],
  inputs_format='numerator: {numer} denominator: {denom}' and
  targets_format='quotient: {quot}' yields the same output.

  Args:
    line: an example containing comma/tab-delimited string.
    field_delim: a string, the delimiter to split on e.g. ',' for csv.
    num_fields: an integer, number of columns (ignored when `field_names` is
      provided).
    inputs_format: a string, the desired output format with placeholders for
      field values.
    targets_format: a string, the desired output format with placeholders for
      field values.
    field_names: a list of strings, the ordered names of the TSV fields.
      Defaults to None (i.e. use field numbers in *_format).
    use_quote_delim: If false, treats double quotation marks as regular
      characters inside of the string fields (ignoring RFC 4180, Section 2,
      Bullet 5).

  Returns:
    A feature dict with 'inputs' and 'targets' features.
  """

  def _render_numbered(piece, values):
    # After re.split, `piece` is either literal text or a lone '{N}' token.
    hit = re.findall(r'{(\d+)}', piece)
    return values[int(hit[0])] if hit else piece

  def _render_named(piece, names, values):
    pattern = '|'.join(['{{({})}}'.format(n) for n in names])
    hit = re.findall(pattern, piece)
    if not hit:
      return piece
    # findall returns one group per name; join picks the non-empty match.
    return values[names.index(''.join(hit[0]))]

  def _render(template, names, values):
    """Splits the template on placeholders and joins substituted pieces."""
    if names is None:
      pieces = [
          _render_numbered(p, values)
          for p in re.split(r'({\d+})', template)
      ]
    else:
      pattern = '(' + '|'.join(['{{{}}}'.format(n) for n in names]) + ')'
      pieces = [
          _render_named(p, names, values)
          for p in re.split(pattern, template)
      ]
    return tf.strings.join(pieces)

  n_columns = num_fields if field_names is None else len(field_names)
  values = tf.io.decode_csv(
      line,
      record_defaults=[''] * n_columns,
      field_delim=field_delim,
      use_quote_delim=use_quote_delim)
  return {
      'inputs': _render(inputs_format, field_names, values),
      'targets': _render(targets_format, field_names, values)
  }
55 | import collections
import functools
import math
import re
from typing import Any, Callable, Mapping, Optional, Protocol, Sequence, Union
import uuid
from absl import logging
import babel
import gin
import seqio
import tensorflow.compat.v2 as tf
def select_random_chunk(dataset: tf.data.Dataset,
                        output_features: Mapping[str, seqio.Feature],
                        max_length: Optional[int] = None,
                        feature_key: str = 'targets',
                        additional_feature_keys: Optional[Sequence[str]] = None,
                        passthrough_feature_keys: Optional[
                            Sequence[str]] = None,
                        sequence_length: Optional[Mapping[str, int]] = None,
                        uniform_random_start: bool = False,
                        min_length: Optional[int] = None,
                        **unused_kwargs) -> tf.data.Dataset:
  """SeqIO wrapper for single_example_select_random_chunk().

  Drops empty examples, then applies
  `single_example_select_random_chunk` to every remaining example with a
  deterministic per-example random seed. All keyword arguments are forwarded
  unchanged to that function.

  Args:
    dataset: a tf.data.Dataset of feature dicts.
    output_features: mapping of feature name to seqio.Feature (forwarded).
    max_length: optional int, maximum chunk length (forwarded).
    feature_key: the feature to chunk; examples where it is empty are dropped.
    additional_feature_keys: extra features to chunk alongside (forwarded).
    passthrough_feature_keys: features to keep unmodified (forwarded).
    sequence_length: optional mapping of feature name to length (forwarded).
    uniform_random_start: whether to pick a uniform random start (forwarded).
    min_length: optional int, minimum chunk length (forwarded).
    **unused_kwargs: ignored.

  Returns:
    A tf.data.Dataset of chunked examples.
  """

  # BUG FIX: `_my_fn` takes (example, seed) but was invoked directly on the
  # dataset, which raised a TypeError. `seqio.map_over_dataset(num_seeds=1)`
  # lifts it into a dataset transform and supplies the per-example seed.
  @seqio.map_over_dataset(num_seeds=1)
  def _my_fn(x, seed):
    return single_example_select_random_chunk(
        x,
        seed,
        output_features=output_features,
        max_length=max_length,
        feature_key=feature_key,
        additional_feature_keys=additional_feature_keys,
        passthrough_feature_keys=passthrough_feature_keys,
        sequence_length=sequence_length,
        uniform_random_start=uniform_random_start,
        min_length=min_length)

  # Filter empty examples.
  dataset = dataset.filter(lambda x: tf.not_equal(tf.size(x[feature_key]), 0))
  return _my_fn(dataset)
def reduce_concat_tokens(dataset,
                         feature_key='targets',
                         batch_size=128,
                         **unused_kwargs):
  """Token-preprocessor to concatenate multiple unrelated documents.

  If we want to generate examples of exactly the right length,
  (to avoid wasting space on padding), then we use this function, folowed by
  split_tokens.

  Args:
    dataset: a tf.data.Dataset with dictionaries containing the key feature_key.
    feature_key: an string
    batch_size: an integer - how many documents to concatenate into one

  Returns:
    a dataset
  """

  def _flatten_batch(batch):
    """Flattens a padded batch into one sequence, dropping the zero padding."""
    merged = tf.reshape(batch[feature_key], [-1])
    merged = tf.boolean_mask(merged, tf.cast(merged, tf.bool))
    return {feature_key: merged}

  # Keep only the feature being concatenated, then pad-batch `batch_size`
  # documents together so they can be merged in one op.
  dataset = dataset.map(
      lambda x: {feature_key: x[feature_key]}, num_parallel_calls=AUTOTUNE)
  dataset = dataset.padded_batch(batch_size, padded_shapes={feature_key: [-1]})
  return dataset.map(_flatten_batch, num_parallel_calls=AUTOTUNE)
def split_tokens(dataset: tf.data.Dataset,
                 min_tokens_per_segment: Optional[int] = None,
                 max_tokens_per_segment: int = gin.REQUIRED,
                 feature_key: str = 'targets',
                 additional_feature_keys: Optional[Sequence[str]] = None,
                 passthrough_feature_keys: Optional[Sequence[str]] = None,
                 **unused_kwargs) -> tf.data.Dataset:
  """Split examples into multiple examples each.

  The intended use case is to break up long examples for use in unsupervised
  transfer-learning.

  This function is generally preceded by select_random_chunk.

  If min_tokens_per_segment is provided, the segment length is chosen randomly
  per document from a log-uniform distribution. If min_tokens_per_segment is
  None, then the segment length is max_tokens_per_segment (except for a possibly
  shorter last segment in each document).

  Args:
    dataset: a tf.data.Dataset with dictionaries containing the key feature_key.
    min_tokens_per_segment: an optional integer
    max_tokens_per_segment: an integer, the maximum number of tokens in each
      segment. Only the final segment may be shorter.
    feature_key: a string, the feature to split
    additional_feature_keys: Additional features to split. The same chunk size
      will be used, so they should be the same size as feature_key.
    passthrough_feature_keys: Features to pass through without any splitting.

  Returns:
    a dataset
  """
  if passthrough_feature_keys:
    split_keys = set([feature_key] + (additional_feature_keys or []))
    overlap_keys = split_keys & set(passthrough_feature_keys)
    if overlap_keys:
      raise ValueError(
          f'split keys {overlap_keys} also included in passthrough keys')

  # BUG FIX: `_split_tokens` takes (example, seed) but was applied directly to
  # the dataset below, which raised a TypeError.
  # `seqio.map_over_dataset(num_seeds=1)` lifts it into a dataset transform
  # and supplies the deterministic per-example seed.
  @seqio.map_over_dataset(num_seeds=1)
  def _split_tokens(x, seed):
    """Split one token sequence into multiple sequences."""
    tokens = x[feature_key]
    n_tokens = tf.shape(tokens)[0]

    if min_tokens_per_segment is None:
      length = max_tokens_per_segment
    else:
      # pick a length - log-uniformly distributed
      length = tf.cast(
          tf.exp(
              tf.random.stateless_uniform(
                  [],
                  minval=math.log(min_tokens_per_segment),
                  maxval=math.log(max_tokens_per_segment),
                  seed=seed
              )
          ),
          tf.int32)

    # Pad to a multiple of length, then use tf.reshape to split up the tokens
    # into num_segments segments each of the given length.
    num_segments = tf.cast(
        tf.math.ceil(
            tf.cast(n_tokens, tf.float32) / tf.cast(length, tf.float32))
        ,
        tf.int32)
    padding = num_segments * length - tf.shape(tokens)[0]
    feature_keys_to_split = [feature_key]
    orig_lengths = {}
    outputs = {}
    if additional_feature_keys is not None:
      feature_keys_to_split.extend(additional_feature_keys)
    for k in feature_keys_to_split:
      with tf.control_dependencies([
          tf.assert_equal(
              tf.shape(tokens)[0],
              tf.shape(x[k])[0],
              message=(f'Additional feature {k} is not the same size as '
                       f'{feature_key} along axis 0 in split_tokens().')
          )
      ]):
        shape = tf.shape(x[k])[1:]
        shape_list = x[k].shape[1:]
        padded = tf.pad(
            x[k],
            tf.concat([[[0, padding]],
                       tf.zeros([len(shape_list), 2], dtype=tf.int32)],
                      axis=0))
        # All segments are `length` long except the last, which loses the pad.
        orig_lengths[k] = tf.concat(
            [tf.repeat(length, num_segments - 1), [length - padding]], axis=0)
        outputs[k] = tf.reshape(
            padded, tf.concat([[-1, length], shape], axis=0))

    # To avoid memory issues, don't just replicate the passthrough features
    # for every segment; use tf.data to do it so the copies don't get
    # instantiated all at once.
    outputs_ds = tf.data.Dataset.from_tensor_slices(outputs)
    orig_lengths_ds = tf.data.Dataset.from_tensor_slices(orig_lengths)
    if passthrough_feature_keys:
      passthrough = {k: v for k, v in x.items()
                     if k in passthrough_feature_keys}
      passthrough_ds = tf.data.Dataset.from_tensors(passthrough).repeat(
          tf.cast(num_segments, tf.int64))
      return tf.data.Dataset.zip((outputs_ds, orig_lengths_ds, passthrough_ds))
    else:
      return tf.data.Dataset.zip((outputs_ds, orig_lengths_ds))

  def _strip_padding_and_merge_passthrough(
      inputs, orig_lengths, passthrough=None):
    """Truncates each split feature to its original length; merges passthrough."""
    output = {}
    for k, v in inputs.items():
      output[k] = v[:orig_lengths[k]]
    if passthrough:
      for k, v in passthrough.items():
        output[k] = passthrough[k]
    return output

  # Filter empty examples.
  dataset = dataset.filter(lambda x: tf.not_equal(tf.size(x[feature_key]), 0))
  dataset = _split_tokens(dataset).flat_map(lambda z: z)
  dataset = dataset.map(
      _strip_padding_and_merge_passthrough, num_parallel_calls=AUTOTUNE)
  return dataset
def random_spans_helper(inputs_length=gin.REQUIRED,
                        noise_density=gin.REQUIRED,
                        mean_noise_span_length=gin.REQUIRED,
                        extra_tokens_per_span_inputs=gin.REQUIRED,
                        extra_tokens_per_span_targets=gin.REQUIRED,
                        verbose=False):
  """Training parameters to avoid padding with random_spans_noise_mask.

  When training a model with random_spans_noise_mask, we would like to set the
  other training hyperparmeters in a way that avoids padding. This function
  helps us compute these hyperparameters.

  We assume that each noise span in the input is replaced by
  extra_tokens_per_span_inputs sentinel tokens, and each non-noise span in the
  targets is replaced by extra_tokens_per_span_targets sentinel tokens.

  This function tells us the required number of tokens in the raw example (for
  split_tokens()) as well as the length of the encoded targets.

  Note that this function assumes the inputs and targets will have EOS appended
  and includes that in the reported length.

  Args:
    inputs_length: an integer - desired length of the tokenized inputs sequence
    noise_density: a float
    mean_noise_span_length: a float
    extra_tokens_per_span_inputs: an integer
    extra_tokens_per_span_targets: an integer
    verbose: a bool indicating whether to log sequence lengths

  Returns:
    tokens_length: length of original text in tokens
    targets_length: an integer - length in tokens of encoded targets sequence
  """

  def _encoded_lengths(raw_length):
    """Encoded (inputs_length, targets_length) for a raw token count."""
    noise_tokens = int(round(raw_length * noise_density))
    nonnoise_tokens = raw_length - noise_tokens
    noise_spans = int(round(noise_tokens / mean_noise_span_length))
    # Inputs: all non-noise tokens + sentinels for every noise span + EOS.
    # Targets: all noise tokens + sentinels for every span + EOS.
    encoded_inputs = (
        nonnoise_tokens + noise_spans * extra_tokens_per_span_inputs + 1)
    encoded_targets = (
        noise_tokens + noise_spans * extra_tokens_per_span_targets + 1)
    return encoded_inputs, encoded_targets

  # Grow the raw length as far as possible while the encoded inputs still fit.
  tokens_length = inputs_length - 1
  while _encoded_lengths(tokens_length + 1)[0] <= inputs_length:
    tokens_length += 1

  inputs_length, targets_length = _encoded_lengths(tokens_length)

  # minor hack to get the targets length to be equal to inputs length
  # which is more likely to have been set to a nice round number.
  if noise_density == 0.5 and targets_length > inputs_length:
    tokens_length -= 1
    targets_length -= 1

  if verbose:
    logging.info(
        'tokens_length=%s inputs_length=%s targets_length=%s '
        'noise_density=%s mean_noise_span_length=%s ',
        tokens_length, inputs_length, targets_length,
        noise_density, mean_noise_span_length)
  return tokens_length, targets_length
def denoise(dataset,
            output_features,
            noise_density=gin.REQUIRED,
            noise_mask_fn=gin.REQUIRED,
            inputs_fn=gin.REQUIRED,
            targets_fn=None,
            passthrough_feature_keys: Optional[Sequence[str]] = None,
            input_feature_key='inputs',
            **unused_kwargs):
  """SeqIO wrapper for single_example_denoise().

  Applies `single_example_denoise` to every example with a deterministic
  per-example random seed. All keyword arguments are forwarded unchanged to
  that function.

  Args:
    dataset: a tf.data.Dataset of feature dicts.
    output_features: mapping of feature name to seqio.Feature (forwarded).
    noise_density: a float, density of the noise mask (forwarded).
    noise_mask_fn: callable that produces the noise mask (forwarded).
    inputs_fn: callable that produces the noised inputs (forwarded).
    targets_fn: optional callable that produces the targets (forwarded).
    passthrough_feature_keys: features to pass through unmodified (forwarded).
    input_feature_key: feature key to write the noised inputs to (forwarded).
    **unused_kwargs: ignored.

  Returns:
    A preprocessed tf.data.Dataset.
  """

  # BUG FIX: `my_fn` takes (features, seed) but was invoked directly on the
  # dataset, which raised a TypeError. `seqio.map_over_dataset(num_seeds=1)`
  # lifts it into a dataset transform and supplies the per-example seed.
  @seqio.map_over_dataset(num_seeds=1)
  def my_fn(features, seed):
    return single_example_denoise(
        features,
        seed,
        output_features=output_features,
        noise_density=noise_density,
        noise_mask_fn=noise_mask_fn,
        inputs_fn=inputs_fn,
        targets_fn=targets_fn,
        passthrough_feature_keys=passthrough_feature_keys,
        input_feature_key=input_feature_key)

  return my_fn(dataset)
def random_spans_noise_mask(length,
                            noise_density,
                            seeds,
                            mean_noise_span_length=3.0,
                            random_roll=False):
  """Noise mask consisting of random spans of noise tokens.

  The number of noise tokens and the number of noise spans and non-noise spans
  are determined deterministically as follows:
    num_noise_tokens = round(length * noise_density)
    num_nonnoise_spans = num_noise_spans = round(
        num_noise_tokens / mean_noise_span_length)
  Spans alternate between non-noise and noise, beginning with non-noise.
  Subject to the above restrictions, all masks are equally likely.

  Args:
    length: an int32 scalar (length of the incoming token sequence)
    noise_density: a float - approximate density of output mask
    seeds: an int32 Tensor, shaped (2, 2)
    mean_noise_span_length: a number
    random_roll: bool, whether to roll the mask by a random integer offset in
      [0, length). Set random_roll to True to get a more uniform distribution
      of masked positions. Specifically, when random_roll is False (default)
      and a single span is enough to satisfy the noise density requirement,
      this function masks only the last few positions.
  Returns:
    a boolean tensor with shape [length]
  """
  if noise_density == 0.0:
    return tf.zeros(length, tf.bool)
  orig_length = length
  # increase length to avoid degeneracy
  length = tf.maximum(length, 2)
  def to_int(x):
    return tf.cast(x, tf.int32)
  def to_float(x):
    return tf.cast(x, tf.float32)
  num_noise_tokens = to_int(tf.round(to_float(length) * noise_density))
  # avoid degeneracy by ensuring positive numbers of noise and nonnoise tokens.
  num_noise_tokens = tf.minimum(tf.maximum(num_noise_tokens, 1), length - 1)
  num_noise_spans = to_int(
      tf.round(to_float(num_noise_tokens) / mean_noise_span_length))
  # avoid degeneracy by ensuring positive number of noise spans
  num_noise_spans = tf.maximum(num_noise_spans, 1)
  num_nonnoise_tokens = length - num_noise_tokens
  # pick the lengths of the noise spans and the non-noise spans
  def _random_segmentation(num_items, num_segments, seed):
    """Partition a sequence of items randomly into non-empty segments.

    Args:
      num_items: an integer scalar > 0
      num_segments: an integer scalar in [1, num_items]
      seed: an integer seed
    Returns:
      a Tensor with shape [num_segments] containing positive integers that add
      up to num_items
    """
    # Shuffle a vector of (num_segments - 1) ones and zeros marking segment
    # starts, then prepend a 0 so position 0 always begins the first segment.
    first_in_segment = tf.pad(
        seqio.stateless_shuffle(
            to_int(tf.range(num_items - 1) < num_segments - 1),
            seed),
        [[1, 0]])
    segment_id = tf.cumsum(first_in_segment)
    # Summing ones per segment id yields each segment's length.
    segment_length = tf.math.segment_sum(tf.ones_like(segment_id), segment_id)
    return segment_length
  noise_span_lengths = _random_segmentation(
      num_noise_tokens, num_noise_spans, seeds[0])
  nonnoise_span_lengths = _random_segmentation(
      num_nonnoise_tokens, num_noise_spans, seeds[1])
  # Interleave [nonnoise, noise, nonnoise, noise, ...] span lengths.
  interleaved_span_lengths = tf.reshape(
      tf.stack([nonnoise_span_lengths, noise_span_lengths], axis=1),
      [num_noise_spans * 2])
  # Positions where a new span begins; scatter ones there and cumsum to get
  # a running span index for every position.
  span_starts = tf.cumsum(interleaved_span_lengths)[:-1]
  span_start_indicator = tf.math.unsorted_segment_sum(
      tf.ones_like(span_starts), span_starts, length)
  span_num = tf.cumsum(span_start_indicator)
  # Odd-numbered spans are the noise spans (sequence begins with non-noise).
  is_noise = tf.equal(span_num % 2, 1)
  mask = is_noise[:orig_length]
  if random_roll:
    roll_seed = (seeds[0][0]+seeds[1][1], seeds[0][1]-seeds[1][0])  # new seed.
    # Roll the mask by a random offset e.g. for offset=2: [1,2,3,4] => [3,4,1,2]
    offset = tf.random.stateless_uniform(
        [1], seed=roll_seed, dtype=tf.int32, minval=0, maxval=length)[0]
    mask = tf.roll(mask, shift=offset, axis=0)
  return mask
def noise_span_to_unique_sentinel(tokens, noise_mask, vocabulary, seeds):
  """Replace each run of consecutive noise tokens with a different sentinel.

  Aligns the dropped spans in the inputs with the markers in the targets,
  producing training pairs like:
    "We hold X to be Y that" -> "X these truths Y self evident Z"

  Sentinels are assigned in decreasing order within the sequence, starting at
  vocabulary.size - 1 - i.e. the last tokens in the vocabulary are
  appropriated for additional use as sentinels.

  Args:
    tokens: a 1d integer Tensor
    noise_mask: a boolean Tensor with the same shape as tokens
    vocabulary: a vocabulary.Vocabulary
    seeds: an unused int32 Tensor

  Returns:
    a Tensor with the same shape and dtype as tokens
  """
  del seeds  # deterministic given the mask

  # A position starts a noise span iff it is noise and its predecessor is not.
  previous_is_noise = tf.pad(noise_mask[:-1], [[1, 0]])
  starts_span = tf.logical_and(noise_mask, tf.logical_not(previous_is_noise))
  continues_span = tf.logical_and(noise_mask, previous_is_noise)

  # One fresh sentinel id per span, counting down from the top of the vocab.
  sentinels = sentinel_id(vocabulary) + 1 - tf.cumsum(
      tf.cast(starts_span, tokens.dtype))

  marked = tf.where(starts_span, sentinels, tokens)
  # Drop the remainder of each noise span, keeping just its sentinel.
  return tf.boolean_mask(marked, tf.logical_not(continues_span))
def nonnoise_span_to_unique_sentinel(tokens, noise_mask, vocabulary, seeds):
  """Sentinel-replace the non-noise spans (complement of the noise mask)."""
  inverted_mask = tf.logical_not(noise_mask)
  return noise_span_to_unique_sentinel(tokens, inverted_mask, vocabulary,
                                       seeds)
The provided code snippet includes necessary dependencies for implementing the `span_corruption` function. Write a Python function `def span_corruption(dataset, sequence_length, output_features, mean_noise_span_length=3.0, noise_density=0.15, input_feature_key='inputs', merge_examples_to_reduce_padding=True, reserved_for_packing=None, passthrough_feature_keys: Optional[Sequence[str]] = None)` to solve the following problem:
Final pretraining objective used in Raffel et al., 2019. Args: dataset: A tf.data.Dataset with dictionaries containing the key `input_feature_key`. sequence_length: dict mapping of feature key to int length for that feature. output_features: mapping of keys to features. mean_noise_span_length: the mean number of tokens per masked span per example. noise_density: what fraction of the tokens to mask. input_feature_key: which feature to use from the dataset as the input text tokens. merge_examples_to_reduce_padding: if True, combines multiple input examples to reduce padding. reserved_for_packing: if specified, reduces the desired inputs length by the specified amount to enable multiple examples to be packed together downstream. passthrough_feature_keys: a sequence of feature names that should be passed through to the output of this preprocessor. eg: ["tokens"]. Only supported if `merge_examples_to_reduce_padding` is set to False. Returns: a dataset
Here is the function:
def span_corruption(dataset,
                    sequence_length,
                    output_features,
                    mean_noise_span_length=3.0,
                    noise_density=0.15,
                    input_feature_key='inputs',
                    merge_examples_to_reduce_padding=True,
                    reserved_for_packing=None,
                    passthrough_feature_keys: Optional[Sequence[str]] = None):
  """Final pretraining objective used in Raffel et al., 2019.

  Args:
    dataset: A tf.data.Dataset with dictionaries containing the key
      `input_feature_key`.
    sequence_length: dict mapping of feature key to int length for that
      feature.
    output_features: mapping of keys to features.
    mean_noise_span_length: the mean number of tokens per masked span per
      example.
    noise_density: what fraction of the tokens to mask.
    input_feature_key: which feature to use from the dataset as the input
      text tokens.
    merge_examples_to_reduce_padding: if True, combines multiple input
      examples to reduce padding.
    reserved_for_packing: if specified, reduces the desired inputs length by
      the specified amount to enable multiple examples to be packed together
      downstream.
    passthrough_feature_keys: a sequence of feature names that should be
      passed through to the output of this preprocessor, e.g. ["tokens"].
      Only supported if `merge_examples_to_reduce_padding` is set to False.

  Returns:
    a dataset
  """
  desired_inputs_length = sequence_length[input_feature_key]
  if reserved_for_packing:
    desired_inputs_length -= reserved_for_packing

  # Work out how long the raw token chunks must be so that, after span
  # corruption, the encoded inputs fit exactly in desired_inputs_length.
  chunk_length, expected_targets_length = random_spans_helper(
      extra_tokens_per_span_inputs=1,
      extra_tokens_per_span_targets=1,
      inputs_length=desired_inputs_length,
      mean_noise_span_length=mean_noise_span_length,
      noise_density=noise_density)

  if sequence_length['targets'] < expected_targets_length:
    raise ValueError(
        f'Expected targets length for span corruption '
        f'({expected_targets_length}) is greater than configured targets '
        f"length ({sequence_length['targets']})")

  corrupted = select_random_chunk(
      dataset,
      output_features=output_features,
      feature_key='targets',
      max_length=65536,
      passthrough_feature_keys=passthrough_feature_keys)

  if merge_examples_to_reduce_padding:
    if passthrough_feature_keys:
      raise ValueError('passthrough_feature_keys not supported with '
                       'merge_examples_to_reduce_padding=True. '
                       f'Got: {passthrough_feature_keys}')
    corrupted = reduce_concat_tokens(
        corrupted, feature_key='targets', batch_size=128)

  corrupted = split_tokens(
      corrupted,
      feature_key='targets',
      min_tokens_per_segment=None,
      max_tokens_per_segment=chunk_length,
      passthrough_feature_keys=passthrough_feature_keys)

  return denoise(
      corrupted,
      output_features,
      inputs_fn=noise_span_to_unique_sentinel,
      targets_fn=nonnoise_span_to_unique_sentinel,
      noise_density=noise_density,
      noise_mask_fn=functools.partial(
          random_spans_noise_mask,
          mean_noise_span_length=mean_noise_span_length),
      input_feature_key=input_feature_key,
      passthrough_feature_keys=passthrough_feature_keys)
56 | import collections
import functools
import math
import re
from typing import Any, Callable, Mapping, Optional, Protocol, Sequence, Union
import uuid
from absl import logging
import babel
import gin
import seqio
import tensorflow.compat.v2 as tf
def select_random_chunk(dataset: tf.data.Dataset,
                        output_features: Mapping[str, seqio.Feature],
                        max_length: Optional[int] = None,
                        feature_key: str = 'targets',
                        additional_feature_keys: Optional[Sequence[str]] = None,
                        passthrough_feature_keys: Optional[
                            Sequence[str]] = None,
                        sequence_length: Optional[Mapping[str, int]] = None,
                        uniform_random_start: bool = False,
                        min_length: Optional[int] = None,
                        **unused_kwargs) -> tf.data.Dataset:
  """SeqIO wrapper for single_example_select_random_chunk().

  Filters out empty examples, then selects a random chunk of tokens from each
  remaining example (delegated to single_example_select_random_chunk).

  Args:
    dataset: a tf.data.Dataset of token-sequence examples.
    output_features: mapping of feature keys to seqio.Feature.
    max_length: optional maximum chunk length.
    feature_key: the feature to chunk.
    additional_feature_keys: extra features chunked alongside feature_key.
    passthrough_feature_keys: features copied through unchanged.
    sequence_length: optional mapping of feature key to int length.
    uniform_random_start: whether to pick the chunk start uniformly.
    min_length: optional minimum chunk length.
    **unused_kwargs: ignored.

  Returns:
    a tf.data.Dataset.
  """
  # map_over_dataset supplies the per-example `seed` argument and adapts
  # _my_fn to operate on the dataset; without the decorator, _my_fn(dataset)
  # below would be called with the wrong number of arguments.
  @seqio.map_over_dataset(num_seeds=1)
  def _my_fn(x, seed):
    return single_example_select_random_chunk(
        x,
        seed,
        output_features=output_features,
        max_length=max_length,
        feature_key=feature_key,
        additional_feature_keys=additional_feature_keys,
        passthrough_feature_keys=passthrough_feature_keys,
        sequence_length=sequence_length,
        uniform_random_start=uniform_random_start,
        min_length=min_length)

  # Filter empty examples.
  dataset = dataset.filter(lambda x: tf.not_equal(tf.size(x[feature_key]), 0))
  return _my_fn(dataset)
def reduce_concat_tokens(dataset,
                         feature_key='targets',
                         batch_size=128,
                         **unused_kwargs):
  """Token-preprocessor to concatenate multiple unrelated documents.

  To generate examples of exactly the right length (avoiding wasted padding),
  use this function followed by split_tokens.

  Args:
    dataset: a tf.data.Dataset with dictionaries containing the key
      feature_key.
    feature_key: a string
    batch_size: an integer - how many documents to concatenate into one

  Returns:
    a dataset
  """
  def _flatten_and_strip(batch):
    flat = tf.reshape(batch[feature_key], [-1])
    # Drop the zero padding introduced by padded_batch.
    flat = tf.boolean_mask(flat, tf.cast(flat, tf.bool))
    return {feature_key: flat}

  dataset = dataset.map(
      lambda x: {feature_key: x[feature_key]}, num_parallel_calls=AUTOTUNE)
  dataset = dataset.padded_batch(batch_size, padded_shapes={feature_key: [-1]})
  return dataset.map(_flatten_and_strip, num_parallel_calls=AUTOTUNE)
def split_tokens_to_inputs_length(dataset, sequence_length,
                                  output_features, **kwargs):
  """Split tokens into segments no longer than the configured inputs length."""
  segment_length = sequence_length['inputs']
  if output_features['inputs'].add_eos:
    # Reserve one position for the EOS token inserted later.
    segment_length = segment_length - 1
  return split_tokens(dataset, max_tokens_per_segment=segment_length, **kwargs)
def denoise(dataset,
            output_features,
            noise_density=gin.REQUIRED,
            noise_mask_fn=gin.REQUIRED,
            inputs_fn=gin.REQUIRED,
            targets_fn=None,
            passthrough_feature_keys: Optional[Sequence[str]] = None,
            input_feature_key='inputs',
            **unused_kwargs):
  """SeqIO wrapper for single_example_denoise().

  Args:
    dataset: a tf.data.Dataset of token-sequence examples to corrupt.
    output_features: mapping of feature keys to seqio.Feature.
    noise_density: approximate fraction of tokens to treat as noise.
    noise_mask_fn: callable producing a boolean noise mask per example.
    inputs_fn: callable mapping (tokens, noise_mask, vocabulary, seeds) to
      the corrupted inputs sequence.
    targets_fn: optional callable producing the targets sequence; handling of
      None is delegated to single_example_denoise.
    passthrough_feature_keys: feature names copied through unchanged.
    input_feature_key: which feature key to write the corrupted inputs to.
    **unused_kwargs: ignored.

  Returns:
    a tf.data.Dataset.
  """
  # map_over_dataset supplies the per-example `seed` argument and adapts
  # my_fn to operate on the dataset; without the decorator, my_fn(dataset)
  # below would be called with the wrong number of arguments.
  @seqio.map_over_dataset(num_seeds=1)
  def my_fn(features, seed):
    return single_example_denoise(
        features,
        seed,
        output_features=output_features,
        noise_density=noise_density,
        noise_mask_fn=noise_mask_fn,
        inputs_fn=inputs_fn,
        targets_fn=targets_fn,
        passthrough_feature_keys=passthrough_feature_keys,
        input_feature_key=input_feature_key)

  return my_fn(dataset)
def iid_noise_mask(length, noise_density, seeds):
  """Independent and identically distributed token noise.

  Each position is marked as noise independently with probability
  noise_density.

  Args:
    length: an int32 scalar.
    noise_density: a float - approximate density of output mask.
    seeds: an int32 Tensor, shaped (1, 2), the random seed.

  Returns:
    a boolean tensor with shape [length].
  """
  uniform_draws = tf.random.stateless_uniform([length], seed=seeds[0])
  return uniform_draws < noise_density
def noise_span_to_unique_sentinel(tokens, noise_mask, vocabulary, seeds):
  """Replace each run of consecutive noise tokens with a different sentinel.

  Aligns the dropped spans in the inputs with the markers in the targets,
  producing training pairs like:
    "We hold X to be Y that" -> "X these truths Y self evident Z"

  Sentinels are assigned in decreasing order within the sequence, starting at
  vocabulary.size - 1 - i.e. the last tokens in the vocabulary are
  appropriated for additional use as sentinels.

  Args:
    tokens: a 1d integer Tensor
    noise_mask: a boolean Tensor with the same shape as tokens
    vocabulary: a vocabulary.Vocabulary
    seeds: an unused int32 Tensor

  Returns:
    a Tensor with the same shape and dtype as tokens
  """
  del seeds  # deterministic given the mask

  # A position starts a noise span iff it is noise and its predecessor is not.
  previous_is_noise = tf.pad(noise_mask[:-1], [[1, 0]])
  starts_span = tf.logical_and(noise_mask, tf.logical_not(previous_is_noise))
  continues_span = tf.logical_and(noise_mask, previous_is_noise)

  # One fresh sentinel id per span, counting down from the top of the vocab.
  sentinels = sentinel_id(vocabulary) + 1 - tf.cumsum(
      tf.cast(starts_span, tokens.dtype))

  marked = tf.where(starts_span, sentinels, tokens)
  # Drop the remainder of each noise span, keeping just its sentinel.
  return tf.boolean_mask(marked, tf.logical_not(continues_span))
def nonnoise_span_to_unique_sentinel(tokens, noise_mask, vocabulary, seeds):
  """Sentinel-replace the non-noise spans (complement of the noise mask)."""
  inverted_mask = tf.logical_not(noise_mask)
  return noise_span_to_unique_sentinel(tokens, inverted_mask, vocabulary,
                                       seeds)
The provided code snippet includes necessary dependencies for implementing the `iid_denoising` function. Write a Python function `def iid_denoising(dataset, sequence_length, output_features)` to solve the following problem:
Baseline pretraining objective used in Raffel et al., 2019.
Here is the function:
def iid_denoising(dataset, sequence_length, output_features):
  """Baseline pretraining objective used in Raffel et al., 2019.

  Chains random-chunk selection, document concatenation, splitting to the
  inputs length, and i.i.d. token corruption at 15% density.
  """
  pipeline = select_random_chunk(
      dataset, output_features=output_features, feature_key='targets',
      max_length=65536)
  pipeline = reduce_concat_tokens(
      pipeline, feature_key='targets', batch_size=128)
  pipeline = split_tokens_to_inputs_length(
      pipeline, output_features=output_features,
      sequence_length=sequence_length)
  return denoise(
      pipeline,
      output_features,
      inputs_fn=noise_span_to_unique_sentinel,
      targets_fn=nonnoise_span_to_unique_sentinel,
      noise_density=0.15,
      noise_mask_fn=iid_noise_mask
  )
57 | import collections
import functools
import math
import re
from typing import Any, Callable, Mapping, Optional, Protocol, Sequence, Union
import uuid
from absl import logging
import babel
import gin
import seqio
import tensorflow.compat.v2 as tf
def select_random_chunk(dataset: tf.data.Dataset,
                        output_features: Mapping[str, seqio.Feature],
                        max_length: Optional[int] = None,
                        feature_key: str = 'targets',
                        additional_feature_keys: Optional[Sequence[str]] = None,
                        passthrough_feature_keys: Optional[
                            Sequence[str]] = None,
                        sequence_length: Optional[Mapping[str, int]] = None,
                        uniform_random_start: bool = False,
                        min_length: Optional[int] = None,
                        **unused_kwargs) -> tf.data.Dataset:
  """SeqIO wrapper for single_example_select_random_chunk().

  Filters out empty examples, then selects a random chunk of tokens from each
  remaining example (delegated to single_example_select_random_chunk).

  Args:
    dataset: a tf.data.Dataset of token-sequence examples.
    output_features: mapping of feature keys to seqio.Feature.
    max_length: optional maximum chunk length.
    feature_key: the feature to chunk.
    additional_feature_keys: extra features chunked alongside feature_key.
    passthrough_feature_keys: features copied through unchanged.
    sequence_length: optional mapping of feature key to int length.
    uniform_random_start: whether to pick the chunk start uniformly.
    min_length: optional minimum chunk length.
    **unused_kwargs: ignored.

  Returns:
    a tf.data.Dataset.
  """
  # map_over_dataset supplies the per-example `seed` argument and adapts
  # _my_fn to operate on the dataset; without the decorator, _my_fn(dataset)
  # below would be called with the wrong number of arguments.
  @seqio.map_over_dataset(num_seeds=1)
  def _my_fn(x, seed):
    return single_example_select_random_chunk(
        x,
        seed,
        output_features=output_features,
        max_length=max_length,
        feature_key=feature_key,
        additional_feature_keys=additional_feature_keys,
        passthrough_feature_keys=passthrough_feature_keys,
        sequence_length=sequence_length,
        uniform_random_start=uniform_random_start,
        min_length=min_length)

  # Filter empty examples.
  dataset = dataset.filter(lambda x: tf.not_equal(tf.size(x[feature_key]), 0))
  return _my_fn(dataset)
def split_tokens_to_inputs_length(dataset, sequence_length,
                                  output_features, **kwargs):
  """Split tokens into segments no longer than the configured inputs length."""
  segment_length = sequence_length['inputs']
  if output_features['inputs'].add_eos:
    # Reserve one position for the EOS token inserted later.
    segment_length = segment_length - 1
  return split_tokens(dataset, max_tokens_per_segment=segment_length, **kwargs)
def denoise(dataset,
            output_features,
            noise_density=gin.REQUIRED,
            noise_mask_fn=gin.REQUIRED,
            inputs_fn=gin.REQUIRED,
            targets_fn=None,
            passthrough_feature_keys: Optional[Sequence[str]] = None,
            input_feature_key='inputs',
            **unused_kwargs):
  """SeqIO wrapper for single_example_denoise().

  Args:
    dataset: a tf.data.Dataset of token-sequence examples to corrupt.
    output_features: mapping of feature keys to seqio.Feature.
    noise_density: approximate fraction of tokens to treat as noise.
    noise_mask_fn: callable producing a boolean noise mask per example.
    inputs_fn: callable mapping (tokens, noise_mask, vocabulary, seeds) to
      the corrupted inputs sequence.
    targets_fn: optional callable producing the targets sequence; handling of
      None is delegated to single_example_denoise.
    passthrough_feature_keys: feature names copied through unchanged.
    input_feature_key: which feature key to write the corrupted inputs to.
    **unused_kwargs: ignored.

  Returns:
    a tf.data.Dataset.
  """
  # map_over_dataset supplies the per-example `seed` argument and adapts
  # my_fn to operate on the dataset; without the decorator, my_fn(dataset)
  # below would be called with the wrong number of arguments.
  @seqio.map_over_dataset(num_seeds=1)
  def my_fn(features, seed):
    return single_example_denoise(
        features,
        seed,
        output_features=output_features,
        noise_density=noise_density,
        noise_mask_fn=noise_mask_fn,
        inputs_fn=inputs_fn,
        targets_fn=targets_fn,
        passthrough_feature_keys=passthrough_feature_keys,
        input_feature_key=input_feature_key)

  return my_fn(dataset)
def random_prefix_noise_mask(length, noise_density, seeds):
  """First part of the sequence is noise (for prefix_lm).

  The prefix length is chosen uniformly between a density-derived minimum
  and length - 1. noise_density must not exceed 0.5; with noise_density=0.5
  the minimum collapses to 1, so the prefix length is uniform on [1, length).

  Args:
    length: an int32 scalar.
    noise_density: a float - must not exceed 0.5.
    seeds: an int32 Tensor, shaped (1, 2), the random seed.
  Returns:
    a boolean tensor with shape [length].
  """
  if noise_density > 0.5:
    raise NotImplementedError(
        'noise density must not exceed 0.5 for random_prefix_noise_mask')
  max_input_tokens = length - 1
  # Lower bound on the prefix length: round((1 - 2*noise_density) * max),
  # clamped to [1, max_input_tokens]. Larger noise_density => smaller minimum
  # prefix, i.e. more of the sequence available as targets.
  min_input_tokens = tf.minimum(
      max_input_tokens,
      tf.maximum(
          1,
          tf.cast(
              tf.math.round((1 - 2 * noise_density) *
                            tf.cast(max_input_tokens, tf.float32)), tf.int32)))
  num_input_tokens = tf.random.stateless_uniform(
      [],
      minval=min_input_tokens,
      maxval=max_input_tokens + 1,
      dtype=tf.int32,
      seed=seeds[0])
  # Mark the first num_input_tokens positions (the prefix) as "noise".
  return tf.range(length, dtype=tf.int32) < num_input_tokens
def drop_noise_tokens(tokens, noise_mask, vocabulary, seeds):
  """Remove the noise tokens outright, inserting no sentinels.

  Args:
    tokens: a 1d integer Tensor
    noise_mask: a boolean Tensor with the same shape as tokens
    vocabulary: an unused vocabulary.Vocabulary
    seeds: an unused int32 Tensor

  Returns:
    a Tensor with the same shape and dtype as tokens
  """
  del vocabulary, seeds  # deterministic; the vocabulary is never consulted
  keep = tf.logical_not(noise_mask)
  return tf.boolean_mask(tokens, keep)
def drop_nonnoise_tokens(tokens, noise_mask, vocabulary, seeds):
  """Remove the non-noise tokens outright, inserting no sentinels.

  Args:
    tokens: a 1d integer Tensor
    noise_mask: a boolean Tensor with the same shape as tokens
    vocabulary: an unused vocabulary.Vocabulary
    seeds: an unused int32 Tensor

  Returns:
    a Tensor with the same shape and dtype as tokens
  """
  del vocabulary, seeds  # deterministic; the vocabulary is never consulted
  selected = tf.boolean_mask(tokens, noise_mask)
  return selected
The provided code snippet includes necessary dependencies for implementing the `prefix_lm` function. Write a Python function `def prefix_lm(dataset, sequence_length, output_features, noise_density=0.5)` to solve the following problem:
Prefix language modeling objective used in Raffel et al. 2019.
Here is the function:
def prefix_lm(dataset, sequence_length, output_features,
              noise_density=0.5):
  """Prefix language modeling objective used in Raffel et al. 2019.

  A random prefix of each sequence becomes the inputs and the remainder
  becomes the targets.
  """
  chunked = select_random_chunk(dataset, output_features=output_features,
                                feature_key='targets', max_length=65536)
  sized = split_tokens_to_inputs_length(chunked,
                                        output_features=output_features,
                                        sequence_length=sequence_length)
  return denoise(
      sized,
      output_features,
      inputs_fn=drop_nonnoise_tokens,
      targets_fn=drop_noise_tokens,
      noise_density=noise_density,
      noise_mask_fn=random_prefix_noise_mask,
  )
58 | import collections
import functools
import math
import re
from typing import Any, Callable, Mapping, Optional, Protocol, Sequence, Union
import uuid
from absl import logging
import babel
import gin
import seqio
import tensorflow.compat.v2 as tf
def select_random_chunk(dataset: tf.data.Dataset,
                        output_features: Mapping[str, seqio.Feature],
                        max_length: Optional[int] = None,
                        feature_key: str = 'targets',
                        additional_feature_keys: Optional[Sequence[str]] = None,
                        passthrough_feature_keys: Optional[
                            Sequence[str]] = None,
                        sequence_length: Optional[Mapping[str, int]] = None,
                        uniform_random_start: bool = False,
                        min_length: Optional[int] = None,
                        **unused_kwargs) -> tf.data.Dataset:
  """SeqIO wrapper for single_example_select_random_chunk().

  Filters out empty examples, then selects a random chunk of tokens from each
  remaining example (delegated to single_example_select_random_chunk).

  Args:
    dataset: a tf.data.Dataset of token-sequence examples.
    output_features: mapping of feature keys to seqio.Feature.
    max_length: optional maximum chunk length.
    feature_key: the feature to chunk.
    additional_feature_keys: extra features chunked alongside feature_key.
    passthrough_feature_keys: features copied through unchanged.
    sequence_length: optional mapping of feature key to int length.
    uniform_random_start: whether to pick the chunk start uniformly.
    min_length: optional minimum chunk length.
    **unused_kwargs: ignored.

  Returns:
    a tf.data.Dataset.
  """
  # map_over_dataset supplies the per-example `seed` argument and adapts
  # _my_fn to operate on the dataset; without the decorator, _my_fn(dataset)
  # below would be called with the wrong number of arguments.
  @seqio.map_over_dataset(num_seeds=1)
  def _my_fn(x, seed):
    return single_example_select_random_chunk(
        x,
        seed,
        output_features=output_features,
        max_length=max_length,
        feature_key=feature_key,
        additional_feature_keys=additional_feature_keys,
        passthrough_feature_keys=passthrough_feature_keys,
        sequence_length=sequence_length,
        uniform_random_start=uniform_random_start,
        min_length=min_length)

  # Filter empty examples.
  dataset = dataset.filter(lambda x: tf.not_equal(tf.size(x[feature_key]), 0))
  return _my_fn(dataset)
def reduce_concat_tokens(dataset,
                         feature_key='targets',
                         batch_size=128,
                         **unused_kwargs):
  """Token-preprocessor to concatenate multiple unrelated documents.

  To generate examples of exactly the right length (avoiding wasted padding),
  use this function followed by split_tokens.

  Args:
    dataset: a tf.data.Dataset with dictionaries containing the key
      feature_key.
    feature_key: a string
    batch_size: an integer - how many documents to concatenate into one

  Returns:
    a dataset
  """
  def _flatten_and_strip(batch):
    flat = tf.reshape(batch[feature_key], [-1])
    # Drop the zero padding introduced by padded_batch.
    flat = tf.boolean_mask(flat, tf.cast(flat, tf.bool))
    return {feature_key: flat}

  dataset = dataset.map(
      lambda x: {feature_key: x[feature_key]}, num_parallel_calls=AUTOTUNE)
  dataset = dataset.padded_batch(batch_size, padded_shapes={feature_key: [-1]})
  return dataset.map(_flatten_and_strip, num_parallel_calls=AUTOTUNE)
def split_tokens(dataset: tf.data.Dataset,
                 min_tokens_per_segment: Optional[int] = None,
                 max_tokens_per_segment: int = gin.REQUIRED,
                 feature_key: str = 'targets',
                 additional_feature_keys: Optional[Sequence[str]] = None,
                 passthrough_feature_keys: Optional[Sequence[str]] = None,
                 **unused_kwargs) -> tf.data.Dataset:
  """Split examples into multiple examples each.

  The intended use case is to break up long examples for use in unsupervised
  transfer-learning. This function is generally preceded by
  select_random_chunk.

  If min_tokens_per_segment is provided, the segment length is chosen randomly
  per document from a log-uniform distribution. If min_tokens_per_segment is
  None, then the segment length is max_tokens_per_segment (except for a
  possibly shorter last segment in each document).

  Args:
    dataset: a tf.data.Dataset with dictionaries containing the key
      feature_key.
    min_tokens_per_segment: an optional integer
    max_tokens_per_segment: an integer, the maximum number of tokens in each
      segment. Only the final segment may be shorter.
    feature_key: a string, the feature to split
    additional_feature_keys: Additional features to split. The same chunk size
      will be used, so they should be the same size as feature_key.
    passthrough_feature_keys: Features to pass through without any splitting.

  Returns:
    a dataset
  """
  if passthrough_feature_keys:
    split_keys = set([feature_key] + (additional_feature_keys or []))
    overlap_keys = split_keys & set(passthrough_feature_keys)
    if overlap_keys:
      raise ValueError(
          f'split keys {overlap_keys} also included in passthrough keys')

  # map_over_dataset supplies the per-example `seed` argument and adapts
  # _split_tokens to operate on the dataset; without the decorator, the
  # _split_tokens(dataset) call below would have the wrong arity.
  @seqio.map_over_dataset(num_seeds=1)
  def _split_tokens(x, seed):
    """Split one token sequence into multiple sequences."""
    tokens = x[feature_key]
    n_tokens = tf.shape(tokens)[0]
    if min_tokens_per_segment is None:
      length = max_tokens_per_segment
    else:
      # pick a length - log-uniformly distributed
      length = tf.cast(
          tf.exp(
              tf.random.stateless_uniform(
                  [],
                  minval=math.log(min_tokens_per_segment),
                  maxval=math.log(max_tokens_per_segment),
                  seed=seed
              )
          ),
          tf.int32)

    # Pad to a multiple of length, then use tf.reshape to split up the tokens
    # into num_segments segments each of the given length.
    num_segments = tf.cast(
        tf.math.ceil(
            tf.cast(n_tokens, tf.float32) / tf.cast(length, tf.float32)),
        tf.int32)
    padding = num_segments * length - tf.shape(tokens)[0]
    feature_keys_to_split = [feature_key]
    orig_lengths = {}
    outputs = {}
    if additional_feature_keys is not None:
      feature_keys_to_split.extend(additional_feature_keys)
    for k in feature_keys_to_split:
      with tf.control_dependencies([
          tf.assert_equal(
              tf.shape(tokens)[0],
              tf.shape(x[k])[0],
              message=(f'Additional feature {k} is not the same size as '
                       f'{feature_key} along axis 0 in split_tokens().')
          )
      ]):
        shape = tf.shape(x[k])[1:]
        shape_list = x[k].shape[1:]
        padded = tf.pad(
            x[k],
            tf.concat([[[0, padding]],
                       tf.zeros([len(shape_list), 2], dtype=tf.int32)],
                      axis=0))
        orig_lengths[k] = tf.concat(
            [tf.repeat(length, num_segments - 1), [length - padding]], axis=0)
        outputs[k] = tf.reshape(
            padded, tf.concat([[-1, length], shape], axis=0))

    # To avoid memory issues, don't just replicate the passthrough features
    # for every segment; use tf.data to do it so the copies don't get
    # instantiated all at once.
    outputs_ds = tf.data.Dataset.from_tensor_slices(outputs)
    orig_lengths_ds = tf.data.Dataset.from_tensor_slices(orig_lengths)
    if passthrough_feature_keys:
      passthrough = {k: v for k, v in x.items()
                     if k in passthrough_feature_keys}
      passthrough_ds = tf.data.Dataset.from_tensors(passthrough).repeat(
          tf.cast(num_segments, tf.int64))
      return tf.data.Dataset.zip((outputs_ds, orig_lengths_ds, passthrough_ds))
    else:
      return tf.data.Dataset.zip((outputs_ds, orig_lengths_ds))

  def _strip_padding_and_merge_passthrough(
      inputs, orig_lengths, passthrough=None):
    """Trim each segment back to its true length and re-attach passthrough."""
    output = {}
    for k, v in inputs.items():
      output[k] = v[:orig_lengths[k]]
    if passthrough:
      for k, v in passthrough.items():
        output[k] = passthrough[k]
    return output

  # Filter empty examples.
  dataset = dataset.filter(lambda x: tf.not_equal(tf.size(x[feature_key]), 0))
  dataset = _split_tokens(dataset).flat_map(lambda z: z)
  dataset = dataset.map(
      _strip_padding_and_merge_passthrough, num_parallel_calls=AUTOTUNE)
  return dataset
The provided code snippet includes necessary dependencies for implementing the `full_lm` function. Write a Python function `def full_lm(dataset, sequence_length, output_features)` to solve the following problem:
Full language modeling objective with EOS only at document boundaries.
Here is the function:
def full_lm(dataset, sequence_length, output_features):
  """Full language modeling objective with EOS only at document boundaries."""
  out = select_random_chunk(dataset, output_features=output_features,
                            feature_key='targets', max_length=65536)
  out = seqio.preprocessors.append_eos(out, output_features)
  out = reduce_concat_tokens(out, feature_key='targets', batch_size=128)
  # EOS was already appended above, so split on the raw targets length rather
  # than going through split_tokens_to_targets_length.
  return split_tokens(out, max_tokens_per_segment=sequence_length['targets'])
59 | import collections
import functools
import math
import re
from typing import Any, Callable, Mapping, Optional, Protocol, Sequence, Union
import uuid
from absl import logging
import babel
import gin
import seqio
import tensorflow.compat.v2 as tf
The provided code snippet includes necessary dependencies for implementing the `trim_tokens_at_front` function. Write a Python function `def trim_tokens_at_front(x, sequence_length, keys_to_trim=None, **unused_kwargs)` to solve the following problem:
Token-preprocessor to trim sequence at the beginning. Args: x: an example with dictionaries containing keys_to_trim. sequence_length: a dict of ints. keys_to_trim: a list of feature keys. Returns: A preprocessed example.
Here is the function:
def trim_tokens_at_front(x,
                         sequence_length,
                         keys_to_trim=None,
                         **unused_kwargs):
  """Token-preprocessor that drops leading tokens from over-long sequences.

  Args:
    x: an example with dictionaries containing keys_to_trim.
    sequence_length: a dict of ints.
    keys_to_trim: a list of feature keys.

  Returns:
    A preprocessed example.
  """
  # An empty or None keys_to_trim falls back to every key in sequence_length
  # (truthiness-based, matching `keys_to_trim or ...`).
  for key in (keys_to_trim or sequence_length.keys()):
    if key not in x:
      continue
    # Keep only the trailing tokens, reserving one slot for the EOS token
    # appended later in the pipeline.
    budget = sequence_length[key] - 1
    x[key] = x[key][-budget:]
  return x
60 | import collections
import functools
import math
import re
from typing import Any, Callable, Mapping, Optional, Protocol, Sequence, Union
import uuid
from absl import logging
import babel
import gin
import seqio
import tensorflow.compat.v2 as tf
The provided code snippet includes necessary dependencies for implementing the `trivia_qa_truncate_inputs` function. Write a Python function `def trivia_qa_truncate_inputs(dataset, output_features, sequence_length)` to solve the following problem:
Token preprocessor for the trivia QA dataset to truncate inputs. This function takes a dataset containing "targets" and "inputs". It searches for the "targets" in the "inputs" and truncates the "inputs" to `sequence_length` while ensuring that the "targets" are present in the "inputs". The function will randomly select a subset of "inputs". If "targets" are not found in the "inputs", then the example is dropped from the dataset. E.g. Input dataset { "inputs": [0, 3, 5, 7, 9, 11, 13, 15, 17, 18] "targets": [5, 7, 9] } Output dataset (assuming sequence_length['inputs'] = 4) { "inputs": [3, 5, 7, 9] "targets": [5, 7, 9] } or { "inputs": [5, 7, 9, 11] "targets": [5, 7, 9] } Args: dataset: a tf.data.Dataset with dictionaries containing the "inputs" and "targets". output_features: unused by this function. sequence_length: a dict, with keys as "inputs" and "targets" indicating the maximum number of tokens in each of the sequences. Returns: a dataset
Here is the function:
def trivia_qa_truncate_inputs(dataset, output_features, sequence_length):
  """Token preprocessor for the trivia QA dataset to truncate inputs.

  This function takes a dataset containing "targets" and "inputs". It searches
  for the "targets" in the "inputs" and truncates the "inputs" to
  `sequence_length` while ensuring that the "targets" are present in the
  "inputs". The function will randomly select a subset of "inputs".
  If "targets" are not found in the "inputs", then the example
  is dropped from the dataset.

  E.g.
  Input dataset
  {
    "inputs": [0, 3, 5, 7, 9, 11, 13, 15, 17, 18]
    "targets": [5, 7, 9]
  }

  Output dataset (assuming sequence_length['inputs'] = 4)
  {
    "inputs": [3, 5, 7, 9]
    "targets": [5, 7, 9]
  }
  or
  {
    "inputs": [5, 7, 9, 11]
    "targets": [5, 7, 9]
  }

  Args:
    dataset: a tf.data.Dataset with dictionaries containing the "inputs" and
      "targets".
    output_features: unused by this function.
    sequence_length: a dict, with keys as "inputs" and "targets" indicating the
      maximum number of tokens in each of the sequences.

  Returns:
    a dataset
  """
  del output_features  # Unused; accepted to satisfy the preprocessor API.

  @seqio.map_over_dataset(num_seeds=1)
  def my_fn(features, seed):
    """Function to map original dataset to the new dataset."""
    inputs = features['inputs']
    targets = features['targets']
    ans_len = tf.shape(targets)[0]
    max_input_tokens = sequence_length['inputs']

    def truncate_inputs():
      """Helper function to truncate the inputs."""

      def answer_in_context(context, answer):
        """Helper function that checks if the answer is present in the context.

        Args:
          context: Tensor, tokenized representation of the context
          answer: Tensor, tokenized representation of the answer

        Returns:
          result: boolean, indicates if the answer was present in the context.
          pos_mask: boolean mask, a mask for every possible start position of
            the answer in the context. Indicates whether the answer starts at
            the particular position.
        """
        conv_inp = tf.reshape(tf.cast(context, tf.float32), [1, -1, 1])
        ans_len = tf.shape(answer)[0]
        filters = tf.eye(ans_len, dtype=tf.float32)
        # Assume context len is N and answer len is M.
        # Use a convolution to create a matrix of (N-M) x M elements where
        # each row of the matrix is a sequence of len M. This matrix contains
        # all possible contiguous sequences of length M from the context.
        # Every row of this matrix is compared with the answer to check if the
        # answer exists in the context.
        strided = tf.nn.conv1d(conv_inp,
                               tf.reshape(filters, [ans_len, 1, ans_len]), 1,
                               'VALID')
        strided = tf.cast(strided[0], answer.dtype)
        pos_mask = tf.reduce_all(
            tf.equal(strided, tf.reshape(answer, [1, -1])), 1)
        result = tf.reduce_any(pos_mask)
        return result, pos_mask

      def slice_inputs(inputs, answer_len, pos_mask, seed=None):
        """Helper function to slice inputs while keeping the answer."""
        # First position at which the answer occurs in the inputs.
        ans_start_pos = tf.cast(tf.where(pos_mask)[0][0], tf.int32)
        inputs_len = tf.shape(inputs)[0]
        # Valid window starts: the truncated window must both contain the
        # whole answer and stay inside the inputs, so the start lies in
        # [ans_start_pos - (max_input_tokens - answer_len),
        #  min(ans_start_pos, inputs_len - max_input_tokens)] (clamped at 0).
        start_range_min = tf.maximum(
            0, ans_start_pos - (max_input_tokens - answer_len))
        start_range_max = tf.minimum(ans_start_pos,
                                     inputs_len - max_input_tokens) + 1
        # Deterministic given `seed`: uniformly pick one valid window start.
        start_pos = tf.random.stateless_uniform(
            [],
            minval=start_range_min,
            maxval=start_range_max,
            dtype=tf.int32,
            seed=seed)
        return inputs[start_pos:start_pos + max_input_tokens]

      result, pos_mask = answer_in_context(inputs, targets)
      # NOTE(review): tensor-dependent Python conditional; presumably relies
      # on AutoGraph conversion when this map function is traced — confirm.
      if result:
        return slice_inputs(inputs, ans_len, pos_mask, seed=seed)
      else:
        # Empty inputs mark the example for removal by the filter below.
        return tf.constant([], dtype=inputs.dtype)

    if tf.greater(tf.shape(inputs)[0], max_input_tokens):
      inputs = truncate_inputs()
    return {'inputs': inputs, 'targets': features['targets']}

  dataset = my_fn(dataset)
  # Drop examples whose answer was not found in the inputs (empty inputs).
  return dataset.filter(lambda x: tf.size(x['inputs']) > 0)
import collections
import functools
import math
import re
from typing import Any, Callable, Mapping, Optional, Protocol, Sequence, Union
import uuid
from absl import logging
import babel
import gin
import seqio
import tensorflow.compat.v2 as tf
The provided code snippet includes necessary dependencies for implementing the `unsupervised` function. Write a Python function `def unsupervised(dataset, preprocessors=None, output_features=None, sequence_length=None)` to solve the following problem:
Configure this to point at unsupervised preprocessors. This function creates an extra level of indirection in case we want different unsupervised pretraining functions in the future which do not fit into the denoise() framework. This function should be used as a post-cache preprocessing function. Args: dataset: A tf.data.Dataset to process. preprocessors: a list of token-preprocessor functions. These functions should take unused kwargs if output_features or sequence_length is not used. output_features: dict(str, Feature), output features of the Task to be passed to the model. sequence_length: dict mapping feature key to int length for that feature. Returns: A preprocessed tf.data.Dataset.
Here is the function:
def unsupervised(dataset,
                 preprocessors=None,
                 output_features=None,
                 sequence_length=None):
  """Configure this to point at unsupervised preprocessors.

  This function creates an extra level of indirection in case we want
  different unsupervised pretraining functions in the future which do not
  fit into the denoise() framework.

  This function should be used as a post-cache preprocessing function.

  Args:
    dataset: A tf.data.Dataset to process.
    preprocessors: a list of token-preprocessor functions. These functions
      should take unused kwargs if output_features or sequence_length is not
      used.
    output_features: dict(str, Feature), output features of the Task to be
      passed to the model.
    sequence_length: dict mapping feature key to int length for that feature.

  Returns:
    A preprocessed tf.data.Dataset.
  """
  if preprocessors is None:
    logging.warning(
        'unsupervised preprocessor got preprocessors=None; no preprocessing '
        'will be applied.'
    )
    return dataset

  # Forward only the optional arguments that were actually provided.
  shared_kwargs = {
      name: value
      for name, value in (('output_features', output_features),
                          ('sequence_length', sequence_length))
      if value
  }

  processed = dataset
  for preprocess_fn in preprocessors:
    processed = preprocess_fn(processed, **shared_kwargs)
  return processed
import collections
import functools
import math
import re
from typing import Any, Callable, Mapping, Optional, Protocol, Sequence, Union
import uuid
from absl import logging
import babel
import gin
import seqio
import tensorflow.compat.v2 as tf
def split_tokens(dataset: tf.data.Dataset,
min_tokens_per_segment: Optional[int] = None,
max_tokens_per_segment: int = gin.REQUIRED,
feature_key: str = 'targets',
additional_feature_keys: Optional[Sequence[str]] = None,
passthrough_feature_keys: Optional[Sequence[str]] = None,
**unused_kwargs) -> tf.data.Dataset:
def split_tokens_to_targets_length(dataset, sequence_length,
                                   output_features, **kwargs):
  """Splits token sequences into segments that fit the targets length.

  Thin wrapper around `split_tokens` that derives the maximum segment size
  from `sequence_length['targets']`, reserving one position when the targets
  feature appends an EOS token.

  Args:
    dataset: a tf.data.Dataset of tokenized examples to split.
    sequence_length: dict mapping feature key to int length for that feature.
    output_features: mapping of feature name to seqio Feature; only
      `output_features['targets'].add_eos` is consulted here.
    **kwargs: forwarded to `split_tokens`.

  Returns:
    a tf.data.Dataset with 'targets' split to at most the derived length.
  """
  targets_feature = output_features['targets']
  # Leave room to insert an EOS token when one will be appended downstream.
  eos_reserve = 1 if targets_feature.add_eos else 0
  segment_length = sequence_length['targets'] - eos_reserve
  return split_tokens(dataset, max_tokens_per_segment=segment_length, **kwargs)
End of preview. Expand in Dataset Viewer.
RepoExec: Evaluate Code Generation with a Repository-Level Executable Benchmark
Dataset Summary
This source contains the instruction-tuning dataset to fine-tune models in our work.
Dataset Structure
Data Instances
{
"id": 0,
"prompt": "import base64\nimport random\nimport unicodedata\nimport zlib\nfrom typing import Union\nfrom uuid import uuid4\nfrom ._regex import *\nfrom .errors import InvalidInputError\nfrom .validation import is_snake_case, is_full_string, is_camel_case, is_integer, is_string\n\nclass InvalidInputError(TypeError):\n \"\"\"\n Custom error raised when received object is not a string as expected.\n \"\"\"\n\n def __init__(self, input_data: Any):\n \"\"\"\n :param input_data: Any received object\n \"\"\"\n type_name = type(input_data).__name__\n msg = 'Expected \"str\", received \"{}\"'.format(type_name)\n super().__init__(msg)\n\ndef is_string(obj: Any) -> bool:\n \"\"\"\n Checks if an object is a string.\n\n *Example:*\n\n >>> is_string('foo') # returns true\n >>> is_string(b'foo') # returns false\n\n :param obj: Object to test.\n :return: True if string, false otherwise.\n \"\"\"\n return isinstance(obj, str)\n\ndef reverse(input_string: str) -> str:\n \"\"\"\n Returns the string with its chars reversed.\n\n *Example:*\n\n >>> reverse('hello') # returns 'olleh'\n\n :param input_string: String to revert.\n :type input_string: str\n :return: Reversed string.\n \"\"\"\n",
    "docstring": "Returns the string with its chars reversed.\n\n*Example:*\n\n>>> reverse('hello') # returns 'olleh'\n\n:param input_string: String to revert.\n:type input_string: str\n:return: Reversed string."
}
Data Fields
Data fields for inline level:
- id (string): the unique id
- prompt (string): sequence to fine-tune LM
- docstring (string): docstring of the target function. If docstring is not None, instruction template is applied; otherwise raw format or small context is applied.
Data Splits
The instruction-tuning dataset is not split and contains only the `data` subset.
Usage
You can load this dataset using datasets library: pip install datasets
from datasets import load_dataset
# Load full dataset
dataset = load_dataset("Fsoft-AIC/RepoExec-Instruct")
Additional Information
Other Resources:
- Github: https://github.com/FSoft-AI4Code/RepoExec
- Webpage: https://fsoft-ai4code.github.io/repoexec
- Leaderboard: https://repoexec.github.io
- Paper: https://arxiv.org/html/2406.11927v1
Licensing Information
MIT License
Citation Information
@article{nam2024repoexec,
title={RepoExec: Evaluate Code Generation with a Repository-Level Executable Benchmark},
author={Hai, Nam Le and Manh, Dung Nguyen and Bui, Nghi DQ},
journal={arXiv preprint arXiv:2406.11927v1},
year={2024}
}
Contributions
This dataset is developed by FSOFT AI4Code team.
- Downloads last month
- 41