Dataset Viewer

Column: text (string, lengths 0 – 24.9k)
from taipy.core.config import Config, Scope, Frequency
import taipy as tp
import datetime as dt
import pandas as pd
import time

Config.load('config_09.toml')
Config.configure_job_executions(mode="standalone", max_nb_of_workers=2)


def filter_by_month(df, month):
    df['Date'] = pd.to_datetime(df['Date'])
    df = df[df['Date'].dt.month == month]
    return df


def count_values(df):
    print("Wait 10 seconds")
    time.sleep(10)
    return len(df)


def callback_scenario_state(scenario, job):
    """All the scenarios are subscribed to the callback_scenario_state function. It means whenever
    a job is done, it is called. Depending on the job and the status, it will update the message
    stored in a json that is then displayed on the GUI.

    Args:
        scenario (Scenario): the scenario of the job changed
        job (_type_): the job that has its status changed
    """
    print(scenario.name)
    if job.status.value == 7:
        for data_node in job.task.output.values():
            print(data_node.read())


if __name__ == "__main__":
    # my_scenario is the id of the scenario configured
    scenario_cfg = Config.scenarios['my_scenario']

    tp.Core().run()

    scenario_1 = tp.create_scenario(scenario_cfg,
                                    creation_date=dt.datetime(2022, 10, 7),
                                    name="Scenario 2022/10/7")

    scenario_1.subscribe(callback_scenario_state)

    scenario_1.submit(wait=True)
    scenario_1.submit(wait=True, timeout=5)
|
from taipy import Config
import taipy as tp


def double(nb):
    return nb * 2


Config.load('config_02.toml')

if __name__ == '__main__':
    # my_scenario is the id of the scenario configured
    scenario_cfg = Config.scenarios['my_scenario']

    scenario = tp.create_scenario(scenario_cfg, name="Scenario")
    tp.submit(scenario)
    print("Output of First submit:", scenario.output.read())

    print("Before write", scenario.input.read())
    scenario.input.write(54)
    print("After write", scenario.input.read())

    tp.submit(scenario)
    print("Second submit", scenario.output.read())

    # Basic functions of Taipy Core
    print([s.name for s in tp.get_scenarios()])
    scenario = tp.get(scenario.id)
    tp.delete(scenario.id)

    scenario = None
    data_node = None

    tp.Gui("""<|{scenario}|scenario_selector|>
<|{scenario}|scenario|>
<|{scenario}|scenario_dag|>
<|{data_node}|data_node_selector|>""").run()
|
from taipy.core.config import Config
import taipy as tp
import datetime as dt
import pandas as pd


def filter_current(df):
    current_month = dt.datetime.now().month
    df['Date'] = pd.to_datetime(df['Date'])
    df = df[df['Date'].dt.month == current_month]
    return df


def count_values(df):
    return len(df)


Config.load('config_03.toml')

if __name__ == '__main__':
    # my_scenario is the id of the scenario configured
    scenario_cfg = Config.scenarios['my_scenario']

    tp.Core().run()

    scenario = tp.create_scenario(scenario_cfg,
                                  creation_date=dt.datetime(2022, 10, 7),
                                  name="Scenario 2022/10/7")
    scenario.submit()
    print("Nb of values of scenario:", scenario.nb_of_values.read())

    data_node = None

    tp.Gui("""<|{scenario}|scenario_selector|>
<|{scenario}|scenario|>
<|{scenario}|scenario_dag|>
<|{data_node}|data_node_selector|>""").run()
|
from importlib import util, import_module
from pathlib import Path
import sys
import inspect
import os
import json

if len(sys.argv) < 3:
    print("Packages should be passed as arguments after the name of the searched file.", file=sys.stderr)
    exit(1)
else:
    errors = 0
    file_name = sys.argv[1]
    result = dict()
    exit_code = 1
    for package in sys.argv[2:]:
        parts = package.split(".")
        package_found = True
        for idx in range(len(parts)):
            if not util.find_spec(".".join(parts[0: idx + 1])):
                package_found = False
                break
        if not package_found:
            print(f"Package {package} not found.", file=sys.stderr)
            errors += 1
        else:
            module = import_module(package)
            found = False
            try:
                module_file = inspect.getfile(module)
                for root, dirs, files in os.walk(Path(module_file).parent.resolve()):
                    root_path = Path(root)
                    if file_name in files:
                        result[package] = str((root_path / file_name).resolve())
                        found = True
            except Exception as e:
                print(f"Error accessing {package}: {e}.", file=sys.stderr)
                exit_code += 1
            if not found:
                print(f"File {file_name} not found in Package {package}.", file=sys.stderr)
                errors += 1
    if len(result):
        json.dump(result, sys.stdout)
    elif errors:
        exit(exit_code)
|
from taipy import Gui
import cv2

face_cascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")

number_of_faces_detected = 0
selected_file = None
image = None


def process_image(state):
    img = cv2.imread(state.selected_file, cv2.IMREAD_UNCHANGED)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray)
    state.number_of_faces_detected = len(faces)
    # Draw a rectangle around faces
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x + w, y + h), (255, 255, 0), 2)
    state.image = cv2.imencode(".jpg", img)[1].tobytes()


content = """
<|{selected_file}|file_selector|label=Upload File|on_action=process_image|extensions=.jpg,.gif,.png|drop_message=Drop Message|>

<|{image}|image|width=300px|height=300px|>

<|{number_of_faces_detected} face(s) detected|>
"""

if __name__ == "__main__":
    Gui(page=content).run(dark_mode=False, port=8080)
|
from setuptools import find_packages, setup

setup(
    author="Your Name",
    author_email="[email protected]",
    python_requires=">=3.8",
    classifiers=[
        "Intended Audience :: Developers",
        # "License :: OSI Approved :: Apache Software License",
        "Natural Language :: English",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: 3.10",
    ],
    # license="Apache License 2.0",
    install_requires=["taipy-gui>=2.0"],
    include_package_data=True,
    name="guiext-library",
    description="My taipy-gui extension demo",
    long_description="This package contains a demonstration of using the Taipy GUI Extension API.",
    keywords="taipy",
    packages=find_packages(include=["demo_lib", "demo_lib.*"]),
    version="1.0.0",
    zip_safe=False,
)
|
from taipy.gui import Gui
from library import Library

page = """
# Extension library

<|library.element|>
"""

gui = Gui(page=page)
gui.add_library(Library())

if __name__ == "__main__":
    # Run main app
    gui.run()
|
from pathlib import Path

from taipy.gui.extension import ElementLibrary, Element, ElementProperty, PropertyType


class Library(ElementLibrary):
    elts = {
        # Declare the elements of the library here, as key/value pairs of
        # a dictionary.
        # - The key is used as the element name.
        # - The value must be an instance of taipy.gui.extension.Element
        #
        # Ex:
        # "element_name": Element(
        #     "default_property_name"
        #     {
        #         "property_name": ElementProperty(...)
        #     },
        #     react_component="ElementComponent"
        # ),
    }

    def get_name(self) -> str:
        return "library"

    def get_elements(self) -> dict:
        return Library.elts

    def get_scripts(self) -> list[str]:
        # Only one JavaScript bundle for this library.
        return ["library/frontend/dist/library.js"]

    def get_resource(self, name: str) -> Path:
        return super().get_resource(name)
|
# Export the library class for easier access by developers using it
from .library import Library
|
# Import modules
import taipy as tp
from taipy import Config, Scope, Gui
import pandas as pd
import numpy as np

# Back-End Code

# Filter function for best/worst colleges within 1 stat
def filtering_college(initial_dataset: pd.DataFrame, selected_stat):
    completed_graph_dataset = initial_dataset[selected_stat]
    completed_graph_data = completed_graph_dataset.nlargest(10, selected_stat, keep="all")
    return completed_graph_data

# Data Node Creation
initial_dataset_cfg = Config.configure_data_node(id="initial_dataset", storage_type="csv",
                                                 path="College_Data.csv", scope=Scope.GLOBAL)
selected_stat_cfg = Config.configure_data_node(id="selected_stat", default_data="Name", scope=Scope.GLOBAL)
completed_graph_data_cfg = Config.configure_data_node(id="completed_graph_data", scope=Scope.GLOBAL)

# Task Creation
filtered_college_cfg = Config.configure_task(id="filtered_college",
                                             function=filtering_college,
                                             input=[initial_dataset_cfg, selected_stat_cfg],
                                             output=[completed_graph_data_cfg])

# Pipeline Creation
pipeline_cfg = Config.configure_scenario(id="pipeline", task_configs=[filtered_college_cfg])

# Scenario Creation
scenario_cfg = Config.configure_scenario(id="scenario", pipeline_configs=[pipeline_cfg])
# scenario = tp.create_scenario(scenario_cfg)

# Core creation
if __name__ == "__main__":
    tp.Core().run()

# Start of Front-End Code

# Callback Function
def modify_df(state):
    scenario.selected_stat.write(state.selected_stat)
    tp.submit(scenario)
    state.df = scenario.completed_graph_data.read()

list_stats = ["Name", "Private", "Apps", "Accept", "Enroll", "Top10perc", "Top25perc", "F.Undergrad",
              "P.Undergrad", "Outstate", "Room.Board", "Books", "Personal", "PhD", "Terminal",
              "S.F.Ratio", "perc.alumni", "Expend", "Grad.Rate"]
selected_stat = "Top10perc"
df = pd.DataFrame(columns=["Name", selected_stat], copy=True)

# Variable Instantiation

# App Creation
college_stat_app = """<|{selected_stat}|selector|lov={list_stats}|on_change=modify_df|dropdown|>

<|{df}|chart|x=Name|y=selected_stat|type=bar|title=College Stats|>"""

# Runs the app (finally)
print(selected_stat)
Gui(page=college_stat_app).run()
|
import json


def add_line(source, line, step):
    line = line.replace('Getting Started with Taipy GUI', 'Getting Started with Taipy GUI on Notebooks')
    line = line.replace('(../src/', '(https://docs.taipy.io/en/latest/getting_started/src/')
    if line.startswith('['):
        img_src = line.split('](')[1].split(')')[0]
        width = line.split('](')[1].split(')')[1].split(' ')[1]
        source.append('<div align="center">\n')
        source.append(f' <img src={img_src} {width}>\n')
        source.append('</div>\n')
    elif step == 'step_00' and line.startswith('from taipy'):
        source.append("from taipy.gui import Gui, Markdown\n")
    elif 'Notebook' in line and 'step' in step:
        pass
    else:
        source.append(line + '\n')
    return source


def detect_new_cell(notebook, source, cell, line, execution_count, force_creation=False):
    if line.startswith('```python') or line.startswith('```') and cell == 'code' or force_creation:
        source = source[:-1]
        if cell == 'code':
            notebook['cells'].append({
                "cell_type": "code",
                "metadata": {},
                "outputs": [],
                "execution_count": execution_count,
                "source": source
            })
            cell = 'markdown'
            execution_count += 1
        else:
            notebook['cells'].append({
                "cell_type": "markdown",
                "metadata": {},
                "source": source
            })
            cell = 'code'
        source = []
    return cell, source, notebook, execution_count


def create_introduction(notebook, execution_count):
    with open('index.md', 'r') as f:
        text = f.read()
    split_text = text.split('\n')
    source = []
    for line in split_text:
        if not line.startswith('``` console'):
            add_line(source, line, 'index')
        else:
            break

    notebook['cells'].append({
        "cell_type": "markdown",
        "metadata": {},
        "source": source
    })
    notebook['cells'].append({
        "cell_type": "code",
        "metadata": {},
        "outputs": [],
        "execution_count": execution_count,
        "source": ['# !pip install taipy\n']
    })
    notebook['cells'].append({
        "cell_type": "markdown",
        "metadata": {},
        "source": ['## Using Notebooks\n', ]
    })
    execution_count += 1
    return notebook, execution_count


def create_steps(notebook, execution_count):
    steps = ['step_0' + str(i) for i in range(1, 8)]
    source = []
    for step in steps:
        if source != []:
            cell, source, notebook, execution_count = detect_new_cell(notebook, source, cell, line,
                                                                      execution_count, force_creation=True)
        with open(step + '/ReadMe.md', 'r') as f:
            text = f.read()
        split_text = text.split('\n')
        cell = "markdown"
        for_studio = 0
        for line in split_text:
            if cell == "markdown":
                line = line.replace("    ", "")
            elif cell == "code" and (line[:4] == "    " or len(line) <= 1) and for_studio == 2:
                line = line[4:]
            else:
                for_studio = 0

            if '=== "Taipy Studio' in line:
                for_studio = 1
            if '=== "Python configuration"' in line:
                for_studio = 2

            if for_studio != 1:
                add_line(source, line, step)
                cell, source, notebook, execution_count = detect_new_cell(notebook, source, cell, line,
                                                                          execution_count)
    return notebook, execution_count


if __name__ == '__main__':
    notebook = {
        "cells": [],
        "metadata": {
            "language_info": {
                "codemirror_mode": {
                    "name": "ipython",
                    "version": 3
                },
                "file_extension": ".py",
                "mimetype": "text/x-python",
                "name": "python",
                "nbconvert_exporter": "python",
                "pygments_lexer": "ipython3"
            },
            "orig_nbformat": 4
        },
        "nbformat": 4,
        "nbformat_minor": 2
    }

    execution_count = 0
    notebook, execution_count = create_introduction(notebook, execution_count)
    notebook, execution_count = create_steps(notebook, execution_count)

    with open('getting_started.ipynb', 'w', encoding='utf-8') as f:
        json.dump(notebook, f, indent=2)
|
from taipy.gui import Gui, notify

text = "Original text"

# Definition of the page
page = """
# Getting started with Taipy GUI

My text: <|{text}|>

<|{text}|input|>

<|Run local|button|on_action=on_button_action|>
"""


def on_button_action(state):
    notify(state, 'info', f'The text is: {state.text}')
    state.text = "Button Pressed"


def on_change(state, var_name, var_value):
    if var_name == "text" and var_value == "Reset":
        state.text = ""
        return


Gui(page).run()
|
from transformers import AutoTokenizer
from transformers import AutoModelForSequenceClassification
from scipy.special import softmax
import numpy as np
import pandas as pd
from taipy.gui import Gui, notify

text = "Original text"

page = """
# Getting started with Taipy GUI

<|layout|columns=1 1|
<|
My text: <|{text}|>

Enter a word: <|{text}|input|>
<|Analyze|button|on_action=local_callback|>
|>

<|Table|expandable|
<|{dataframe}|table|width=100%|number_format=%.2f|>
|>
|>

<|layout|columns=1 1 1|
## Positive <|{np.mean(dataframe['Score Pos'])}|text|format=%.2f|raw|>

## Neutral <|{np.mean(dataframe['Score Neu'])}|text|format=%.2f|raw|>

## Negative <|{np.mean(dataframe['Score Neg'])}|text|format=%.2f|raw|>
|>

<|{dataframe}|chart|type=bar|x=Text|y[1]=Score Pos|y[2]=Score Neu|y[3]=Score Neg|y[4]=Overall|color[1]=green|color[2]=grey|color[3]=red|type[4]=line|>
"""

MODEL = "cardiffnlp/twitter-roberta-base-sentiment"
tokenizer = AutoTokenizer.from_pretrained(MODEL)
model = AutoModelForSequenceClassification.from_pretrained(MODEL)

dataframe = pd.DataFrame({"Text": [''],
                          "Score Pos": [0.33],
                          "Score Neu": [0.33],
                          "Score Neg": [0.33],
                          "Overall": [0]})
dataframe2 = dataframe.copy()


def analyze_text(text):
    # Run for Roberta Model
    encoded_text = tokenizer(text, return_tensors='pt')
    output = model(**encoded_text)
    scores = output[0][0].detach().numpy()
    scores = softmax(scores)

    return {"Text": text[:50],
            "Score Pos": scores[2],
            "Score Neu": scores[1],
            "Score Neg": scores[0],
            "Overall": scores[2] - scores[0]}


def local_callback(state):
    notify(state, 'Info', f'The text is: {state.text}', True)

    temp = state.dataframe.copy()
    scores = analyze_text(state.text)
    temp.loc[len(temp)] = scores
    state.dataframe = temp
    state.text = ""


path = ""
treatment = 0

page_file = """
<|{path}|file_selector|extensions=.txt|label=Upload .txt file|on_action=analyze_file|>

<|{f'Downloading {treatment}%...'}|>

<br/>

<|Table|expandable|
<|{dataframe2}|table|width=100%|number_format=%.2f|>
|>

<br/>

<|{dataframe2}|chart|type=bar|x=Text|y[1]=Score Pos|y[2]=Score Neu|y[3]=Score Neg|y[4]=Overall|color[1]=green|color[2]=grey|color[3]=red|type[4]=line|height=600px|>
"""


def analyze_file(state):
    state.dataframe2 = dataframe2
    state.treatment = 0
    with open(state.path, "r", encoding='utf-8') as f:
        data = f.read()
        # split lines and eliminates duplicates
        file_list = list(dict.fromkeys(data.replace('\n', ' ').split(".")[:-1]))

    for i in range(len(file_list)):
        text = file_list[i]
        state.treatment = int((i + 1) * 100 / len(file_list))
        temp = state.dataframe2.copy()
        scores = analyze_text(text)
        temp.loc[len(temp)] = scores
        state.dataframe2 = temp

    state.path = None


pages = {"/": "<|toggle|theme|>\n<center>\n<|navbar|>\n</center>",
         "line": page,
         "text": page_file}

Gui(pages=pages).run()
|
from transformers import AutoTokenizer
from transformers import AutoModelForSequenceClassification
from scipy.special import softmax
import numpy as np
import pandas as pd
from taipy.gui import Gui, notify

text = "Original text"

page = """
# Getting started with Taipy GUI

<|layout|columns=1 1|
<|
My text: <|{text}|>

Enter a word: <|{text}|input|>
<|Analyze|button|on_action=local_callback|>
|>

<|Table|expandable|
<|{dataframe}|table|width=100%|number_format=%.2f|>
|>
|>

<|layout|columns=1 1 1|
## Positive <|{float(np.mean(dataframe['Score Pos']))}|text|format=%.2f|raw|>%

## Neutral <|{float(np.mean(dataframe['Score Neu']))}|text|format=%.2f|raw|>%

## Negative <|{float(np.mean(dataframe['Score Neg']))}|text|format=%.2f|raw|>%
|>

<br/>

<|{dataframe}|chart|type=bar|x=Text|y[1]=Score Pos|y[2]=Score Neu|y[3]=Score Neg|y[4]=Overall|color[1]=green|color[2]=grey|color[3]=red|type[4]=line|>
"""

MODEL = "cardiffnlp/twitter-roberta-base-sentiment"
tokenizer = AutoTokenizer.from_pretrained(MODEL)
model = AutoModelForSequenceClassification.from_pretrained(MODEL)

dataframe = pd.DataFrame({"Text": [''],
                          "Score Pos": [0.33],
                          "Score Neu": [0.33],
                          "Score Neg": [0.33],
                          "Overall": [0]})


def analyze_text(text):
    # Run for Roberta Model
    encoded_text = tokenizer(text, return_tensors='pt')
    output = model(**encoded_text)
    scores = output[0][0].detach().numpy()
    scores = softmax(scores)

    return {"Text": text,
            "Score Pos": scores[2],
            "Score Neu": scores[1],
            "Score Neg": scores[0],
            "Overall": scores[2] - scores[0]}


def local_callback(state):
    notify(state, 'Info', f'The text is: {state.text}', True)

    temp = state.dataframe.copy()
    scores = analyze_text(state.text)
    temp.loc[len(temp)] = scores
    state.dataframe = temp
    state.text = ""


Gui(page).run()
|
from taipy.gui import Gui

text = "Original text"

page = """
# Getting started with Taipy GUI

My text: <|{text}|>

<|{text}|input|>
"""

Gui(page).run()
|
from transformers import AutoTokenizer
from transformers import AutoModelForSequenceClassification
from scipy.special import softmax
import numpy as np
import pandas as pd
from taipy.gui import Gui, notify

text = "Original text"

MODEL = "cardiffnlp/twitter-roberta-base-sentiment"
tokenizer = AutoTokenizer.from_pretrained(MODEL)
model = AutoModelForSequenceClassification.from_pretrained(MODEL)

dataframe = pd.DataFrame({"Text": [''],
                          "Score Pos": [0.33],
                          "Score Neu": [0.33],
                          "Score Neg": [0.33],
                          "Overall": [0]})


# Torch is, for now, only available for Python versions between 3.8 and 3.10.
# If you cannot install these packages, just return a dictionary of random numbers from `analyze_text(text)`.
def analyze_text(text):
    # Run for Roberta Model
    encoded_text = tokenizer(text, return_tensors='pt')
    output = model(**encoded_text)
    scores = output[0][0].detach().numpy()
    scores = softmax(scores)

    return {"Text": text,
            "Score Pos": scores[2],
            "Score Neu": scores[1],
            "Score Neg": scores[0],
            "Overall": scores[2] - scores[0]}


def local_callback(state):
    notify(state, 'Info', f'The text is: {state.text}', True)

    temp = state.dataframe.copy()
    scores = analyze_text(state.text)
    temp.loc[len(temp)] = scores
    state.dataframe = temp
    state.text = ""


page = """
<|toggle|theme|>

# Getting started with Taipy GUI

My text: <|{text}|>

Enter a word: <|{text}|input|>
<|Analyze|button|on_action=local_callback|>

## Positive <|{float(np.mean(dataframe['Score Pos']))}|text|format=%.2f|>%

## Neutral <|{float(np.mean(dataframe['Score Neu']))}|text|format=%.2f|>%

## Negative <|{float(np.mean(dataframe['Score Neg']))}|text|format=%.2f|>%

<|{dataframe}|table|number_format=%.2f|>

<|{dataframe}|chart|type=bar|x=Text|y[1]=Score Pos|y[2]=Score Neu|y[3]=Score Neg|y[4]=Overall|color[1]=green|color[2]=grey|color[3]=red|type[4]=line|>
"""

Gui(page).run()
|
from taipy import Gui

Gui(page="# Getting started with *Taipy*").run()
|
import pandas as pd
from taipy.gui import Gui, notify

text = "Original text"

page = """
<|toggle|theme|>

# Getting started with Taipy GUI

My text: <|{text}|>

<|{text}|input|>
<|Analyze|button|on_action=local_callback|>

<|{dataframe}|table|number_format=%.2f|>

<|{dataframe}|chart|type=bar|x=Text|y[1]=Score Pos|y[2]=Score Neu|y[3]=Score Neg|y[4]=Overall|color[1]=green|color[2]=grey|color[3]=red|type[4]=line|>
"""

dataframe = pd.DataFrame({"Text": ['Test', 'Other', 'Love'],
                          "Score Pos": [1, 1, 4],
                          "Score Neu": [2, 3, 1],
                          "Score Neg": [1, 2, 0],
                          "Overall": [0, -1, 4]})


def local_callback(state):
    notify(state, 'info', f'The text is: {state.text}')
    temp = state.dataframe.copy()
    temp.loc[len(temp)] = {"Text": state.text, "Score Pos": 0, "Score Neu": 0, "Score Neg": 0, "Overall": 0}
    state.dataframe = temp
    state.text = ""


Gui(page).run()
|
from taipy.gui import Gui, Markdown

input_pid = None
navigation = [("/add_product", "Add Product"), ("/", "Home")]


def submit_button(state):
    ...


page = """
# Admin Panel

## Add Product

<|{input_pid}|input|>
<|submit|button|on_action=submit_button|>
"""

page1 = """
ii
"""

Gui(page=page + page1).run(title="Go To Mall | Admin Panel", port=4000)
|
from taipy import Config
from taipy import Core, Gui
from taipy.gui import Markdown
import taipy as tp

from pages.home import home_md
from pages.temp import temp_page


def build_message(name: str):
    return f"Hello! {name}"


input_name_data_node_cfg = Config.configure_data_node(id="input_name")
message_data_node_cfg = Config.configure_data_node(id="message")
build_msg_task_cfg = Config.configure_task(
    "build_msg", build_message, input_name_data_node_cfg, message_data_node_cfg)
scenario_cfg = Config.configure_scenario(
    "scenario", task_configs=[build_msg_task_cfg])

# making GUI
input_name = "M ahi"
message = None


def submit_scenario(state):
    state.scenario.input_name.write(state.input_name)
    state.scenario.submit()
    state.message = state.scenario.message.read()


love = "sazia"

page = """
Name: <|{input_name}|input|>
<|submit|button|on_action=submit_scenario|>
Message: <|{message}|text|>
Kima : <|All world are need to safe|text|>
"""

pages = {
    "/": home_md,
    "temp": temp_page,
}

if __name__ == "__main__":
    Core().run()

    # manage scenarios and data
    scenario = tp.create_scenario(scenario_cfg)

    # instance of run gui
    Gui(pages=pages).run(title="Mahi Template",
                         port=5000,
                         favicon="https://www.youtube.com/s/gaming/emoji/7ff574f2/emoji_u1f602.png",
                         )
|
from taipy.gui import Gui, Markdown

name = "maho"
...
page = """
...
<|{dialog_is_visible}|dialog|
Enter a name: <|{name}|input|>
|>
...
"""
...
pages = {
    "/": page,
    'page1': Markdown("# My first page"),
    'page2': Markdown("# My second page")
}

Gui(pages=pages).run(title="Mahi App", port=5001)
|
from taipy.gui import Markdown, Gui

text = "Welcome to home page"
mahi_text = "So how are you"

home_md = Markdown("""
# **Home**

<|{text}|>

<br/>

<|{mahi_text}|>
""")
|
from taipy.gui import Gui, Markdown


def fahren_to_celcius(fahr):
    return (fahr - 32) * 5 / 9


fahr = 100
celcious = fahren_to_celcius(fahr)

temp_page = Markdown("""
# **Home**

Fahrenheit : <|{fahr}|>

Converted Celcius : <|{celcious}|>
""")
|
from taipy.gui import Gui, Markdown, notify

value = 0

single_page = Markdown("""
# Taipy Application

Check the documentation [here](https://docs.taipy.io/en/latest/manuals/about/).

<|{value}|slider|on_change=on_slider|>

<|Push|button|on_action=on_push|>
""")


def on_push(state):
    ...


def on_slider(state):
    if state.value == 100:
        notify(state, "success", "Taipy is running!")


def on_change(state, var_name: str, var_value):
    ...


if __name__ == "__main__":
    gui = Gui(single_page)
    gui.run()
|
from taipy.gui import Gui
from taipy.config import Config

from pages.root.root import *
from pages.page_1.page_1 import page_1_md
from pages.page_2.page_2 import page_2_md

Config.load("config/config.toml")


def on_change(state, var_name: str, var_value):
    ...


pages = {"/": root_md,
         "page_1": page_1_md,
         "page_2": page_2_md}

if __name__ == "__main__":
    gui = Gui(pages=pages)
    gui.run()
|
from taipy.config import Config
|
import pandas as pd
import numpy as np
from sklearn.linear_model import LinearRegression


def clean_data(data):
    ...
    return data.dropna().drop_duplicates()


def predict(data):
    model = LinearRegression()
    model.fit(data[["x"]], data[["y"]])
    data["y_pred"] = model.predict(data[["x"]])
    return data


def evaluate(data):
    ...
    return np.random.rand()
|
from taipy.gui import Markdown
import pandas as pd

scenario = None
results = None


def show_results(state):
    state.results = state.scenario.predictions.read()


page_1_md = Markdown("pages/page_1/page_1.md")
|
from taipy.gui import Markdown

root_md = Markdown("pages/root/root.md")
|
from taipy.gui import Markdown
import pandas as pd

path = None
data = None


def drop_csv(state):
    state.data = pd.read_csv(state.path)


page_2_md = Markdown("pages/page_2/page_2.md")
|
from taipy.gui import Gui, Markdown, notify

from pages.root.root import *
from pages.page_1.page_1 import page_1_md
from pages.page_2.page_2 import page_2_md


def on_change(state, var_name: str, var_value):
    ...


pages = {"/": root_md,
         "page_1": page_1_md,
         "page_2": page_2_md}

if __name__ == "__main__":
    gui = Gui(pages=pages)
    gui.run()
|
from taipy.gui import Markdown

page_1_md = Markdown("pages/page_1/page_1.md")
|
from taipy.gui import Markdown

root_md = Markdown("pages/root/root.md")
|
from taipy.gui import Markdown

page_2_md = Markdown("pages/page_2/page_2.md")
|
from taipy.gui import Gui
from pages import home
import os

gui = Gui(page=home.page).run(
    title="Demo Logistic Regression",
    port=os.environ.get("PORT", "8000"),
)
|
from config.nodes import (
    node_initial_dataset,
    node_prediction,
    node_prediction_model,
    node_X,
    node_Y,
)
from models.data import make_X, make_Y
from models.predict import train, predict
from taipy import Config

task_make_X = Config.configure_task(
    id="make_X",
    input=[node_initial_dataset],
    output=node_X,
    function=make_X,
)

task_make_Y = Config.configure_task(
    id="make_Y",
    input=[node_initial_dataset],
    output=node_Y,
    function=make_Y,
)

task_train = Config.configure_task(
    id="train", input=[node_X, node_Y], output=node_prediction_model, function=train
)

task_predict = Config.configure_task(
    id="predict", input=[node_X, node_Y], output=node_prediction, function=predict
)
|
from taipy import Config

node_initial_dataset = Config.configure_data_node(id="initial_dataset")
node_X = Config.configure_data_node(id="X")
node_Y = Config.configure_data_node(id="Y")
node_prediction_model = Config.configure_data_node(id="prediction_model")
node_prediction = Config.configure_data_node(id="prediction")
|
from taipy import Config

from config.tasks import task_make_X, task_make_Y, task_train, task_predict

pipeline_train = Config.configure_pipeline(
    id="train", task_configs=[task_make_X, task_make_Y, task_train]
)

pipeline_predict = Config.configure_pipeline(id="predict", task_configs=[task_predict])
|
from sklearn.linear_model import LogisticRegression


def train(X, Y):
    X_train, Y_train = X[:50], Y[:50]
    X_test, Y_test = X[50:], Y[50:]

    # Using scikit-learn default
    regression = LogisticRegression(random_state=0).fit(X_train, Y_train)

    print(f"intercept: {regression.intercept_} coefficients: {regression.coef_}")
    print(f"train accuracy: {regression.score(X_train, Y_train)}")
    print(f"test accuracy: {regression.score(X_test, Y_test)}")

    return regression


def predict(x, regression: LogisticRegression):
    return regression.predict(x)
|
import numpy as np

# Set seed for random number generator
rg = np.random.default_rng(seed=0)

# Create an array with 500 rows and 3 columns.
# This will serve as initial data node
initial_dataset = rg.normal(size=(500, 3))


def make_X(dataset):
    # Remove the first column which can be considered as noise
    X1 = np.delete(dataset, 0, axis=1)
    # Now create two more columns correlated with X1
    X2 = X1 + 0.1 * np.random.normal(size=(500, 2))
    X = np.concatenate((X1, X2), axis=1)
    return X


def make_Y(dataset):
    P = 1 / (1 + np.e ** (-np.matmul(dataset, [1, 1, 1])))
    Y = P > 0.5
    return Y
|
from taipy.gui import Markdown
import taipy as tp
from taipy.core.job.job import Job

from config.pipelines import pipeline_train
from models.data import initial_dataset


def job_status_changed(pipeline, job: Job):
    print(job.status)


def training_button_clicked(state, id, action):
    pipeline = tp.create_pipeline(pipeline_train)

    # Set initial dataset:
    pipeline.initial_dataset.write(initial_dataset)

    tp.subscribe_pipeline(
        pipeline=pipeline,
        callback=job_status_changed,
    )

    tp.submit(pipeline)


page = Markdown("src/pages/home.md")
|
from taipy import Gui

page = """
# Hello World 🌍 with *Taipy*

This is my first Taipy test app. And it is running fine!
"""

Gui(page).run(use_reloader=True)  # use_reloader=True if you are in development
|
from taipy import Gui

from page.dashboard_fossil_fuels_consumption import *

if __name__ == "__main__":
    Gui(page).run(
        use_reloader=True,
        title="Test",
        dark_mode=False,
    )  # use_reloader=True if you are in development
|
import pandas as pd
import taipy as tp

from data.data import dataset_fossil_fuels_gdp

country = "Spain"
region = "Europe"
lov_region = list(dataset_fossil_fuels_gdp.Entity.unique())


def load_dataset(_country):
    """Load dataset for a specific country.

    Args:
        _country (str): The name of the country.

    Returns:
        pandas.DataFrame: A DataFrame containing the fossil fuels GDP data for the specified country.
    """
    dataset_fossil_fuels_gdp_cp = dataset_fossil_fuels_gdp.reset_index()
    dataset_fossil_fuels_gdp_cp = dataset_fossil_fuels_gdp_cp[
        dataset_fossil_fuels_gdp["Entity"] == _country
    ]
    return dataset_fossil_fuels_gdp_cp


dataset_fossil_fuels_gdp_cp = load_dataset(country)


def on_change_country(state):
    """Update the dataset based on the selected country.

    Args:
        state (object): The "state" of the variables run by the program (values change through selectors)

    Returns:
        None
    """
    print("country is:", state.country)
    _country = state.country
    dataset_fossil_fuels_gdp_cp = load_dataset(_country)
    state.dataset_fossil_fuels_gdp_cp = dataset_fossil_fuels_gdp_cp


layout = {"yaxis": {"range": [0, 100000]}, "xaxis": {"range": [1965, 2021]}}

page = """
# Fossil fuel consumption per capita by country*

Data comes from <a href="https://ourworldindata.org/grapher/per-capita-fossil-energy-vs-gdp" target="_blank">Our World in Data</a>

<|{country}|selector|lov={lov_region}|on_change=on_change_country|dropdown|label=Country/Region|>

<|{dataset_fossil_fuels_gdp_cp}|chart|type=plot|x=Year|y=Fossil fuels per capita (kWh)|height=200%|layout={layout}|>

## Fossil fuel per capita for <|{country}|>:

<|{dataset_fossil_fuels_gdp_cp}|table|height=400px|width=95%|>
"""
|
import pandas as pd

dataset_fossil_fuels_gdp = pd.read_csv("data/per-capita-fossil-energy-vs-gdp.csv")
country_codes = pd.read_csv("./data/country_codes.csv")

dataset_fossil_fuels_gdp = dataset_fossil_fuels_gdp.merge(
    country_codes[["alpha-3", "region"]], how="left", left_on="Code", right_on="alpha-3"
)

dataset_fossil_fuels_gdp = dataset_fossil_fuels_gdp[
    ~dataset_fossil_fuels_gdp["Fossil fuels per capita (kWh)"].isnull()
].reset_index()

dataset_fossil_fuels_gdp["Fossil fuels per capita (kWh)"] = (
    dataset_fossil_fuels_gdp["Fossil fuels per capita (kWh)"] * 1000
)
|
# This is a sample Python script.

# Press Maj+F10 to execute it or replace it with your code.
# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.


def print_hi(name):
    # Use a breakpoint in the code line below to debug your script.
    print(f'Hi, {name}')  # Press Ctrl+F8 to toggle the breakpoint.


# Press the green button in the gutter to run the script.
if __name__ == '__main__':
    print_hi('PyCharm')

# See PyCharm help at https://www.jetbrains.com/help/pycharm/
|
from taipy.gui import Gui, notify
from taipy import Config
import pandas as pd

tasks = pd.DataFrame({
    "Type": [],
    "Name": [],
    "Completed": []
})
tasks["Completed"] = tasks["Completed"].astype("bool")

task_name = ""
task_type = ""

page = """
# TODO Scheduler

Enter Task: <|{task_name}|input|>

Type: <|{task_type}|selector|lov=Personal;Home;Work|dropdown|>

<|Add Task|button|on_action=on_task_add|>

<|{tasks}|table|filter|editable|editable[Type]=False|on_edit=on_task_edit|on_delete=on_task_delete|style=style_completed|>
"""


def style_completed(_1, _2, values):
    if values["Completed"]:
        return "strikeout"


def on_task_edit(state, var_name, payload):
    if var_name == "tasks":
        index = payload["index"]
        col = payload["col"]
        value = payload["user_value"]

        new_tasks = state.tasks.copy()
        new_tasks.loc[index, col] = value
        state.tasks = new_tasks
        notify(state, "I", "Task Updated.")


def on_task_delete(state, var_name, payload):
    if var_name == "tasks":
        index = payload["index"]
        state.tasks = state.tasks.drop(index=index)
        notify(state, "E", "Task Deleted.")


def on_task_add(state, var_name, payload):
    if state.task_name == "" or state.task_type == "":
        notify(state, "E", "Task Name or Task Type Not Set.")
        return False

    _task_type = state.task_type
    _task_name = state.task_name
    _isCompleted = False

    new_data = pd.DataFrame([[_task_type, _task_name, _isCompleted]], columns=state.tasks.columns)
    state.tasks = pd.concat([new_data, state.tasks], axis=0, ignore_index=True)
    notify(state, "S", "New Task Added Successfully.")


Gui(page, css_file="todo.css").run(use_reloader=True)
|
#!/usr/bin/env python

"""The setup script."""

import json
import os

from setuptools import find_namespace_packages, find_packages, setup

with open("README.md") as readme_file:
    readme = readme_file.read()

with open(f"src{os.sep}taipy{os.sep}config{os.sep}version.json") as version_file:
    version = json.load(version_file)
    version_string = f'{version.get("major", 0)}.{version.get("minor", 0)}.{version.get("patch", 0)}'
    if vext := version.get("ext"):
        version_string = f"{version_string}.{vext}"

requirements = ["toml>=0.10,<0.11", "deepdiff>=6.2,<6.3"]

test_requirements = ["pytest>=3.8"]

setup(
    author="Avaiga",
    author_email="[email protected]",
    python_requires=">=3.8",
    classifiers=[
        "Intended Audience :: Developers",
        "License :: OSI Approved :: Apache Software License",
        "Natural Language :: English",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: 3.10",
    ],
    description="A Taipy package dedicated to easily configure a Taipy application.",
    install_requires=requirements,
    long_description=readme,
    long_description_content_type="text/markdown",
    include_package_data=True,
    license="Apache License 2.0",
    keywords="taipy-config",
    name="taipy-config",
    package_dir={"": "src"},
    packages=find_namespace_packages(where="src") + find_packages(
        include=["taipy", "taipy.config", "taipy.config.*", "taipy.logger", "taipy.logger.*"]
    ),
    test_suite="tests",
    tests_require=test_requirements,
    url="https://github.com/avaiga/taipy-config",
    version=version_string,
    zip_safe=False,
)
|
import ast
import re
from pathlib import Path
from typing import List


def _get_function_delimiters(initial_line, lines):
    begin = end = initial_line
    while True:
        if lines[begin - 1] == "\n":
            break
        begin -= 1
    if lines[end].endswith("(\n"):
        while ":\n" not in lines[end]:
            end += 1
    if '"""' in lines[end + 1]:
        while True:
            if '"""\n' in lines[end]:
                break
            end += 1
    return begin, end + 1


def _get_file_lines(filename: str) -> List[str]:
    # Get file lines for later
    with open(filename) as f:
        return f.readlines()


def _get_file_ast(filename: str):
    # Get raw text and build ast
    _config = Path(filename)
    _tree = _config.read_text()
    return ast.parse(_tree)


def _build_base_config_pyi(filename, base_pyi):
    lines = _get_file_lines(filename)
    tree = _get_file_ast(filename)

    class_lineno = [f.lineno for f in ast.walk(tree) if isinstance(f, ast.ClassDef) and f.name == "Config"]
    begin_class, end_class = _get_function_delimiters(class_lineno[0] - 1, lines)
    base_pyi += "".join(lines[begin_class:end_class])

    functions = [f.lineno for f in ast.walk(tree) if isinstance(f, ast.FunctionDef) and not f.name.startswith("__")]
    for ln in functions:
        begin_line, end_line = _get_function_delimiters(ln - 1, lines)
        base_pyi += "".join(lines[begin_line:end_line])
        base_pyi = __add_docstring(base_pyi, lines, end_line)
        base_pyi += "\n"

    return base_pyi


def __add_docstring(base_pyi, lines, end_line):
    if '"""' not in lines[end_line - 1]:
        base_pyi += '\t\t""""""\n'.replace("\t", "    ")
    return base_pyi


def _build_entity_config_pyi(base_pyi, filename, entity_map):
    lines = _get_file_lines(filename)
    tree = _get_file_ast(filename)

    functions = {}
    for f in ast.walk(tree):
        if isinstance(f, ast.FunctionDef):
            if "_configure" in f.name and not f.name.startswith("__"):
                functions[f.name] = f.lineno
            elif "_set_default" in f.name and not f.name.startswith("__"):
                functions[f.name] = f.lineno
            elif "_add" in f.name and not f.name.startswith("__"):
                functions[f.name] = f.lineno

    for k, v in functions.items():
        begin_line, end_line = _get_function_delimiters(v - 1, lines)
        try:
            func = "".join(lines[begin_line:end_line])
            func = func if not k.startswith("_") else func.replace(k, entity_map.get(k))
            func = __add_docstring(func, lines, end_line) + "\n"
            base_pyi += func
        except Exception:
            print(f"key={k}")
            raise

    return base_pyi


def _generate_entity_and_property_maps(filename):
    entities_map = {}
    property_map = {}
    entity_tree = _get_file_ast(filename)
    functions = [
        f for f in ast.walk(entity_tree) if isinstance(f, ast.Call) and getattr(f.func, "id", "") == "_inject_section"
    ]

    for f in functions:
        entity = ast.unparse(f.args[0])
        entities_map[entity] = {}
        property_map[eval(ast.unparse(f.args[1]))] = entity
        # Remove class name from function map
        text = ast.unparse(f.args[-1]).replace(f"{entity}.", "")
        matches = re.findall(r"\((.*?)\)", text)
        for m in matches:
            v, k = m.replace("'", "").split(",")
            entities_map[entity][k.strip()] = v

    return entities_map, property_map


def _generate_acessors(base_pyi, property_map):
    for property, cls in property_map.items():
        return_template = f"Dict[str, {cls}]" if property != "job_config" else f"{cls}"
        template = ("\t@_Classproperty\n" + f'\tdef {property}(cls) -> {return_template}:\n\t\t""""""\n').replace(
            "\t", "    "
        )
        base_pyi += template + "\n"
    return base_pyi


def _build_header(filename):
    _file = Path(filename)
    return _file.read_text() + "\n\n"


if __name__ == "__main__":
    header_file = "stubs/pyi_header.py"
    config_init = Path("taipy-core/src/taipy/core/config/__init__.py")
    base_config = "src/taipy/config/config.py"

    dn_filename = "taipy-core/src/taipy/core/config/data_node_config.py"
    job_filename = "taipy-core/src/taipy/core/config/job_config.py"
    scenario_filename = "taipy-core/src/taipy/core/config/scenario_config.py"
    task_filename = "taipy-core/src/taipy/core/config/task_config.py"
    migration_filename = "taipy-core/src/taipy/core/config/migration_config.py"
    core_filename = "taipy-core/src/taipy/core/config/core_section.py"

    entities_map, property_map = _generate_entity_and_property_maps(config_init)
    pyi = _build_header(header_file)
    pyi = _build_base_config_pyi(base_config, pyi)
    pyi = _generate_acessors(pyi, property_map)
    pyi = _build_entity_config_pyi(pyi, scenario_filename, entities_map["ScenarioConfig"])
    pyi = _build_entity_config_pyi(pyi, dn_filename, entities_map["DataNodeConfig"])
    pyi = _build_entity_config_pyi(pyi, task_filename, entities_map["TaskConfig"])
    pyi = _build_entity_config_pyi(pyi, job_filename, entities_map["JobConfig"])
    pyi = _build_entity_config_pyi(pyi, migration_filename, entities_map["MigrationConfig"])
    pyi = _build_entity_config_pyi(pyi, core_filename, entities_map["CoreSection"])

    with open("src/taipy/config/config.pyi", "w") as f:
        f.writelines(pyi)
|
import json
from typing import Any, Callable, Dict, List, Optional, Union
from datetime import timedelta

from taipy.core.config import DataNodeConfig, JobConfig, ScenarioConfig, TaskConfig, MigrationConfig, CoreSection

from .checker.issue_collector import IssueCollector
from .common._classproperty import _Classproperty
from .common._config_blocker import _ConfigBlocker
from .common.frequency import Frequency
from .common.scope import Scope
from .global_app.global_app_config import GlobalAppConfig
from .section import Section
from .unique_section import UniqueSection
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#        http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#        http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
|
import os
import pathlib
from unittest import TestCase, mock

from src.taipy.logger._taipy_logger import _TaipyLogger


class TestTaipyLogger(TestCase):
    def test_taipy_logger(self):
        _TaipyLogger._get_logger().info("baz")
        _TaipyLogger._get_logger().debug("qux")

    def test_taipy_logger_configured_by_file(self):
        path = os.path.join(pathlib.Path(__file__).parent.resolve(), "logger.conf")
        with mock.patch.dict(os.environ, {"TAIPY_LOGGER_CONFIG_PATH": path}):
            _TaipyLogger._get_logger().info("baz")
            _TaipyLogger._get_logger().debug("qux")
|
import os

import pytest

from src.taipy.config.config import Config
from src.taipy.config.exceptions.exceptions import ConfigurationUpdateBlocked
from tests.config.utils.named_temporary_file import NamedTemporaryFile

config_from_filename = NamedTemporaryFile(
    """
[TAIPY]
custom_property_not_overwritten = true
custom_property_overwritten = 10
"""
)

config_from_environment = NamedTemporaryFile(
    """
[TAIPY]
custom_property_overwritten = 11
"""
)


def test_load_from_environment_overwrite_load_from_filename():
    os.environ[Config._ENVIRONMENT_VARIABLE_NAME_WITH_CONFIG_PATH] = config_from_environment.filename
    Config.load(config_from_filename.filename)

    assert Config.global_config.custom_property_not_overwritten is True
    assert Config.global_config.custom_property_overwritten == 11
    os.environ.pop(Config._ENVIRONMENT_VARIABLE_NAME_WITH_CONFIG_PATH)


def test_block_load_from_environment_overwrite_load_from_filename():
    Config.load(config_from_filename.filename)
    assert Config.global_config.custom_property_not_overwritten is True
    assert Config.global_config.custom_property_overwritten == 10

    Config.block_update()

    with pytest.raises(ConfigurationUpdateBlocked):
        os.environ[Config._ENVIRONMENT_VARIABLE_NAME_WITH_CONFIG_PATH] = config_from_environment.filename
        Config.load(config_from_filename.filename)

    os.environ.pop(Config._ENVIRONMENT_VARIABLE_NAME_WITH_CONFIG_PATH)
    assert Config.global_config.custom_property_not_overwritten is True
    assert Config.global_config.custom_property_overwritten == 10  # Config.load failed to override the values
|
import pytest

from src.taipy.config._config import _Config
from src.taipy.config._config_comparator._config_comparator import _ConfigComparator
from src.taipy.config._serializer._toml_serializer import _TomlSerializer
from src.taipy.config.checker.issue_collector import IssueCollector
from src.taipy.config.config import Config
from src.taipy.config.section import Section
from tests.config.utils.section_for_tests import SectionForTest
from tests.config.utils.unique_section_for_tests import UniqueSectionForTest


@pytest.fixture(scope="function", autouse=True)
def reset():
    reset_configuration_singleton()
    register_test_sections()


def reset_configuration_singleton():
    Config.unblock_update()

    Config._default_config = _Config()._default_config()
    Config._python_config = _Config()
    Config._file_config = _Config()
    Config._env_file_config = _Config()
    Config._applied_config = _Config()
    Config._collector = IssueCollector()
    Config._serializer = _TomlSerializer()
    Config._comparator = _ConfigComparator()


def register_test_sections():
    Config._register_default(UniqueSectionForTest("default_attribute"))
    Config.configure_unique_section_for_tests = UniqueSectionForTest._configure
    Config.unique_section_name = Config.unique_sections[UniqueSectionForTest.name]

    Config._register_default(SectionForTest(Section._DEFAULT_KEY, "default_attribute", prop="default_prop", prop_int=0))
    Config.configure_section_for_tests = SectionForTest._configure
    Config.section_name = Config.sections[SectionForTest.name]
|
from unittest import mock

from src.taipy.config import Config
from src.taipy.config._config import _Config
from src.taipy.config._config_comparator._comparator_result import _ComparatorResult
from src.taipy.config.global_app.global_app_config import GlobalAppConfig
from tests.config.utils.section_for_tests import SectionForTest
from tests.config.utils.unique_section_for_tests import UniqueSectionForTest


class TestConfigComparator:
    unique_section_1 = UniqueSectionForTest(attribute="unique_attribute_1", prop="unique_prop_1")
    unique_section_1b = UniqueSectionForTest(attribute="unique_attribute_1", prop="unique_prop_1b")
    section_1 = SectionForTest("section_1", attribute="attribute_1", prop="prop_1")
    section_2 = SectionForTest("section_2", attribute=2, prop="prop_2")
    section_2b = SectionForTest("section_2", attribute="attribute_2", prop="prop_2b")
    section_3 = SectionForTest("section_3", attribute=[1, 2, 3, 4], prop=["prop_1"])
    section_3b = SectionForTest("section_3", attribute=[1, 2], prop=["prop_1", "prop_2", "prop_3"])
    section_3c = SectionForTest("section_3", attribute=[2, 1], prop=["prop_3", "prop_1", "prop_2"])

    def test_comparator_compare_method_call(self):
        _config_1 = _Config._default_config()
        _config_2 = _Config._default_config()

        with mock.patch(
            "src.taipy.config._config_comparator._config_comparator._ConfigComparator._find_conflict_config"
        ) as mck:
            Config._comparator._find_conflict_config(_config_1, _config_2)
            mck.assert_called_once_with(_config_1, _config_2)

    def test_comparator_without_diff(self):
        _config_1 = _Config._default_config()
        _config_2 = _Config._default_config()

        config_diff = Config._comparator._find_conflict_config(_config_1, _config_2)
        assert isinstance(config_diff, _ComparatorResult)
        assert config_diff == {}

    def test_comparator_with_updated_global_config(self):
        _config_1 = _Config._default_config()
        _config_1._global_config = GlobalAppConfig(foo="bar")

        _config_2 = _Config._default_config()
        _config_2._global_config = GlobalAppConfig(foo="baz", bar="foo")

        config_diff = Config._comparator._find_conflict_config(_config_1, _config_2)
        assert config_diff.get("unconflicted_sections") is None
        assert config_diff.get("conflicted_sections") is not None

        conflicted_config_diff = config_diff["conflicted_sections"]
        assert len(conflicted_config_diff["modified_items"]) == 1
        assert conflicted_config_diff["modified_items"][0] == (
            ("Global Configuration", "foo", None),
            ("bar", "baz"),
        )
        assert len(conflicted_config_diff["added_items"]) == 1
        assert conflicted_config_diff["added_items"][0] == (
            ("Global Configuration", "bar", None),
            "foo",
        )

    def test_comparator_with_new_section(self):
        _config_1 = _Config._default_config()

        # The first "section_name" is added to the Config
        _config_2 = _Config._default_config()
        _config_2._sections[SectionForTest.name] = {"section_1": self.section_1}

        config_diff = Config._comparator._find_conflict_config(_config_1, _config_2)
        conflicted_config_diff = config_diff["conflicted_sections"]
        assert len(conflicted_config_diff["added_items"]) == 1
        assert conflicted_config_diff["added_items"][0] == (
            ("section_name", None, None),
            {"section_1": {"attribute": "attribute_1", "prop": "prop_1"}},
        )
        assert conflicted_config_diff.get("modified_items") is None
        assert conflicted_config_diff.get("removed_items") is None

        # A new "section_name" is added to the Config
        _config_3 = _Config._default_config()
        _config_3._sections[SectionForTest.name] = {"section_1": self.section_1, "section_2": self.section_2}

        config_diff = Config._comparator._find_conflict_config(_config_2, _config_3)
        conflicted_config_diff = config_diff["conflicted_sections"]
        assert len(conflicted_config_diff["added_items"]) == 1
        assert conflicted_config_diff["added_items"][0] == (
            ("section_name", "section_2", None),
            {"attribute": "2:int", "prop": "prop_2"},
        )
        assert conflicted_config_diff.get("modified_items") is None
        assert conflicted_config_diff.get("removed_items") is None

    def test_comparator_with_removed_section(self):
        _config_1 = _Config._default_config()

        # All "section_name" sections are removed from the Config
        _config_2 = _Config._default_config()
        _config_2._sections[SectionForTest.name] = {"section_1": self.section_1}

        config_diff = Config._comparator._find_conflict_config(_config_2, _config_1)
        conflicted_config_diff = config_diff["conflicted_sections"]
        assert len(conflicted_config_diff["removed_items"]) == 1
        assert conflicted_config_diff["removed_items"][0] == (
            ("section_name", None, None),
            {"section_1": {"attribute": "attribute_1", "prop": "prop_1"}},
        )
        assert conflicted_config_diff.get("modified_items") is None
        assert conflicted_config_diff.get("added_items") is None

        # Section "section_1" is removed from the Config
        _config_3 = _Config._default_config()
        _config_3._sections[SectionForTest.name] = {"section_1": self.section_1, "section_2": self.section_2}

        config_diff = Config._comparator._find_conflict_config(_config_3, _config_2)
        conflicted_config_diff = config_diff["conflicted_sections"]
        assert len(conflicted_config_diff["removed_items"]) == 1
        assert conflicted_config_diff["removed_items"][0] == (
            ("section_name", "section_2", None),
            {"attribute": "2:int", "prop": "prop_2"},
        )
        assert conflicted_config_diff.get("modified_items") is None
        assert conflicted_config_diff.get("added_items") is None

    def test_comparator_with_modified_section(self):
        _config_1 = _Config._default_config()
        _config_1._sections[SectionForTest.name] = {"section_2": self.section_2}

        # The "section_2" section is modified in the second Config
        _config_2 = _Config._default_config()
        _config_2._sections[SectionForTest.name] = {"section_2": self.section_2b}

        config_diff = Config._comparator._find_conflict_config(_config_1, _config_2)
        conflicted_config_diff = config_diff["conflicted_sections"]
        assert len(conflicted_config_diff["modified_items"]) == 2
        assert conflicted_config_diff["modified_items"][0] == (
            ("section_name", "section_2", "attribute"),
            ("2:int", "attribute_2"),
        )
        assert conflicted_config_diff["modified_items"][1] == (
            ("section_name", "section_2", "prop"),
            ("prop_2", "prop_2b"),
        )
        assert conflicted_config_diff.get("removed_items") is None
        assert conflicted_config_diff.get("added_items") is None

    def test_comparator_with_modified_list_attribute(self):
        _config_1 = _Config._default_config()
        _config_1._sections[SectionForTest.name] = {"section_3": self.section_3}

        # The list attributes of "section_3" are modified in the second Config
        _config_2 = _Config._default_config()
        _config_2._sections[SectionForTest.name] = {"section_3": self.section_3b}

        config_diff = Config._comparator._find_conflict_config(_config_1, _config_2)
        conflicted_config_diff = config_diff["conflicted_sections"]
        assert len(conflicted_config_diff["modified_items"]) == 2
        assert conflicted_config_diff["modified_items"][0] == (
            ("section_name", "section_3", "prop"),
            (["prop_1"], ["prop_1", "prop_2", "prop_3"]),
        )
        assert conflicted_config_diff["modified_items"][1] == (
            ("section_name", "section_3", "attribute"),
            (["1:int", "2:int", "3:int", "4:int"], ["1:int", "2:int"]),
        )
        assert conflicted_config_diff.get("removed_items") is None
        assert conflicted_config_diff.get("added_items") is None

    def test_comparator_with_different_order_list_attributes(self):
        _config_1 = _Config._default_config()
        _config_1._sections[SectionForTest.name] = {"section_3": self.section_3b}

        # Create _config_2 with different order of list attributes
        _config_2 = _Config._default_config()
        _config_2._sections[SectionForTest.name] = {"section_3": self.section_3c}

        config_diff = Config._comparator._find_conflict_config(_config_1, _config_2)

        # There should be no difference since the order of list attributes is ignored
        assert config_diff == {}

    def test_comparator_with_new_unique_section(self):
        _config_1 = _Config._default_config()

        _config_2 = _Config._default_config()
        _config_2._unique_sections[UniqueSectionForTest.name] = self.unique_section_1

        config_diff = Config._comparator._find_conflict_config(_config_1, _config_2)
        conflicted_config_diff = config_diff["conflicted_sections"]
        assert len(conflicted_config_diff["added_items"]) == 1
        assert conflicted_config_diff["added_items"][0] == (
            ("unique_section_name", None, None),
            {"attribute": "unique_attribute_1", "prop": "unique_prop_1"},
        )
        assert conflicted_config_diff.get("modified_items") is None
        assert conflicted_config_diff.get("removed_items") is None

    def test_comparator_with_removed_unique_section(self):
        _config_1 = _Config._default_config()

        _config_2 = _Config._default_config()
        _config_2._unique_sections[UniqueSectionForTest.name] = self.unique_section_1

        config_diff = Config._comparator._find_conflict_config(_config_2, _config_1)
        conflicted_config_diff = config_diff["conflicted_sections"]
        assert len(conflicted_config_diff["removed_items"]) == 1
        assert conflicted_config_diff["removed_items"][0] == (
            ("unique_section_name", None, None),
            {"attribute": "unique_attribute_1", "prop": "unique_prop_1"},
        )
        assert conflicted_config_diff.get("modified_items") is None
        assert conflicted_config_diff.get("added_items") is None

    def test_comparator_with_modified_unique_section(self):
        _config_1 = _Config._default_config()
        _config_1._unique_sections[UniqueSectionForTest.name] = self.unique_section_1

        # The unique section is modified in the second Config
        _config_2 = _Config._default_config()
        _config_2._unique_sections[UniqueSectionForTest.name] = self.unique_section_1b

        config_diff = Config._comparator._find_conflict_config(_config_1, _config_2)
        conflicted_config_diff = config_diff["conflicted_sections"]
        assert len(conflicted_config_diff["modified_items"]) == 1
        assert conflicted_config_diff["modified_items"][0] == (
            ("unique_section_name", "prop", None),
            ("unique_prop_1", "unique_prop_1b"),
        )
        assert conflicted_config_diff.get("removed_items") is None
        assert conflicted_config_diff.get("added_items") is None

    def test_unconflicted_section_name_store_statically(self):
        Config._comparator._add_unconflicted_section("section_name_1")
        assert Config._comparator._unconflicted_sections == {"section_name_1"}

        Config._comparator._add_unconflicted_section("section_name_2")
        assert Config._comparator._unconflicted_sections == {"section_name_1", "section_name_2"}

        Config._comparator._add_unconflicted_section("section_name_1")
        assert Config._comparator._unconflicted_sections == {"section_name_1", "section_name_2"}

    def test_unconflicted_diff_is_stored_separated_from_conflicted_ones(self):
        _config_1 = _Config._default_config()
        _config_1._unique_sections[UniqueSectionForTest.name] = self.unique_section_1
        _config_1._sections[SectionForTest.name] = {"section_2": self.section_2}

        _config_2 = _Config._default_config()
        _config_2._unique_sections[UniqueSectionForTest.name] = self.unique_section_1b
        _config_2._sections[SectionForTest.name] = {"section_2": self.section_2b}

        # Compare 2 Configurations
        config_diff = Config._comparator._find_conflict_config(_config_1, _config_2)
        assert config_diff.get("unconflicted_sections") is None
        assert config_diff.get("conflicted_sections") is not None
        assert len(config_diff["conflicted_sections"]["modified_items"]) == 3

        # Ignore any diff of "section_name" and compare
        Config._comparator._add_unconflicted_section("section_name")
        config_diff = Config._comparator._find_conflict_config(_config_1, _config_2)
        assert config_diff.get("unconflicted_sections") is not None
        assert len(config_diff["unconflicted_sections"]["modified_items"]) == 2
        assert config_diff.get("conflicted_sections") is not None
        assert len(config_diff["conflicted_sections"]["modified_items"]) == 1

        # Ignore any diff of the unique section and compare
        Config._comparator._add_unconflicted_section(["unique_section_name"])
        config_diff = Config._comparator._find_conflict_config(_config_1, _config_2)
        assert config_diff.get("unconflicted_sections") is not None
        assert len(config_diff["unconflicted_sections"]["modified_items"]) == 3
        assert config_diff.get("conflicted_sections") is None

    def test_comparator_log_message(self, caplog):
        _config_1 = _Config._default_config()
        _config_1._unique_sections[UniqueSectionForTest.name] = self.unique_section_1
        _config_1._sections[SectionForTest.name] = {"section_2": self.section_2}

        _config_2 = _Config._default_config()
        _config_2._unique_sections[UniqueSectionForTest.name] = self.unique_section_1b
        _config_2._sections[SectionForTest.name] = {"section_2": self.section_2b}

        # Ignore any diff of "section_name" and compare
        Config._comparator._add_unconflicted_section("section_name")
        Config._comparator._find_conflict_config(_config_1, _config_2)

        error_messages = caplog.text.strip().split("\n")
        assert len(error_messages) == 5
        assert all(
            t in error_messages[0]
            for t in [
                "INFO",
                "There are non-conflicting changes between the current configuration and the current configuration:",
            ]
        )
        assert 'section_name "section_2" has attribute "attribute" modified: 2:int -> attribute_2' in error_messages[1]
        assert 'section_name "section_2" has attribute "prop" modified: prop_2 -> prop_2b' in error_messages[2]
        assert all(
            t in error_messages[3]
            for t in [
                "ERROR",
                "The current configuration conflicts with the current configuration:",
            ]
        )
        assert 'unique_section_name "prop" was modified: unique_prop_1 -> unique_prop_1b' in error_messages[4]
        caplog.clear()

        Config._comparator._find_conflict_config(_config_1, _config_2, old_version_number="1.0")

        error_messages = caplog.text.strip().split("\n")
        assert len(error_messages) == 5
        assert all(
            t in error_messages[0]
            for t in [
                "INFO",
                "There are non-conflicting changes between the configuration for version 1.0 and the current configuration:",
            ]
        )
        assert all(
            t in error_messages[3]
            for t in [
                "ERROR",
                "The configuration for version 1.0 conflicts with the current configuration:",
            ]
        )
        caplog.clear()

        Config._comparator._compare(
            _config_1,
            _config_2,
            version_number_1="1.0",
            version_number_2="2.0",
        )
        error_messages = caplog.text.strip().split("\n")
        assert len(error_messages) == 3
        assert all(
            t in error_messages[0]
            for t in ["INFO", "Differences between version 1.0 Configuration and version 2.0 Configuration:"]
        )
        caplog.clear()
|
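The comparator tests above check a diff grouped into added_items, modified_items and removed_items buckets, with an empty dict meaning "no conflict". A minimal sketch of that kind of three-way diff over plain dictionaries (illustrative only, not taipy's _ConfigComparator; diff_sections is a made-up name):

def diff_sections(old: dict, new: dict) -> dict:
    # Group changes the way the comparator tests expect: added, removed, modified.
    diff = {"added_items": [], "removed_items": [], "modified_items": []}
    for key in new.keys() - old.keys():
        diff["added_items"].append((key, new[key]))
    for key in old.keys() - new.keys():
        diff["removed_items"].append((key, old[key]))
    for key in old.keys() & new.keys():
        if old[key] != new[key]:
            diff["modified_items"].append((key, (old[key], new[key])))
    # Drop empty buckets so an unchanged config compares as {}.
    return {k: v for k, v in diff.items() if v}

assert diff_sections({"prop": "a"}, {"prop": "a"}) == {}
assert diff_sections({"prop": "a"}, {"prop": "b"}) == {"modified_items": [("prop", ("a", "b"))]}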
import os from unittest import mock import pytest from src.taipy.config.exceptions.exceptions import InvalidConfigurationId from tests.config.utils.section_for_tests import SectionForTest from tests.config.utils.unique_section_for_tests import UniqueSectionForTest class WrongUniqueSection(UniqueSectionForTest): name = "1wrong_id" class WrongSection(SectionForTest): name = "correct_name" def test_section_uses_valid_id(): with pytest.raises(InvalidConfigurationId): WrongUniqueSection(attribute="foo") with pytest.raises(InvalidConfigurationId): WrongSection("wrong id", attribute="foo") with pytest.raises(InvalidConfigurationId): WrongSection("1wrong_id", attribute="foo") with pytest.raises(InvalidConfigurationId): WrongSection("wrong_@id", attribute="foo") def test_templated_properties_are_replaced(): with mock.patch.dict(os.environ, {"foo": "bar", "baz": "1"}): u_sect = UniqueSectionForTest(attribute="attribute", tpl_property="ENV[foo]") assert u_sect.tpl_property == "bar" sect = SectionForTest(id="my_id", attribute="attribute", tpl_property="ENV[baz]:int") assert sect.tpl_property == 1
|
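test_templated_properties_are_replaced above resolves values such as "ENV[foo]" and "ENV[baz]:int" from environment variables. A rough sketch of that substitution, assuming a simple ENV[name] / ENV[name]:type convention (resolve_template and _CASTS are illustrative names, not taipy's _TemplateHandler):

import os
import re

_ENV_PATTERN = re.compile(r"^ENV\[([a-zA-Z_]\w*)\](?::(\w+))?$")
_CASTS = {"int": int, "float": float, "str": str, "bool": lambda v: v.lower() == "true"}

def resolve_template(value, env=os.environ):
    # Leave non-template values untouched; replace ENV[...] with the variable's value.
    match = _ENV_PATTERN.match(value) if isinstance(value, str) else None
    if not match:
        return value
    raw = env[match.group(1)]
    cast = _CASTS.get(match.group(2) or "str", str)
    return cast(raw)

assert resolve_template("ENV[baz]:int", env={"baz": "1"}) == 1
assert resolve_template("plain", env={}) == "plain"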
# # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License.
|
from src.taipy.config.config import Config from src.taipy.config.global_app.global_app_config import GlobalAppConfig from src.taipy.config.section import Section from tests.config.utils.section_for_tests import SectionForTest from tests.config.utils.unique_section_for_tests import UniqueSectionForTest def _test_default_global_app_config(global_config: GlobalAppConfig): assert global_config is not None assert not global_config.notification assert len(global_config.properties) == 0 def test_default_configuration(): default_config = Config._default_config assert default_config._unique_sections is not None assert len(default_config._unique_sections) == 1 assert default_config._unique_sections[UniqueSectionForTest.name] is not None assert default_config._unique_sections[UniqueSectionForTest.name].attribute == "default_attribute" assert default_config._sections is not None assert len(default_config._sections) == 1 _test_default_global_app_config(default_config._global_config) _test_default_global_app_config(Config.global_config) _test_default_global_app_config(GlobalAppConfig().default_config()) def test_register_default_configuration(): Config._register_default(SectionForTest(Section._DEFAULT_KEY, "default_attribute", prop1="prop1")) # Replace the first default section Config._register_default(SectionForTest(Section._DEFAULT_KEY, "default_attribute", prop2="prop2")) default_section = Config.sections[SectionForTest.name][Section._DEFAULT_KEY] assert len(default_section.properties) == 1 assert default_section.prop2 == "prop2" assert default_section.prop1 is None
|
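test_register_default_configuration above shows that registering a new default section replaces the previous one rather than merging with it. A hypothetical registry illustrating that "last registered default wins" behaviour (Registry is a made-up class, not taipy code):

class Registry:
    def __init__(self):
        self.sections = {}

    def register_default(self, name, **properties):
        # Re-registering a default replaces the previous default entirely.
        self.sections.setdefault(name, {})["default"] = properties

reg = Registry()
reg.register_default("section_name", prop1="prop1")
reg.register_default("section_name", prop2="prop2")
assert reg.sections["section_name"]["default"] == {"prop2": "prop2"}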
import pytest from src.taipy.config.config import Config from src.taipy.config.exceptions.exceptions import LoadingError from tests.config.utils.named_temporary_file import NamedTemporaryFile def test_node_can_not_appear_twice(): config = NamedTemporaryFile( """ [unique_section_name] attribute = "my_attribute" [unique_section_name] attribute = "other_attribute" """ ) with pytest.raises(LoadingError, match="Can not load configuration"): Config.load(config.filename) def test_skip_configuration_outside_nodes(): config = NamedTemporaryFile( """ foo = "bar" """ ) Config.load(config.filename) assert Config.global_config.foo is None
|
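test_node_can_not_appear_twice above expects a LoadingError when the same table is declared twice, which mirrors the TOML rule that a table may only be defined once. As a point of comparison, the standard tomllib parser (Python 3.11+) rejects the same input; this snippet is independent of taipy's loader:

import sys

if sys.version_info >= (3, 11):
    import tomllib

    duplicated = """
    [unique_section_name]
    attribute = "my_attribute"
    [unique_section_name]
    attribute = "other_attribute"
    """
    try:
        tomllib.loads(duplicated)
    except tomllib.TOMLDecodeError as exc:
        print("duplicate table rejected:", exc)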
import os from unittest import mock import pytest from src.taipy.config.config import Config from src.taipy.config.exceptions.exceptions import InconsistentEnvVariableError, MissingEnvVariableError from tests.config.utils.named_temporary_file import NamedTemporaryFile def test_override_default_configuration_with_code_configuration(): assert not Config.global_config.root_folder == "foo" assert len(Config.unique_sections) == 1 assert Config.unique_sections["unique_section_name"] is not None assert Config.unique_sections["unique_section_name"].attribute == "default_attribute" assert Config.unique_sections["unique_section_name"].prop is None assert len(Config.sections) == 1 assert len(Config.sections["section_name"]) == 1 assert Config.sections["section_name"] is not None assert Config.sections["section_name"]["default"].attribute == "default_attribute" Config.configure_global_app(root_folder="foo") assert Config.global_config.root_folder == "foo" Config.configure_unique_section_for_tests("foo", prop="bar") assert len(Config.unique_sections) == 1 assert Config.unique_sections["unique_section_name"] is not None assert Config.unique_sections["unique_section_name"].attribute == "foo" assert Config.unique_sections["unique_section_name"].prop == "bar" Config.configure_section_for_tests("my_id", "baz", prop="qux") assert len(Config.unique_sections) == 1 assert Config.sections["section_name"] is not None assert Config.sections["section_name"]["my_id"].attribute == "baz" assert Config.sections["section_name"]["my_id"].prop == "qux" def test_override_default_config_with_code_config_including_env_variable_values(): Config.configure_global_app() assert Config.global_config.foo is None Config.configure_global_app(foo="bar") assert Config.global_config.foo == "bar" with mock.patch.dict(os.environ, {"FOO": "foo"}): Config.configure_global_app(foo="ENV[FOO]") assert Config.global_config.foo == "foo" def test_override_default_configuration_with_file_configuration(): tf = NamedTemporaryFile( """ [TAIPY] foo = "bar" """ ) assert Config.global_config.foo is None Config.load(tf.filename) assert Config.global_config.foo == "bar" def test_override_default_config_with_file_config_including_env_variable_values(): tf = NamedTemporaryFile( """ [TAIPY] foo_attribute = "ENV[FOO]:int" bar_attribute = "ENV[BAR]:bool" """ ) assert Config.global_config.foo_attribute is None assert Config.global_config.bar_attribute is None with mock.patch.dict(os.environ, {"FOO": "foo", "BAR": "true"}): with pytest.raises(InconsistentEnvVariableError): Config.load(tf.filename) Config.global_config.foo_attribute with mock.patch.dict(os.environ, {"FOO": "5"}): with pytest.raises(MissingEnvVariableError): Config.load(tf.filename) Config.global_config.bar_attribute with mock.patch.dict(os.environ, {"FOO": "6", "BAR": "TRUe"}): Config.load(tf.filename) assert Config.global_config.foo_attribute == 6 assert Config.global_config.bar_attribute def test_code_configuration_does_not_override_file_configuration(): config_from_filename = NamedTemporaryFile( """ [TAIPY] foo = 2 """ ) Config.override(config_from_filename.filename) Config.configure_global_app(foo=21) assert Config.global_config.foo == 2 # From file config def test_code_configuration_does_not_override_file_configuration_including_env_variable_values(): config_from_filename = NamedTemporaryFile( """ [TAIPY] foo = 2 """ ) Config.override(config_from_filename.filename) with mock.patch.dict(os.environ, {"FOO": "21"}): Config.configure_global_app(foo="ENV[FOO]") assert Config.global_config.foo == 
2 # From file config def test_file_configuration_overrides_code_configuration(): config_from_filename = NamedTemporaryFile( """ [TAIPY] foo = 2 """ ) Config.configure_global_app(foo=21) Config.load(config_from_filename.filename) assert Config.global_config.foo == 2 # From file config def test_file_configuration_overrides_code_configuration_including_env_variable_values(): config_from_filename = NamedTemporaryFile( """ [TAIPY] foo = "ENV[FOO]:int" """ ) Config.configure_global_app(foo=21) with mock.patch.dict(os.environ, {"FOO": "2"}): Config.load(config_from_filename.filename) assert Config.global_config.foo == 2 # From file config def test_override_default_configuration_with_multiple_configurations(): file_config = NamedTemporaryFile( """ [TAIPY] foo = 10 bar = "baz" """ ) # Default config is applied assert Config.global_config.foo is None assert Config.global_config.bar is None # Code config is applied Config.configure_global_app(foo="bar") assert Config.global_config.foo == "bar" assert Config.global_config.bar is None # File config is applied Config.load(file_config.filename) assert Config.global_config.foo == 10 assert Config.global_config.bar == "baz" def test_override_default_configuration_with_multiple_configurations_including_environment_variable_values(): file_config = NamedTemporaryFile( """ [TAIPY] att = "ENV[BAZ]" """ ) with mock.patch.dict(os.environ, {"FOO": "bar", "BAZ": "qux"}): # Default config is applied assert Config.global_config.att is None # Code config is applied Config.configure_global_app(att="ENV[FOO]") assert Config.global_config.att == "bar" # File config is applied Config.load(file_config.filename) assert Config.global_config.att == "qux"
|
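The override tests above establish a precedence order: default values, then code configuration, then the file loaded with Config.load, which wins. collections.ChainMap gives a compact way to model that lookup order; this is only an analogy, not how taipy composes its _applied_config:

from collections import ChainMap

default_cfg = {"foo": None, "bar": None}
code_cfg = {"foo": "bar"}             # e.g. Config.configure_global_app(foo="bar")
file_cfg = {"foo": 10, "bar": "baz"}  # values read from the TOML file

# The first mapping wins, so the file configuration overrides the code
# configuration, which overrides the defaults -- matching the assertions above.
applied = ChainMap(file_cfg, code_cfg, default_cfg)
assert applied["foo"] == 10
assert applied["bar"] == "baz"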
import pytest from src.taipy.config import Config from src.taipy.config.exceptions.exceptions import ConfigurationUpdateBlocked from tests.config.utils.section_for_tests import SectionForTest from tests.config.utils.unique_section_for_tests import UniqueSectionForTest def test_unique_section_registration_and_usage(): assert Config.unique_sections is not None assert Config.unique_sections[UniqueSectionForTest.name] is not None assert Config.unique_sections[UniqueSectionForTest.name].attribute == "default_attribute" assert Config.unique_sections[UniqueSectionForTest.name].prop is None mySection = Config.configure_unique_section_for_tests(attribute="my_attribute", prop="my_prop") assert Config.unique_sections is not None assert Config.unique_sections[UniqueSectionForTest.name] is not None assert mySection is not None assert Config.unique_sections[UniqueSectionForTest.name].attribute == "my_attribute" assert mySection.attribute == "my_attribute" assert Config.unique_sections[UniqueSectionForTest.name].prop == "my_prop" assert mySection.prop == "my_prop" myNewSection = Config.configure_unique_section_for_tests(attribute="my_new_attribute", prop="my_new_prop") assert Config.unique_sections is not None assert Config.unique_sections[UniqueSectionForTest.name] is not None assert myNewSection is not None assert mySection is not None assert Config.unique_sections[UniqueSectionForTest.name].attribute == "my_new_attribute" assert myNewSection.attribute == "my_new_attribute" assert mySection.attribute == "my_new_attribute" assert Config.unique_sections[UniqueSectionForTest.name].prop == "my_new_prop" assert myNewSection.prop == "my_new_prop" assert mySection.prop == "my_new_prop" def test_sections_exposed_as_attribute(): assert Config.unique_section_name.attribute == "default_attribute" Config.configure_unique_section_for_tests("my_attribute") assert Config.unique_section_name.attribute == "my_attribute" assert Config.section_name["default"].attribute == "default_attribute" Config.configure_section_for_tests(id="my_id", attribute="my_attribute") assert Config.section_name["my_id"].attribute == "my_attribute" def test_section_registration_and_usage(): assert Config.sections is not None assert len(Config.sections) == 1 assert Config.sections[SectionForTest.name] is not None assert len(Config.sections[SectionForTest.name]) == 1 assert Config.sections[SectionForTest.name]["default"] is not None assert Config.sections[SectionForTest.name]["default"].attribute == "default_attribute" assert Config.sections[SectionForTest.name]["default"].prop == "default_prop" assert Config.sections[SectionForTest.name]["default"].foo is None myFirstSection = Config.configure_section_for_tests(id="first", attribute="my_attribute", prop="my_prop", foo="bar") assert Config.sections is not None assert len(Config.sections) == 1 assert Config.sections[SectionForTest.name] is not None assert len(Config.sections[SectionForTest.name]) == 2 assert Config.sections[SectionForTest.name]["default"] is not None assert Config.sections[SectionForTest.name]["default"].attribute == "default_attribute" assert Config.sections[SectionForTest.name]["default"].prop == "default_prop" assert Config.sections[SectionForTest.name]["default"].foo is None assert Config.sections[SectionForTest.name]["first"] is not None assert Config.sections[SectionForTest.name]["first"].attribute == "my_attribute" assert Config.sections[SectionForTest.name]["first"].prop == "my_prop" assert Config.sections[SectionForTest.name]["first"].foo == "bar" assert 
myFirstSection.attribute == "my_attribute" assert myFirstSection.prop == "my_prop" assert myFirstSection.foo == "bar" myNewSection = Config.configure_section_for_tests(id="second", attribute="my_new_attribute", prop="my_new_prop") assert Config.sections is not None assert len(Config.sections) == 1 assert Config.sections[SectionForTest.name] is not None assert len(Config.sections[SectionForTest.name]) == 3 assert Config.sections[SectionForTest.name]["default"] is not None assert Config.sections[SectionForTest.name]["default"].attribute == "default_attribute" assert Config.sections[SectionForTest.name]["default"].prop == "default_prop" assert Config.sections[SectionForTest.name]["default"].foo is None assert Config.sections[SectionForTest.name]["first"] is not None assert Config.sections[SectionForTest.name]["first"].attribute == "my_attribute" assert Config.sections[SectionForTest.name]["first"].prop == "my_prop" assert Config.sections[SectionForTest.name]["first"].foo == "bar" assert Config.sections[SectionForTest.name]["second"] is not None assert Config.sections[SectionForTest.name]["second"].attribute == "my_new_attribute" assert Config.sections[SectionForTest.name]["second"].prop == "my_new_prop" assert Config.sections[SectionForTest.name]["second"].foo is None assert myFirstSection.attribute == "my_attribute" assert myFirstSection.prop == "my_prop" assert myFirstSection.foo == "bar" assert myNewSection.attribute == "my_new_attribute" assert myNewSection.prop == "my_new_prop" assert myNewSection.foo is None my2ndSection = Config.configure_section_for_tests(id="second", attribute="my_2nd_attribute", prop="my_2nd_prop") assert Config.sections is not None assert len(Config.sections) == 1 assert Config.sections[SectionForTest.name] is not None assert len(Config.sections[SectionForTest.name]) == 3 assert Config.sections[SectionForTest.name]["default"] is not None assert Config.sections[SectionForTest.name]["default"].attribute == "default_attribute" assert Config.sections[SectionForTest.name]["default"].prop == "default_prop" assert Config.sections[SectionForTest.name]["default"].foo is None assert Config.sections[SectionForTest.name]["first"] is not None assert Config.sections[SectionForTest.name]["first"].attribute == "my_attribute" assert Config.sections[SectionForTest.name]["first"].prop == "my_prop" assert Config.sections[SectionForTest.name]["first"].foo == "bar" assert Config.sections[SectionForTest.name]["second"] is not None assert Config.sections[SectionForTest.name]["second"].attribute == "my_2nd_attribute" assert Config.sections[SectionForTest.name]["second"].prop == "my_2nd_prop" assert Config.sections[SectionForTest.name]["second"].foo is None assert myFirstSection.attribute == "my_attribute" assert myFirstSection.prop == "my_prop" assert myFirstSection.foo == "bar" assert myNewSection.attribute == "my_2nd_attribute" assert myNewSection.prop == "my_2nd_prop" assert myNewSection.foo is None assert my2ndSection.attribute == "my_2nd_attribute" assert my2ndSection.prop == "my_2nd_prop" assert my2ndSection.foo is None def test_block_registration(): myUniqueSection = Config.configure_unique_section_for_tests(attribute="my_unique_attribute", prop="my_unique_prop") mySection = Config.configure_section_for_tests(id="section_id", attribute="my_attribute", prop="my_prop", foo="bar") Config.block_update() with pytest.raises(ConfigurationUpdateBlocked): Config.configure_unique_section_for_tests(attribute="my_new_unique_attribute", prop="my_new_unique_prop") with 
pytest.raises(ConfigurationUpdateBlocked): Config.configure_section_for_tests(id="new", attribute="my_attribute", prop="my_prop", foo="bar") with pytest.raises(ConfigurationUpdateBlocked): myUniqueSection.attribute = "foo" with pytest.raises(ConfigurationUpdateBlocked): myUniqueSection.properties = {"foo": "bar"} # myUniqueSection stay the same assert myUniqueSection.attribute == "my_unique_attribute" assert myUniqueSection.properties == {"prop": "my_unique_prop"} with pytest.raises(ConfigurationUpdateBlocked): mySection.attribute = "foo" with pytest.raises(ConfigurationUpdateBlocked): mySection.properties = {"foo": "foo"} # mySection stay the same assert mySection.attribute == "my_attribute" assert mySection.properties == {"prop": "my_prop", "foo": "bar", "prop_int": 0}
|
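test_block_registration above freezes the configuration after Config.block_update() so that later setters raise ConfigurationUpdateBlocked. A stripped-down version of that guard, assuming a class-level flag checked by a decorator (Blocker and UpdateBlocked are illustrative names; taipy's real _ConfigBlocker is more involved):

class UpdateBlocked(Exception):
    pass

class Blocker:
    _blocked = False

    @classmethod
    def block(cls):
        cls._blocked = True

    @classmethod
    def check(cls, func):
        # Wrap setters so they raise once the configuration is frozen.
        def wrapper(*args, **kwargs):
            if cls._blocked:
                raise UpdateBlocked("Configuration is frozen")
            return func(*args, **kwargs)
        return wrapper

class GuardedSection:
    @Blocker.check
    def set_attribute(self, value):
        self._attribute = value

s = GuardedSection()
s.set_attribute("ok")
Blocker.block()
try:
    s.set_attribute("rejected")
except UpdateBlocked:
    pass
assert s._attribute == "ok"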
import pytest from src.taipy.config.config import Config from src.taipy.config.section import Section from tests.config.utils.named_temporary_file import NamedTemporaryFile from tests.config.utils.section_for_tests import SectionForTest from tests.config.utils.section_of_sections_list_for_tests import SectionOfSectionsListForTest @pytest.fixture def _init_list_section_for_test(): Config._register_default(SectionOfSectionsListForTest(Section._DEFAULT_KEY, [], prop="default_prop", prop_int=0)) Config.configure_list_section_for_tests = SectionOfSectionsListForTest._configure Config.list_section_name = Config.sections[SectionOfSectionsListForTest.name] def test_applied_config_compilation_does_not_change_other_configs(): assert len(Config._default_config._unique_sections) == 1 assert Config._default_config._unique_sections["unique_section_name"] is not None assert Config._default_config._unique_sections["unique_section_name"].attribute == "default_attribute" assert Config._default_config._unique_sections["unique_section_name"].prop is None assert len(Config._python_config._unique_sections) == 0 assert len(Config._file_config._unique_sections) == 0 assert len(Config._env_file_config._unique_sections) == 0 assert len(Config._applied_config._unique_sections) == 1 assert Config._applied_config._unique_sections["unique_section_name"] is not None assert Config._applied_config._unique_sections["unique_section_name"].attribute == "default_attribute" assert Config._applied_config._unique_sections["unique_section_name"].prop is None assert len(Config.unique_sections) == 1 assert Config.unique_sections["unique_section_name"] is not None assert Config.unique_sections["unique_section_name"].attribute == "default_attribute" assert Config.unique_sections["unique_section_name"].prop is None assert ( Config._applied_config._unique_sections["unique_section_name"] is not Config._default_config._unique_sections["unique_section_name"] ) Config.configure_unique_section_for_tests("qwe", prop="rty") assert len(Config._default_config._unique_sections) == 1 assert Config._default_config._unique_sections["unique_section_name"] is not None assert Config._default_config._unique_sections["unique_section_name"].attribute == "default_attribute" assert Config._default_config._unique_sections["unique_section_name"].prop is None assert len(Config._python_config._unique_sections) == 1 assert Config._python_config._unique_sections["unique_section_name"] is not None assert Config._python_config._unique_sections["unique_section_name"].attribute == "qwe" assert Config._python_config._unique_sections["unique_section_name"].prop == "rty" assert ( Config._python_config._unique_sections["unique_section_name"] != Config._default_config._unique_sections["unique_section_name"] ) assert len(Config._file_config._unique_sections) == 0 assert len(Config._env_file_config._unique_sections) == 0 assert len(Config._applied_config._unique_sections) == 1 assert Config._applied_config._unique_sections["unique_section_name"] is not None assert Config._applied_config._unique_sections["unique_section_name"].attribute == "qwe" assert Config._applied_config._unique_sections["unique_section_name"].prop == "rty" assert ( Config._python_config._unique_sections["unique_section_name"] != Config._applied_config._unique_sections["unique_section_name"] ) assert ( Config._default_config._unique_sections["unique_section_name"] != Config._applied_config._unique_sections["unique_section_name"] ) assert len(Config.unique_sections) == 1 assert 
Config.unique_sections["unique_section_name"] is not None assert Config.unique_sections["unique_section_name"].attribute == "qwe" assert Config.unique_sections["unique_section_name"].prop == "rty" def test_nested_section_instance_in_python(_init_list_section_for_test): s1_cfg = Config.configure_section_for_tests("s1", attribute="foo") s2_cfg = Config.configure_section_for_tests("s2", attribute="bar") ss_cfg = Config.configure_list_section_for_tests("ss", attribute="foo", sections_list=[s1_cfg, s2_cfg]) s1_config_applied_instance = Config.section_name["s1"] s1_config_python_instance = Config._python_config._sections[SectionForTest.name]["s1"] s2_config_applied_instance = Config.section_name["s2"] s2_config_python_instance = Config._python_config._sections[SectionForTest.name]["s2"] assert ss_cfg.sections_list[0] is s1_config_applied_instance assert ss_cfg.sections_list[0] is not s1_config_python_instance assert ss_cfg.sections_list[1] is s2_config_applied_instance assert ss_cfg.sections_list[1] is not s2_config_python_instance def _configure_in_toml(): return NamedTemporaryFile( content=""" [TAIPY] [section_name.s1] attribute = "foo" [section_name.s2] attribute = "bar" [list_section_name.ss] sections_list = [ "foo", "s1:SECTION", "s2:SECTION"] """ ) def test_nested_section_instance_load_toml(_init_list_section_for_test): toml_config = _configure_in_toml() Config.load(toml_config) s1_config_applied_instance = Config.section_name["s1"] s1_config_python_instance = Config._python_config._sections[SectionForTest.name]["s1"] s2_config_applied_instance = Config.section_name["s2"] s2_config_python_instance = Config._python_config._sections[SectionForTest.name]["s2"] ss_cfg = Config.list_section_name["ss"] assert ss_cfg.sections_list[0] == "foo" assert ss_cfg.sections_list[1] is s1_config_applied_instance assert ss_cfg.sections_list[1] is not s1_config_python_instance assert ss_cfg.sections_list[2] is s2_config_applied_instance assert ss_cfg.sections_list[2] is not s2_config_python_instance def test_nested_section_instance_override_toml(_init_list_section_for_test): toml_config = _configure_in_toml() Config.override(toml_config) s1_config_applied_instance = Config.section_name["s1"] s1_config_python_instance = Config._file_config._sections[SectionForTest.name]["s1"] s2_config_applied_instance = Config.section_name["s2"] s2_config_python_instance = Config._file_config._sections[SectionForTest.name]["s2"] ss_cfg = Config.list_section_name["ss"] assert ss_cfg.sections_list[0] == "foo" assert ss_cfg.sections_list[1] is s1_config_applied_instance assert ss_cfg.sections_list[1] is not s1_config_python_instance assert ss_cfg.sections_list[2] is s2_config_applied_instance assert ss_cfg.sections_list[1] is not s2_config_python_instance
|
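In the nested-section tests above, a TOML list such as [ "foo", "s1:SECTION", "s2:SECTION"] keeps plain strings as-is and resolves the tagged entries to already-registered section instances. An illustrative resolver over a plain registry dict, assuming that ":SECTION" suffix convention (resolve_section_refs is a made-up helper, not taipy's deserializer):

def resolve_section_refs(values, registry):
    # "name:SECTION" strings are looked up in the registry; anything else passes through.
    resolved = []
    for value in values:
        if isinstance(value, str) and value.endswith(":SECTION"):
            resolved.append(registry[value[: -len(":SECTION")]])
        else:
            resolved.append(value)
    return resolved

registry = {"s1": {"attribute": "foo"}, "s2": {"attribute": "bar"}}
out = resolve_section_refs(["foo", "s1:SECTION", "s2:SECTION"], registry)
assert out == ["foo", {"attribute": "foo"}, {"attribute": "bar"}]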
import datetime import json import os from unittest import mock from src.taipy.config import Config from src.taipy.config._serializer._json_serializer import _JsonSerializer from src.taipy.config.common.frequency import Frequency from src.taipy.config.common.scope import Scope from tests.config.utils.named_temporary_file import NamedTemporaryFile from tests.config.utils.section_for_tests import SectionForTest from tests.config.utils.unique_section_for_tests import UniqueSectionForTest def add(a, b): return a + b class CustomClass: a = None b = None class CustomEncoder(json.JSONEncoder): def default(self, o): if isinstance(o, datetime): result = {"__type__": "Datetime", "__value__": o.isoformat()} else: result = json.JSONEncoder.default(self, o) return result class CustomDecoder(json.JSONDecoder): def __init__(self, *args, **kwargs): json.JSONDecoder.__init__(self, object_hook=self.object_hook, *args, **kwargs) def object_hook(self, source): if source.get("__type__") == "Datetime": return datetime.fromisoformat(source.get("__value__")) else: return source def test_write_toml_configuration_file(): expected_toml_config = """ [TAIPY] [unique_section_name] attribute = "my_attribute" prop = "my_prop" prop_int = "1:int" prop_bool = "False:bool" prop_list = [ "p1", "1991-01-01T00:00:00:datetime", "1d0h0m0s:timedelta",] prop_scope = "SCENARIO:SCOPE" prop_freq = "QUARTERLY:FREQUENCY" baz = "ENV[QUX]" quux = "ENV[QUUZ]:bool" corge = [ "grault", "ENV[GARPLY]", "ENV[WALDO]:int", "3.0:float",] [section_name.default] attribute = "default_attribute" prop = "default_prop" prop_int = "0:int" [section_name.my_id] attribute = "my_attribute" prop = "default_prop" prop_int = "1:int" prop_bool = "False:bool" prop_list = [ "unique_section_name:SECTION",] prop_scope = "SCENARIO" baz = "ENV[QUX]" """.strip() tf = NamedTemporaryFile() with mock.patch.dict( os.environ, {"FOO": "in_memory", "QUX": "qux", "QUUZ": "true", "GARPLY": "garply", "WALDO": "17"} ): unique_section = Config.configure_unique_section_for_tests( attribute="my_attribute", prop="my_prop", prop_int=1, prop_bool=False, prop_list=["p1", datetime.datetime(1991, 1, 1), datetime.timedelta(days=1)], prop_scope=Scope.SCENARIO, prop_freq=Frequency.QUARTERLY, baz="ENV[QUX]", quux="ENV[QUUZ]:bool", corge=("grault", "ENV[GARPLY]", "ENV[WALDO]:int", 3.0), ) Config.configure_section_for_tests( "my_id", "my_attribute", prop_int=1, prop_bool=False, prop_list=[unique_section], prop_scope="SCENARIO", baz="ENV[QUX]", ) Config.backup(tf.filename) actual_config = tf.read().strip() assert actual_config == expected_toml_config def test_read_toml_configuration_file(): toml_config = """ [TAIPY] foo = "bar" [unique_section_name] attribute = "my_attribute" prop = "my_prop" prop_int = "1:int" prop_bool = "False:bool" prop_list = [ "p1", "1991-01-01T00:00:00:datetime", "1d0h0m0s:timedelta",] prop_scope = "SCENARIO:SCOPE" prop_freq = "QUARTERLY:FREQUENCY" baz = "ENV[QUX]" quux = "ENV[QUUZ]:bool" corge = [ "grault", "ENV[GARPLY]", "ENV[WALDO]:int", "3.0:float",] [TAIPY.custom_properties] bar = "baz" [section_name.default] attribute = "default_attribute" prop = "default_prop" prop_int = "0:int" [section_name.my_id] attribute = "my_attribute" prop = "default_prop" prop_int = "1:int" prop_bool = "False:bool" prop_list = [ "unique_section_name", "section_name.my_id",] prop_scope = "SCENARIO:SCOPE" baz = "ENV[QUX]" """.strip() tf = NamedTemporaryFile(toml_config) with mock.patch.dict( os.environ, {"FOO": "in_memory", "QUX": "qux", "QUUZ": "true", "GARPLY": "garply", "WALDO": "17"} ): 
Config.override(tf.filename) assert Config.global_config.foo == "bar" assert Config.global_config.custom_properties.get("bar") == "baz" assert Config.unique_sections is not None assert Config.unique_sections[UniqueSectionForTest.name] is not None assert Config.unique_sections[UniqueSectionForTest.name].attribute == "my_attribute" assert Config.unique_sections[UniqueSectionForTest.name].prop == "my_prop" assert Config.unique_sections[UniqueSectionForTest.name].prop_int == 1 assert Config.unique_sections[UniqueSectionForTest.name].prop_bool is False assert Config.unique_sections[UniqueSectionForTest.name].prop_list == [ "p1", datetime.datetime(1991, 1, 1), datetime.timedelta(days=1), ] assert Config.unique_sections[UniqueSectionForTest.name].prop_scope == Scope.SCENARIO assert Config.unique_sections[UniqueSectionForTest.name].prop_freq == Frequency.QUARTERLY assert Config.unique_sections[UniqueSectionForTest.name].baz == "qux" assert Config.unique_sections[UniqueSectionForTest.name].quux is True assert Config.unique_sections[UniqueSectionForTest.name].corge == [ "grault", "garply", 17, 3.0, ] assert Config.sections is not None assert len(Config.sections) == 1 assert Config.sections[SectionForTest.name] is not None assert len(Config.sections[SectionForTest.name]) == 2 assert Config.sections[SectionForTest.name]["default"] is not None assert Config.sections[SectionForTest.name]["default"].attribute == "default_attribute" assert Config.sections[SectionForTest.name]["default"].prop == "default_prop" assert Config.sections[SectionForTest.name]["default"].prop_int == 0 assert Config.sections[SectionForTest.name]["my_id"] is not None assert Config.sections[SectionForTest.name]["my_id"].attribute == "my_attribute" assert Config.sections[SectionForTest.name]["my_id"].prop == "default_prop" assert Config.sections[SectionForTest.name]["my_id"].prop_int == 1 assert Config.sections[SectionForTest.name]["my_id"].prop_bool is False assert Config.sections[SectionForTest.name]["my_id"].prop_list == ["unique_section_name", "section_name.my_id"] assert Config.sections[SectionForTest.name]["my_id"].prop_scope == Scope.SCENARIO assert Config.sections[SectionForTest.name]["my_id"].baz == "qux" tf2 = NamedTemporaryFile() Config.backup(tf2.filename) actual_config_2 = tf2.read().strip() assert actual_config_2 == toml_config def test_read_write_toml_configuration_file_with_function_and_class(): expected_toml_config = """ [TAIPY] [unique_section_name] attribute = "my_attribute" prop = "my_prop" prop_list = [ "tests.config.test_section_serialization.CustomEncoder:class", "tests.config.test_section_serialization.CustomDecoder:class",] [section_name.default] attribute = "default_attribute" prop = "default_prop" prop_int = "0:int" [section_name.my_id] attribute = "my_attribute" prop = "default_prop" prop_int = "0:int" prop_fct_list = [ "tests.config.test_section_serialization.add:function",] prop_class_list = [ "tests.config.test_section_serialization.CustomClass:class",] [section_name.my_id_2] attribute = "my_attribute_2" prop = "default_prop" prop_int = "0:int" prop_fct_list = [ "builtins.print:function", "builtins.pow:function",] """.strip() tf = NamedTemporaryFile() Config.configure_unique_section_for_tests( attribute="my_attribute", prop="my_prop", prop_list=[CustomEncoder, CustomDecoder], ) Config.configure_section_for_tests( "my_id", "my_attribute", prop_fct_list=[add], prop_class_list=[CustomClass], ) Config.configure_section_for_tests( "my_id_2", "my_attribute_2", prop_fct_list=[print, pow], ) 
Config.backup(tf.filename) actual_exported_toml = tf.read().strip() assert actual_exported_toml == expected_toml_config Config.override(tf.filename) tf2 = NamedTemporaryFile() Config.backup(tf2.filename) actual_exported_toml_2 = tf2.read().strip() assert actual_exported_toml_2 == expected_toml_config def test_write_json_configuration_file(): expected_json_config = """ { "TAIPY": {}, "unique_section_name": { "attribute": "my_attribute", "prop": "my_prop", "prop_int": "1:int", "prop_bool": "False:bool", "prop_list": [ "p1", "1991-01-01T00:00:00:datetime", "1d0h0m0s:timedelta" ], "prop_scope": "SCENARIO:SCOPE", "prop_freq": "QUARTERLY:FREQUENCY" }, "section_name": { "default": { "attribute": "default_attribute", "prop": "default_prop", "prop_int": "0:int" }, "my_id": { "attribute": "my_attribute", "prop": "default_prop", "prop_int": "1:int", "prop_bool": "False:bool", "prop_list": [ "unique_section_name:SECTION" ], "prop_scope": "SCENARIO", "baz": "ENV[QUX]" } } } """.strip() tf = NamedTemporaryFile() Config._serializer = _JsonSerializer() unique_section = Config.configure_unique_section_for_tests( attribute="my_attribute", prop="my_prop", prop_int=1, prop_bool=False, prop_list=["p1", datetime.datetime(1991, 1, 1), datetime.timedelta(days=1)], prop_scope=Scope.SCENARIO, prop_freq=Frequency.QUARTERLY, ) Config.configure_section_for_tests( "my_id", "my_attribute", prop_int=1, prop_bool=False, prop_list=[unique_section], prop_scope="SCENARIO", baz="ENV[QUX]", ) Config.backup(tf.filename) actual_config = tf.read() assert actual_config == expected_json_config def test_read_json_configuration_file(): json_config = """ { "TAIPY": { "root_folder": "./taipy/", "storage_folder": ".data/", "repository_type": "filesystem" }, "unique_section_name": { "attribute": "my_attribute", "prop": "my_prop", "prop_int": "1:int", "prop_bool": "False:bool", "prop_list": [ "p1", "1991-01-01T00:00:00:datetime", "1d0h0m0s:timedelta" ], "prop_scope": "SCENARIO:SCOPE", "prop_freq": "QUARTERLY:FREQUENCY" }, "section_name": { "default": { "attribute": "default_attribute", "prop": "default_prop", "prop_int": "0:int" }, "my_id": { "attribute": "my_attribute", "prop": "default_prop", "prop_int": "1:int", "prop_bool": "False:bool", "prop_list": [ "unique_section_name" ], "prop_scope": "SCENARIO" } } } """.strip() Config._serializer = _JsonSerializer() tf = NamedTemporaryFile(json_config) Config.override(tf.filename) assert Config.unique_sections is not None assert Config.unique_sections[UniqueSectionForTest.name] is not None assert Config.unique_sections[UniqueSectionForTest.name].attribute == "my_attribute" assert Config.unique_sections[UniqueSectionForTest.name].prop == "my_prop" assert Config.unique_sections[UniqueSectionForTest.name].prop_int == 1 assert Config.unique_sections[UniqueSectionForTest.name].prop_bool is False assert Config.unique_sections[UniqueSectionForTest.name].prop_list == [ "p1", datetime.datetime(1991, 1, 1), datetime.timedelta(days=1), ] assert Config.unique_sections[UniqueSectionForTest.name].prop_scope == Scope.SCENARIO assert Config.unique_sections[UniqueSectionForTest.name].prop_freq == Frequency.QUARTERLY assert Config.sections is not None assert len(Config.sections) == 1 assert Config.sections[SectionForTest.name] is not None assert len(Config.sections[SectionForTest.name]) == 2 assert Config.sections[SectionForTest.name]["default"] is not None assert Config.sections[SectionForTest.name]["default"].attribute == "default_attribute" assert Config.sections[SectionForTest.name]["default"].prop == 
"default_prop" assert Config.sections[SectionForTest.name]["default"].prop_int == 0 assert Config.sections[SectionForTest.name]["my_id"] is not None assert Config.sections[SectionForTest.name]["my_id"].attribute == "my_attribute" assert Config.sections[SectionForTest.name]["my_id"].prop == "default_prop" assert Config.sections[SectionForTest.name]["my_id"].prop_int == 1 assert Config.sections[SectionForTest.name]["my_id"].prop_bool is False assert Config.sections[SectionForTest.name]["my_id"].prop_list == ["unique_section_name"] tf2 = NamedTemporaryFile() Config.backup(tf2.filename) actual_config_2 = tf2.read().strip() assert actual_config_2 == json_config def test_read_write_json_configuration_file_with_function_and_class(): expected_json_config = """ { "TAIPY": {}, "unique_section_name": { "attribute": "my_attribute", "prop": "my_prop", "prop_list": [ "tests.config.test_section_serialization.CustomEncoder:class", "tests.config.test_section_serialization.CustomDecoder:class" ] }, "section_name": { "default": { "attribute": "default_attribute", "prop": "default_prop", "prop_int": "0:int" }, "my_id": { "attribute": "my_attribute", "prop": "default_prop", "prop_int": "0:int", "prop_fct_list": [ "tests.config.test_section_serialization.add:function" ], "prop_class_list": [ "tests.config.test_section_serialization.CustomClass:class" ] }, "my_id_2": { "attribute": "my_attribute_2", "prop": "default_prop", "prop_int": "0:int", "prop_fct_list": [ "builtins.print:function", "builtins.pow:function" ] } } } """.strip() Config._serializer = _JsonSerializer() tf = NamedTemporaryFile() Config.configure_unique_section_for_tests( attribute="my_attribute", prop="my_prop", prop_list=[CustomEncoder, CustomDecoder], ) Config.configure_section_for_tests( "my_id", "my_attribute", prop_fct_list=[add], prop_class_list=[CustomClass], ) Config.configure_section_for_tests( "my_id_2", "my_attribute_2", prop_fct_list=[print, pow], ) Config.backup(tf.filename) actual_exported_json = tf.read().strip() assert actual_exported_json == expected_json_config Config.override(tf.filename) tf2 = NamedTemporaryFile() Config.backup(tf2.filename) actual_exported_json_2 = tf2.read().strip() assert actual_exported_json_2 == expected_json_config
|
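Both serializers above round-trip non-string scalars through tagged strings such as "1:int" and "False:bool". A compact sketch of that encoding and decoding for basic types only (enum, datetime, timedelta and class/function tags are handled along the same lines in the real serializer; these helper names are made up):

def encode_scalar(value):
    # Strings stay as-is; other basic scalars are tagged with their type.
    if isinstance(value, bool):
        return f"{value}:bool"
    if isinstance(value, int):
        return f"{value}:int"
    if isinstance(value, float):
        return f"{value}:float"
    return value

def decode_scalar(value):
    if isinstance(value, str) and ":" in value:
        raw, _, tag = value.rpartition(":")
        if tag == "bool":
            return raw == "True"
        if tag == "int":
            return int(raw)
        if tag == "float":
            return float(raw)
    return value

assert encode_scalar(1) == "1:int"
assert decode_scalar("False:bool") is False
assert decode_scalar("plain") == "plain"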
# # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License.
|
from src.taipy.config._config import _Config from src.taipy.config.checker._checker import _Checker class TestDefaultConfigChecker: def test_check_default_config(self): config = _Config._default_config() collector = _Checker._check(config) assert len(collector._errors) == 0 assert len(collector._infos) == 0 assert len(collector._warnings) == 0
|
from src.taipy.config.checker.issue import Issue from src.taipy.config.checker.issue_collector import IssueCollector class TestIssueCollector: def test_add_error(self): collector = IssueCollector() assert len(collector.errors) == 0 assert len(collector.warnings) == 0 assert len(collector.infos) == 0 assert len(collector.all) == 0 collector._add_error("field", "value", "message", "checker") assert len(collector.errors) == 1 assert len(collector.warnings) == 0 assert len(collector.infos) == 0 assert len(collector.all) == 1 assert collector.all[0] == Issue(IssueCollector._ERROR_LEVEL, "field", "value", "message", "checker") collector._add_error("field", "value", "message", "checker") assert len(collector.errors) == 2 assert len(collector.warnings) == 0 assert len(collector.infos) == 0 assert len(collector.all) == 2 assert collector.all[0] == Issue(IssueCollector._ERROR_LEVEL, "field", "value", "message", "checker") assert collector.all[1] == Issue(IssueCollector._ERROR_LEVEL, "field", "value", "message", "checker") def test_add_warning(self): collector = IssueCollector() assert len(collector.errors) == 0 assert len(collector.warnings) == 0 assert len(collector.infos) == 0 assert len(collector.all) == 0 collector._add_warning("field", "value", "message", "checker") assert len(collector.errors) == 0 assert len(collector.warnings) == 1 assert len(collector.infos) == 0 assert len(collector.all) == 1 assert collector.all[0] == Issue(IssueCollector._WARNING_LEVEL, "field", "value", "message", "checker") collector._add_warning("field", "value", "message", "checker") assert len(collector.errors) == 0 assert len(collector.warnings) == 2 assert len(collector.infos) == 0 assert len(collector.all) == 2 assert collector.all[0] == Issue(IssueCollector._WARNING_LEVEL, "field", "value", "message", "checker") assert collector.all[1] == Issue(IssueCollector._WARNING_LEVEL, "field", "value", "message", "checker") def test_add_info(self): collector = IssueCollector() assert len(collector.errors) == 0 assert len(collector.warnings) == 0 assert len(collector.infos) == 0 assert len(collector.all) == 0 collector._add_info("field", "value", "message", "checker") assert len(collector.errors) == 0 assert len(collector.warnings) == 0 assert len(collector.infos) == 1 assert len(collector.all) == 1 assert collector.all[0] == Issue(IssueCollector._INFO_LEVEL, "field", "value", "message", "checker") collector._add_info("field", "value", "message", "checker") assert len(collector.errors) == 0 assert len(collector.warnings) == 0 assert len(collector.infos) == 2 assert len(collector.all) == 2 assert collector.all[0] == Issue(IssueCollector._INFO_LEVEL, "field", "value", "message", "checker") assert collector.all[1] == Issue(IssueCollector._INFO_LEVEL, "field", "value", "message", "checker") def test_all(self): collector = IssueCollector() collector._add_info("foo", "bar", "baz", "qux") assert collector.all[0] == Issue(IssueCollector._INFO_LEVEL, "foo", "bar", "baz", "qux") collector._add_warning("foo2", "bar2", "baz2", "qux2") assert collector.all[0] == Issue(IssueCollector._WARNING_LEVEL, "foo2", "bar2", "baz2", "qux2") assert collector.all[1] == Issue(IssueCollector._INFO_LEVEL, "foo", "bar", "baz", "qux") collector._add_warning("foo3", "bar3", "baz3", "qux3") assert collector.all[0] == Issue(IssueCollector._WARNING_LEVEL, "foo2", "bar2", "baz2", "qux2") assert collector.all[1] == Issue(IssueCollector._WARNING_LEVEL, "foo3", "bar3", "baz3", "qux3") assert collector.all[2] == Issue(IssueCollector._INFO_LEVEL, "foo", "bar", 
"baz", "qux") collector._add_info("field", "value", "message", "checker") collector._add_error("field", "value", "message", "checker") assert collector.all[0] == Issue(IssueCollector._ERROR_LEVEL, "field", "value", "message", "checker") assert collector.all[1] == Issue(IssueCollector._WARNING_LEVEL, "foo2", "bar2", "baz2", "qux2") assert collector.all[2] == Issue(IssueCollector._WARNING_LEVEL, "foo3", "bar3", "baz3", "qux3") assert collector.all[3] == Issue(IssueCollector._INFO_LEVEL, "foo", "bar", "baz", "qux") assert collector.all[4] == Issue(IssueCollector._INFO_LEVEL, "field", "value", "message", "checker")
|
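test_all above checks that collected issues come back ordered by severity: errors first, then warnings, then infos, each group in insertion order. A bare-bones collector with that ordering (MiniCollector is an illustrative stand-in for IssueCollector):

class MiniCollector:
    def __init__(self):
        self.errors, self.warnings, self.infos = [], [], []

    def add_error(self, message):
        self.errors.append(("ERROR", message))

    def add_warning(self, message):
        self.warnings.append(("WARNING", message))

    def add_info(self, message):
        self.infos.append(("INFO", message))

    @property
    def all(self):
        # Severity buckets are concatenated, so errors always come first.
        return self.errors + self.warnings + self.infos

c = MiniCollector()
c.add_info("baz")
c.add_warning("baz2")
c.add_error("boom")
assert [level for level, _ in c.all] == ["ERROR", "WARNING", "INFO"]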
# # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License.
|
import os from unittest import mock from unittest.mock import MagicMock from src.taipy.config import Config from src.taipy.config.checker._checker import _Checker from src.taipy.config.checker.issue_collector import IssueCollector from tests.config.utils.checker_for_tests import CheckerForTest def test_register_checker(): checker = CheckerForTest checker._check = MagicMock() _Checker.add_checker(checker) Config.check() checker._check.assert_called_once()
|
import logging from unittest import mock from src.taipy.config._config import _Config from src.taipy.config.checker._checkers._config_checker import _ConfigChecker from src.taipy.config.checker.issue import Issue from src.taipy.config.checker.issue_collector import IssueCollector class MyCustomChecker(_ConfigChecker): def _check(self) -> IssueCollector: pass def test__error(): with mock.patch.object(logging.Logger, "error"): collector = IssueCollector() assert len(collector.all) == 0 _ConfigChecker(_Config(), collector)._error("field", 17, "my message") assert len(collector.all) == 1 assert len(collector.errors) == 1 assert len(collector.warnings) == 0 assert len(collector.infos) == 0 assert collector.errors[0] == Issue(IssueCollector._ERROR_LEVEL, "field", 17, "my message", "_ConfigChecker") MyCustomChecker(_Config(), collector)._error("foo", "bar", "baz") assert len(collector.all) == 2 assert len(collector.errors) == 2 assert len(collector.warnings) == 0 assert len(collector.infos) == 0 assert collector.errors[0] == Issue(IssueCollector._ERROR_LEVEL, "field", 17, "my message", "_ConfigChecker") assert collector.errors[1] == Issue(IssueCollector._ERROR_LEVEL, "foo", "bar", "baz", "MyCustomChecker") def test__warning(): collector = IssueCollector() assert len(collector.all) == 0 _ConfigChecker(_Config(), collector)._warning("field", 17, "my message") assert len(collector.all) == 1 assert len(collector.warnings) == 1 assert len(collector.errors) == 0 assert len(collector.infos) == 0 assert collector.warnings[0] == Issue(IssueCollector._WARNING_LEVEL, "field", 17, "my message", "_ConfigChecker") MyCustomChecker(_Config(), collector)._warning("foo", "bar", "baz") assert len(collector.all) == 2 assert len(collector.warnings) == 2 assert len(collector.errors) == 0 assert len(collector.infos) == 0 assert collector.warnings[0] == Issue(IssueCollector._WARNING_LEVEL, "field", 17, "my message", "_ConfigChecker") assert collector.warnings[1] == Issue(IssueCollector._WARNING_LEVEL, "foo", "bar", "baz", "MyCustomChecker") def test__info(): collector = IssueCollector() assert len(collector.all) == 0 _ConfigChecker(_Config(), collector)._info("field", 17, "my message") assert len(collector.all) == 1 assert len(collector.infos) == 1 assert len(collector.errors) == 0 assert len(collector.warnings) == 0 assert collector.infos[0] == Issue(IssueCollector._INFO_LEVEL, "field", 17, "my message", "_ConfigChecker") MyCustomChecker(_Config(), collector)._info("foo", "bar", "baz") assert len(collector.all) == 2 assert len(collector.infos) == 2 assert len(collector.errors) == 0 assert len(collector.warnings) == 0 assert collector.infos[0] == Issue(IssueCollector._INFO_LEVEL, "field", 17, "my message", "_ConfigChecker") assert collector.infos[1] == Issue(IssueCollector._INFO_LEVEL, "foo", "bar", "baz", "MyCustomChecker")
|
from src.taipy.config import IssueCollector from src.taipy.config.checker._checkers._config_checker import _ConfigChecker class CheckerForTest(_ConfigChecker): def _check(self) -> IssueCollector: return self._collector
|
from copy import copy from typing import Any, Dict, List, Optional from src.taipy.config import Config, Section from src.taipy.config._config import _Config from src.taipy.config.common._config_blocker import _ConfigBlocker from .section_for_tests import SectionForTest class SectionOfSectionsListForTest(Section): name = "list_section_name" _MY_ATTRIBUTE_KEY = "attribute" _SECTIONS_LIST_KEY = "sections_list" def __init__(self, id: str, attribute: Any = None, sections_list: List = None, **properties): self._attribute = attribute self._sections_list = sections_list if sections_list else [] super().__init__(id, **properties) def __copy__(self): return SectionOfSectionsListForTest( self.id, self._attribute, copy(self._sections_list), **copy(self._properties) ) @property def attribute(self): return self._replace_templates(self._attribute) @attribute.setter # type: ignore @_ConfigBlocker._check() def attribute(self, val): self._attribute = val @property def sections_list(self): return list(self._sections_list) @sections_list.setter # type: ignore @_ConfigBlocker._check() def sections_list(self, val): self._sections_list = val def _clean(self): self._attribute = None self._sections_list = [] self._properties.clear() def _to_dict(self): as_dict = {} if self._attribute is not None: as_dict[self._MY_ATTRIBUTE_KEY] = self._attribute if self._sections_list: as_dict[self._SECTIONS_LIST_KEY] = self._sections_list as_dict.update(self._properties) return as_dict @classmethod def _from_dict(cls, as_dict: Dict[str, Any], id: str, config: Optional[_Config] = None): as_dict.pop(cls._ID_KEY, id) attribute = as_dict.pop(cls._MY_ATTRIBUTE_KEY, None) section_configs = config._sections.get(SectionForTest.name, None) or [] # type: ignore sections_list = [] if inputs_as_str := as_dict.pop(cls._SECTIONS_LIST_KEY, None): for section_id in inputs_as_str: if section_id in section_configs: sections_list.append(section_configs[section_id]) else: sections_list.append(section_id) return SectionOfSectionsListForTest(id=id, attribute=attribute, sections_list=sections_list, **as_dict) def _update(self, as_dict: Dict[str, Any], default_section=None): self._attribute = as_dict.pop(self._MY_ATTRIBUTE_KEY, self._attribute) if self._attribute is None and default_section: self._attribute = default_section._attribute self._sections_list = as_dict.pop(self._SECTIONS_LIST_KEY, self._sections_list) if self._sections_list is None and default_section: self._sections_list = default_section._sections_list self._properties.update(as_dict) if default_section: self._properties = {**default_section.properties, **self._properties} @staticmethod def _configure(id: str, attribute: str, sections_list: List = None, **properties): section = SectionOfSectionsListForTest(id, attribute, sections_list, **properties) Config._register(section) return Config.sections[SectionOfSectionsListForTest.name][id]
|
import os import tempfile class NamedTemporaryFile: def __init__(self, content=None): with tempfile.NamedTemporaryFile("w", delete=False) as fd: if content: fd.write(content) self.filename = fd.name def read(self): with open(self.filename, "r") as fp: return fp.read() def __del__(self): os.unlink(self.filename)
|
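The NamedTemporaryFile helper above writes optional content to a temporary file, exposes its path, and unlinks the file when the instance is garbage-collected. Typical usage in these tests looks roughly like this (the TOML content is arbitrary and the import path assumes the test-suite layout shown in the other modules):

from tests.config.utils.named_temporary_file import NamedTemporaryFile

tf = NamedTemporaryFile('[TAIPY]\nfoo = "bar"\n')
assert tf.read().startswith("[TAIPY]")
# Config.load(tf.filename) would then pick the file up; letting tf go out of
# scope removes the file via __del__.
del tf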
from copy import copy from typing import Any, Dict, Optional from src.taipy.config import Config, Section from src.taipy.config._config import _Config from src.taipy.config.common._config_blocker import _ConfigBlocker class SectionForTest(Section): name = "section_name" _MY_ATTRIBUTE_KEY = "attribute" def __init__(self, id: str, attribute: Any = None, **properties): self._attribute = attribute super().__init__(id, **properties) def __copy__(self): return SectionForTest(self.id, self._attribute, **copy(self._properties)) @property def attribute(self): return self._replace_templates(self._attribute) @attribute.setter # type: ignore @_ConfigBlocker._check() def attribute(self, val): self._attribute = val def _clean(self): self._attribute = None self._properties.clear() def _to_dict(self): as_dict = {} if self._attribute is not None: as_dict[self._MY_ATTRIBUTE_KEY] = self._attribute as_dict.update(self._properties) return as_dict @classmethod def _from_dict(cls, as_dict: Dict[str, Any], id: str, config: Optional[_Config] = None): as_dict.pop(cls._ID_KEY, id) attribute = as_dict.pop(cls._MY_ATTRIBUTE_KEY, None) return SectionForTest(id=id, attribute=attribute, **as_dict) def _update(self, as_dict: Dict[str, Any], default_section=None): self._attribute = as_dict.pop(self._MY_ATTRIBUTE_KEY, self._attribute) if self._attribute is None and default_section: self._attribute = default_section._attribute self._properties.update(as_dict) if default_section: self._properties = {**default_section.properties, **self._properties} @staticmethod def _configure(id: str, attribute: str, **properties): section = SectionForTest(id, attribute, **properties) Config._register(section) return Config.sections[SectionForTest.name][id]
|
from copy import copy from typing import Any, Dict, Optional from src.taipy.config import Config from src.taipy.config._config import _Config from src.taipy.config.common._config_blocker import _ConfigBlocker from src.taipy.config.unique_section import UniqueSection class UniqueSectionForTest(UniqueSection): name = "unique_section_name" _MY_ATTRIBUTE_KEY = "attribute" def __init__(self, attribute: str = None, **properties): self._attribute = attribute super().__init__(**properties) def __copy__(self): return UniqueSectionForTest(self._attribute, **copy(self._properties)) @property def attribute(self): return self._replace_templates(self._attribute) @attribute.setter # type: ignore @_ConfigBlocker._check() def attribute(self, val): self._attribute = val def _clean(self): self._attribute = None self._properties.clear() def _to_dict(self): as_dict = {} if self._attribute is not None: as_dict[self._MY_ATTRIBUTE_KEY] = self._attribute as_dict.update(self._properties) return as_dict @classmethod def _from_dict(cls, as_dict: Dict[str, Any], id=None, config: Optional[_Config] = None): as_dict.pop(cls._ID_KEY, None) attribute = as_dict.pop(cls._MY_ATTRIBUTE_KEY, None) return UniqueSectionForTest(attribute=attribute, **as_dict) def _update(self, as_dict: Dict[str, Any], default_section=None): self._attribute = as_dict.pop(self._MY_ATTRIBUTE_KEY, self._attribute) if self._attribute is None and default_section: self._attribute = default_section._attribute self._properties.update(as_dict) if default_section: self._properties = {**default_section.properties, **self._properties} @staticmethod def _configure(attribute: str, **properties): section = UniqueSectionForTest(attribute, **properties) Config._register(section) return Config.unique_sections[UniqueSectionForTest.name]
|
import pytest from src.taipy.config.common._validate_id import _validate_id from src.taipy.config.exceptions.exceptions import InvalidConfigurationId class TestId: def test_validate_id(self): s = _validate_id("foo") assert s == "foo" with pytest.raises(InvalidConfigurationId): _validate_id("1foo") with pytest.raises(InvalidConfigurationId): _validate_id("foo bar") with pytest.raises(InvalidConfigurationId): _validate_id("foo/foo$") with pytest.raises(InvalidConfigurationId): _validate_id("") with pytest.raises(InvalidConfigurationId): _validate_id(" ") with pytest.raises(InvalidConfigurationId): _validate_id("class") with pytest.raises(InvalidConfigurationId): _validate_id("def") with pytest.raises(InvalidConfigurationId): _validate_id("with") with pytest.raises(InvalidConfigurationId): _validate_id("CYCLE") with pytest.raises(InvalidConfigurationId): _validate_id("SCENARIO") with pytest.raises(InvalidConfigurationId): _validate_id("SEQUENCE") with pytest.raises(InvalidConfigurationId): _validate_id("TASK") with pytest.raises(InvalidConfigurationId): _validate_id("DATANODE")
|
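The _validate_id tests above reject ids that are not valid Python identifiers, Python keywords, and a handful of Taipy-reserved words. A rough equivalent using only the standard library (the reserved-word set below is taken from the test cases, not from taipy's source; validate_id and InvalidId are made-up names):

import keyword

RESERVED = {"CYCLE", "SCENARIO", "SEQUENCE", "TASK", "DATANODE"}

class InvalidId(ValueError):
    pass

def validate_id(name: str) -> str:
    # Must look like a Python identifier and must not be a keyword or reserved word.
    if not name.isidentifier() or keyword.iskeyword(name) or name in RESERVED:
        raise InvalidId(name)
    return name

assert validate_id("foo") == "foo"
for bad in ("1foo", "foo bar", "", "class", "SCENARIO"):
    try:
        validate_id(bad)
    except InvalidId:
        pass
    else:
        raise AssertionError(f"{bad!r} should have been rejected")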
import pytest from src.taipy.config.common.scope import Scope def test_scope(): # Test __ge__ method assert Scope.GLOBAL >= Scope.GLOBAL assert Scope.GLOBAL >= Scope.CYCLE assert Scope.CYCLE >= Scope.CYCLE assert Scope.GLOBAL >= Scope.SCENARIO assert Scope.CYCLE >= Scope.SCENARIO assert Scope.SCENARIO >= Scope.SCENARIO with pytest.raises(TypeError): assert Scope.SCENARIO >= "testing string" # Test __gt__ method assert Scope.GLOBAL > Scope.CYCLE assert Scope.GLOBAL > Scope.SCENARIO assert Scope.CYCLE > Scope.SCENARIO with pytest.raises(TypeError): assert Scope.SCENARIO > "testing string" # Test __le__ method assert Scope.GLOBAL <= Scope.GLOBAL assert Scope.CYCLE <= Scope.GLOBAL assert Scope.CYCLE <= Scope.CYCLE assert Scope.SCENARIO <= Scope.GLOBAL assert Scope.SCENARIO <= Scope.CYCLE assert Scope.SCENARIO <= Scope.SCENARIO with pytest.raises(TypeError): assert Scope.SCENARIO <= "testing string" # Test __lt__ method assert Scope.SCENARIO < Scope.GLOBAL assert Scope.SCENARIO < Scope.GLOBAL assert Scope.SCENARIO < Scope.CYCLE with pytest.raises(TypeError): assert Scope.SCENARIO < "testing string"
|
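The Scope comparisons above order GLOBAL above CYCLE above SCENARIO and raise TypeError when a member is compared to anything else. An IntEnum reproduces both properties, since integer-valued members compare among themselves but not with strings; the numeric values here are illustrative, not taipy's:

from enum import IntEnum

class ScopeSketch(IntEnum):
    SCENARIO = 1
    CYCLE = 2
    GLOBAL = 3

assert ScopeSketch.GLOBAL > ScopeSketch.CYCLE > ScopeSketch.SCENARIO
assert ScopeSketch.SCENARIO <= ScopeSketch.SCENARIO
try:
    _ = ScopeSketch.SCENARIO >= "testing string"
except TypeError:
    pass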
# # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License.
|
import argparse import re import sys import pytest from src.taipy._cli._base_cli import _CLI if sys.version_info >= (3, 10): argparse_options_str = "options:" else: argparse_options_str = "optional arguments:" def preprocess_stdout(stdout): stdout = stdout.replace("\n", " ").replace("\t", " ") return re.sub(" +", " ", stdout) def remove_subparser(name: str): """Remove a subparser from argparse.""" _CLI._sub_taipyparsers.pop(name, None) if _CLI._subparser_action: _CLI._subparser_action._name_parser_map.pop(name, None) for action in _CLI._subparser_action._choices_actions: if action.dest == name: _CLI._subparser_action._choices_actions.remove(action) @pytest.fixture(autouse=True, scope="function") def clean_argparser(): _CLI._parser = argparse.ArgumentParser(conflict_handler="resolve") _CLI._arg_groups = {} subcommands = list(_CLI._sub_taipyparsers.keys()) for subcommand in subcommands: remove_subparser(subcommand) yield def test_subparser(capfd): subcommand_1 = _CLI._add_subparser("subcommand_1", help="subcommand_1 help") subcommand_1.add_argument("--foo", "-f", help="foo help") subcommand_1.add_argument("--bar", "-b", help="bar help") subcommand_2 = _CLI._add_subparser("subcommand_2", help="subcommand_2 help") subcommand_2.add_argument("--doo", "-d", help="doo help") subcommand_2.add_argument("--baz", "-z", help="baz help") expected_subcommand_1_help_message = f"""subcommand_1 [-h] [--foo FOO] [--bar BAR] {argparse_options_str} -h, --help show this help message and exit --foo FOO, -f FOO foo help --bar BAR, -b BAR bar help """ subcommand_1.print_help() stdout, _ = capfd.readouterr() assert preprocess_stdout(expected_subcommand_1_help_message) in preprocess_stdout(stdout) expected_subcommand_2_help_message = f"""subcommand_2 [-h] [--doo DOO] [--baz BAZ] {argparse_options_str} -h, --help show this help message and exit --doo DOO, -d DOO doo help --baz BAZ, -z BAZ baz help """ subcommand_2.print_help() stdout, _ = capfd.readouterr() assert preprocess_stdout(expected_subcommand_2_help_message) in preprocess_stdout(stdout) def test_duplicate_subcommand(): subcommand_1 = _CLI._add_subparser("subcommand_1", help="subcommand_1 help") subcommand_1.add_argument("--foo", "-f", help="foo help") subcommand_2 = _CLI._add_subparser("subcommand_1", help="subcommand_2 help") subcommand_2.add_argument("--bar", "-b", help="bar help") # The title of subcommand_2 is duplicated with subcommand_1, and therefore # there will be no new subcommand created assert len(_CLI._sub_taipyparsers) == 1 def test_groupparser(capfd): group_1 = _CLI._add_groupparser("group_1", "group_1 desc") group_1.add_argument("--foo", "-f", help="foo help") group_1.add_argument("--bar", "-b", help="bar help") group_2 = _CLI._add_groupparser("group_2", "group_2 desc") group_2.add_argument("--doo", "-d", help="doo help") group_2.add_argument("--baz", "-z", help="baz help") expected_help_message = """ group_1: group_1 desc --foo FOO, -f FOO foo help --bar BAR, -b BAR bar help group_2: group_2 desc --doo DOO, -d DOO doo help --baz BAZ, -z BAZ baz help """.strip() _CLI._parser.print_help() stdout, _ = capfd.readouterr() assert expected_help_message in stdout def test_duplicate_group(): group_1 = _CLI._add_groupparser("group_1", "group_1 desc") group_1.add_argument("--foo", "-f", help="foo help") group_2 = _CLI._add_groupparser("group_1", "group_2 desc") group_2.add_argument("--bar", "-b", help="bar help") # The title of group_2 is duplicated with group_1, and therefore # there will be no new group created assert len(_CLI._arg_groups) == 1
|
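The row above stores tests for Taipy's private `_CLI` helper, which is a thin layer over `argparse` subparsers and argument groups. As a point of reference, here is a minimal sketch of the same pattern written against the standard library only; the `scaffold` subcommand and its options are invented for illustration and are not part of Taipy.

```python
# Standard-library sketch of the subparser/argument-group pattern the _CLI tests exercise.
import argparse

parser = argparse.ArgumentParser(conflict_handler="resolve")
subparsers = parser.add_subparsers(dest="command")

scaffold = subparsers.add_parser("scaffold", help="scaffold help")  # hypothetical subcommand
scaffold.add_argument("--template", "-t", help="template help")

runtime = parser.add_argument_group("runtime", "runtime options")  # named group, as in _add_groupparser
runtime.add_argument("--verbose", action="store_true", help="verbose output")

args = parser.parse_args(["scaffold", "--template", "default"])
print(args.command, args.template)  # scaffold default
```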
import datetime import os from unittest import mock import pytest from src.taipy.config.common.frequency import Frequency from src.taipy.config.common.scope import Scope from src.taipy.config.common._template_handler import _TemplateHandler from src.taipy.config.exceptions.exceptions import InconsistentEnvVariableError def test_replace_if_template(): assert_does_not_change("123") assert_does_not_change("foo") assert_does_not_change("_foo") assert_does_not_change("_foo_") assert_does_not_change("foo_") assert_does_not_change("foo") assert_does_not_change("foo_1") assert_does_not_change("1foo_1") assert_does_not_change("env(foo)") assert_does_not_change("env<foo>") assert_does_not_change("env[foo]") assert_does_not_change("Env[foo]") assert_does_not_change("ENV[1foo]") assert_does_not_change("123:bool") assert_does_not_change("foo:bool") assert_does_not_change("_foo:bool") assert_does_not_change("_foo_:bool") assert_does_not_change("foo_:bool") assert_does_not_change("foo:bool") assert_does_not_change("foo_1:bool") assert_does_not_change("1foo_1:bool") assert_does_not_change("env(foo):bool") assert_does_not_change("env<foo>:bool") assert_does_not_change("env[foo]:bool") assert_does_not_change("Env[foo]:bool") assert_does_not_change("ENV[1foo]:bool") assert_does_not_change("ENV[foo]:") assert_does_not_change("ENV[_foo]:") assert_does_not_change("ENV[foo_]:") assert_does_not_change("ENV[foo0]:") assert_does_not_change("ENV[foo_0]:") assert_does_not_change("ENV[_foo_0]:") assert_does_not_change("ENV[foo]:foo") assert_does_not_change("ENV[_foo]:foo") assert_does_not_change("ENV[foo_]:foo") assert_does_not_change("ENV[foo0]:foo") assert_does_not_change("ENV[foo_0]:foo") assert_does_not_change("ENV[_foo_0]:foo") assert_does_replace("ENV[foo]", "foo", "VALUE", str) assert_does_replace("ENV[_foo]", "_foo", "VALUE", str) assert_does_replace("ENV[foo_]", "foo_", "VALUE", str) assert_does_replace("ENV[foo0]", "foo0", "VALUE", str) assert_does_replace("ENV[foo_0]", "foo_0", "VALUE", str) assert_does_replace("ENV[_foo_0]", "_foo_0", "VALUE", str) assert_does_replace("ENV[foo]:str", "foo", "VALUE", str) assert_does_replace("ENV[_foo]:str", "_foo", "VALUE", str) assert_does_replace("ENV[foo_]:str", "foo_", "VALUE", str) assert_does_replace("ENV[foo0]:str", "foo0", "VALUE", str) assert_does_replace("ENV[foo_0]:str", "foo_0", "VALUE", str) assert_does_replace("ENV[_foo_0]:str", "_foo_0", "VALUE", str) assert_does_replace("ENV[foo]:int", "foo", "1", int) assert_does_replace("ENV[_foo]:int", "_foo", "1", int) assert_does_replace("ENV[foo_]:int", "foo_", "1", int) assert_does_replace("ENV[foo0]:int", "foo0", "1", int) assert_does_replace("ENV[foo_0]:int", "foo_0", "1", int) assert_does_replace("ENV[_foo_0]:int", "_foo_0", "1", int) assert_does_replace("ENV[foo]:float", "foo", "1.", float) assert_does_replace("ENV[_foo]:float", "_foo", "1.", float) assert_does_replace("ENV[foo_]:float", "foo_", "1.", float) assert_does_replace("ENV[foo0]:float", "foo0", "1.", float) assert_does_replace("ENV[foo_0]:float", "foo_0", "1.", float) assert_does_replace("ENV[_foo_0]:float", "_foo_0", "1.", float) assert_does_replace("ENV[foo]:bool", "foo", "True", bool) assert_does_replace("ENV[_foo]:bool", "_foo", "True", bool) assert_does_replace("ENV[foo_]:bool", "foo_", "True", bool) assert_does_replace("ENV[foo0]:bool", "foo0", "True", bool) assert_does_replace("ENV[foo_0]:bool", "foo_0", "True", bool) assert_does_replace("ENV[_foo_0]:bool", "_foo_0", "True", bool) def assert_does_replace(template, env_variable_name, replaced_by, as_type): with mock.patch.dict(os.environ, {env_variable_name: replaced_by}): tpl = _TemplateHandler() assert tpl._replace_templates(template) == as_type(replaced_by) def assert_does_not_change(template): tpl = _TemplateHandler() assert tpl._replace_templates(template) == template def test_replace_tuple_list_dict(): with mock.patch.dict(os.environ, {"FOO": "true", "BAR": "3", "BAZ": "qux"}): tpl = _TemplateHandler() now = datetime.datetime.now() actual = tpl._replace_templates(("ENV[FOO]:bool", now, "ENV[BAR]:int", "ENV[BAZ]", "quz")) assert actual == (True, now, 3, "qux", "quz") actual = tpl._replace_templates(("ENV[FOO]:bool", now, "ENV[BAR]:int", "ENV[BAZ]", "quz")) assert actual == (True, now, 3, "qux", "quz") def test_to_bool(): with pytest.raises(InconsistentEnvVariableError): _TemplateHandler._to_bool("okhds") with pytest.raises(InconsistentEnvVariableError): _TemplateHandler._to_bool("no") with pytest.raises(InconsistentEnvVariableError): _TemplateHandler._to_bool("tru") with pytest.raises(InconsistentEnvVariableError): _TemplateHandler._to_bool("tru_e") assert _TemplateHandler._to_bool("true") assert _TemplateHandler._to_bool("True") assert _TemplateHandler._to_bool("TRUE") assert _TemplateHandler._to_bool("TruE") assert _TemplateHandler._to_bool("TrUE") assert not _TemplateHandler._to_bool("false") assert not _TemplateHandler._to_bool("False") assert not _TemplateHandler._to_bool("FALSE") assert not _TemplateHandler._to_bool("FalSE") assert not _TemplateHandler._to_bool("FalSe") def test_to_int(): with pytest.raises(InconsistentEnvVariableError): _TemplateHandler._to_int("okhds") with pytest.raises(InconsistentEnvVariableError): _TemplateHandler._to_int("_45") with pytest.raises(InconsistentEnvVariableError): _TemplateHandler._to_int("12.5") assert 12 == _TemplateHandler._to_int("12") assert 0 == _TemplateHandler._to_int("0") assert -2 == _TemplateHandler._to_int("-2") assert 156165 == _TemplateHandler._to_int("156165") def test_to_float(): with pytest.raises(InconsistentEnvVariableError): _TemplateHandler._to_float("okhds") with pytest.raises(InconsistentEnvVariableError): _TemplateHandler._to_float("_45") assert 12.5 == _TemplateHandler._to_float("12.5") assert 2.0 == _TemplateHandler._to_float("2") assert 0.0 == _TemplateHandler._to_float("0") assert -2.1 == _TemplateHandler._to_float("-2.1") assert 156165.3 == _TemplateHandler._to_float("156165.3") def test_to_scope(): with pytest.raises(InconsistentEnvVariableError): _TemplateHandler._to_scope("okhds") with pytest.raises(InconsistentEnvVariableError): _TemplateHandler._to_scope("plop") assert Scope.GLOBAL == _TemplateHandler._to_scope("global") assert Scope.GLOBAL == _TemplateHandler._to_scope("GLOBAL") assert Scope.SCENARIO == _TemplateHandler._to_scope("SCENARIO") assert Scope.CYCLE == _TemplateHandler._to_scope("cycle") def test_to_frequency(): with pytest.raises(InconsistentEnvVariableError): _TemplateHandler._to_frequency("okhds") with pytest.raises(InconsistentEnvVariableError): _TemplateHandler._to_frequency("plop") assert Frequency.DAILY == _TemplateHandler._to_frequency("DAILY") assert Frequency.DAILY == _TemplateHandler._to_frequency("Daily") assert Frequency.WEEKLY == _TemplateHandler._to_frequency("weekly") assert Frequency.WEEKLY == _TemplateHandler._to_frequency("WEEKLY") assert Frequency.MONTHLY == _TemplateHandler._to_frequency("Monthly") assert Frequency.MONTHLY == _TemplateHandler._to_frequency("MONThLY") assert Frequency.QUARTERLY == _TemplateHandler._to_frequency("QuaRtERlY") assert Frequency.YEARLY == _TemplateHandler._to_frequency("Yearly")
|
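The tests above pin down the template syntax handled by `_TemplateHandler`: a string of the form `ENV[NAME]`, optionally suffixed with `:str`, `:int`, `:float` or `:bool`, is replaced by the value of the environment variable `NAME` cast to the declared type, while anything else is returned untouched. A small usage sketch follows; `_TemplateHandler` is a private class, the import path assumes the installed package mirrors the `src/taipy/config` layout used by these tests, and `FOO`/`BAR` are placeholder variable names.

```python
# Illustration of the ENV[...] resolution asserted in the tests above.
import os
from unittest import mock

from taipy.config.common._template_handler import _TemplateHandler

with mock.patch.dict(os.environ, {"FOO": "true", "BAR": "42"}):
    tpl = _TemplateHandler()
    print(tpl._replace_templates("ENV[FOO]:bool"))  # True (a real bool, not the string "true")
    print(tpl._replace_templates("ENV[BAR]:int"))   # 42
    print(tpl._replace_templates("env[FOO]"))       # "env[FOO]" -- lowercase prefix is not a template
```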
import pytest from src.taipy.config.common._classproperty import _Classproperty class TestClassProperty: def test_class_property(self): class TestClass: @_Classproperty def test_property(cls): return "test_property" assert TestClass.test_property == "test_property" assert TestClass().test_property == "test_property" with pytest.raises(TypeError): TestClass.test_property()
|
import os from unittest import mock import pytest from src.taipy.config.config import Config from src.taipy.config.exceptions.exceptions import ConfigurationUpdateBlocked def test_global_config_with_env_variable_value(): with mock.patch.dict(os.environ, {"FOO": "bar", "BAZ": "qux"}): Config.configure_global_app(foo="ENV[FOO]", bar="ENV[BAZ]") assert Config.global_config.foo == "bar" assert Config.global_config.bar == "qux" def test_default_global_app_config(): global_config = Config.global_config assert global_config is not None assert not global_config.notification assert len(global_config.properties) == 0 def test_block_update_global_app_config(): Config.block_update() with pytest.raises(ConfigurationUpdateBlocked): Config.configure_global_app(foo="bar") with pytest.raises(ConfigurationUpdateBlocked): Config.global_config.properties = {"foo": "bar"} # Test if the global_config stay as default assert Config.global_config.foo is None assert len(Config.global_config.properties) == 0
|
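The row above combines two behaviours of the `Config` singleton: `ENV[...]` indirection inside `configure_global_app` and the `block_update` guard that freezes the configuration. A short sketch under the same assumptions as the test (`foo` is an arbitrary property name, `FOO` a placeholder environment variable, and the import paths assume the installed `taipy.config` package mirrors the `src` layout used here):

```python
# Global properties resolved from the environment, then the configuration is frozen.
import os
from unittest import mock

from taipy.config import Config
from taipy.config.exceptions.exceptions import ConfigurationUpdateBlocked

with mock.patch.dict(os.environ, {"FOO": "bar"}):
    Config.configure_global_app(foo="ENV[FOO]")
    print(Config.global_config.foo)  # "bar"

Config.block_update()
try:
    Config.configure_global_app(foo="baz")  # rejected once updates are blocked
except ConfigurationUpdateBlocked:
    print("configuration is read-only now")
Config.unblock_update()
```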
# # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License.
|
from importlib.util import find_spec if find_spec("taipy"): if find_spec("taipy.config"): from taipy.config._init import * # type: ignore if find_spec("taipy.gui"): from taipy.gui._init import * # type: ignore if find_spec("taipy.core"): from taipy.core._init import * # type: ignore if find_spec("taipy.rest"): from taipy.rest._init import * # type: ignore if find_spec("taipy.gui_core"): from taipy.gui_core._init import * # type: ignore if find_spec("taipy.enterprise"): from taipy.enterprise._init import * # type: ignore if find_spec("taipy._run"): from taipy._run import _run as run # type: ignore
|
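The `__init__` row above only imports each sub-package when `importlib.util.find_spec` can locate it, which is why a partial install (for example, without `taipy.rest`) still imports cleanly. The same guard works for any optional dependency; `some_optional_package` below is a made-up name used only to show the pattern.

```python
# Generic version of the optional-import guard used above.
from importlib.util import find_spec

if find_spec("some_optional_package"):  # hypothetical module name
    import some_optional_package  # noqa: F401
else:
    some_optional_package = None  # callers can test for None and degrade gracefully
```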
# # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License.
|
import logging.config import os import sys class _TaipyLogger: _ENVIRONMENT_VARIABLE_NAME_WITH_LOGGER_CONFIG_PATH = "TAIPY_LOGGER_CONFIG_PATH" __logger = None @classmethod def _get_logger(cls): cls._ENVIRONMENT_VARIABLE_NAME_WITH_LOGGER_CONFIG_PATH = "TAIPY_LOGGER_CONFIG_PATH" if cls.__logger: return cls.__logger if config_filename := os.environ.get(cls._ENVIRONMENT_VARIABLE_NAME_WITH_LOGGER_CONFIG_PATH): logging.config.fileConfig(config_filename) cls.__logger = logging.getLogger("Taipy") else: cls.__logger = logging.getLogger("Taipy") cls.__logger.setLevel(logging.INFO) ch = logging.StreamHandler(sys.stdout) ch.setLevel(logging.INFO) formatter = logging.Formatter("[%(asctime)s][%(name)s][%(levelname)s] %(message)s", "%Y-%m-%d %H:%M:%S") ch.setFormatter(formatter) cls.__logger.addHandler(ch) return cls.__logger
|
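`_TaipyLogger` above lazily builds a `Taipy` logger: if the `TAIPY_LOGGER_CONFIG_PATH` environment variable names a standard `logging` fileConfig file it is honoured, otherwise a stdout handler at INFO level with a `[timestamp][Taipy][INFO]` format is attached. A minimal sketch of using it; the class is private, so the import path may change between releases.

```python
# Grab Taipy's logger with its default stdout handler.
from taipy.logger._taipy_logger import _TaipyLogger

logger = _TaipyLogger._get_logger()
logger.info("hello")  # e.g. [2024-01-01 12:00:00][Taipy][INFO] hello

# To take over the configuration entirely, point the environment variable at a
# standard logging fileConfig file *before* the first _get_logger() call:
#   os.environ["TAIPY_LOGGER_CONFIG_PATH"] = "logging.conf"  # hypothetical path
```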
import os from typing import Dict from ..logger._taipy_logger import _TaipyLogger from ._config import _Config from ._config_comparator._config_comparator import _ConfigComparator from ._serializer._json_serializer import _JsonSerializer from ._serializer._toml_serializer import _TomlSerializer from .checker._checker import _Checker from .checker.issue_collector import IssueCollector from .common._classproperty import _Classproperty from .common._config_blocker import _ConfigBlocker from .global_app.global_app_config import GlobalAppConfig from .section import Section from .unique_section import UniqueSection class Config: """Configuration singleton.""" _ENVIRONMENT_VARIABLE_NAME_WITH_CONFIG_PATH = "TAIPY_CONFIG_PATH" __logger = _TaipyLogger._get_logger() _default_config = _Config._default_config() _python_config = _Config() _file_config = _Config() _env_file_config = _Config() _applied_config = _Config() _collector = IssueCollector() _serializer = _TomlSerializer() __json_serializer = _JsonSerializer() _comparator: _ConfigComparator = _ConfigComparator() @_Classproperty def unique_sections(cls) -> Dict[str, UniqueSection]: """Return all unique sections.""" return cls._applied_config._unique_sections @_Classproperty def sections(cls) -> Dict[str, Dict[str, Section]]: """Return all non unique sections.""" return cls._applied_config._sections @_Classproperty def global_config(cls) -> GlobalAppConfig: """Return configuration values related to the global application as a `GlobalAppConfig^`.""" return cls._applied_config._global_config @classmethod @_ConfigBlocker._check() def load(cls, filename): """Load a configuration file. The current Python configuration is replaced and the Config compilation is triggered. Parameters: filename (Union[str, Path]): The path of the toml configuration file to load. """ cls.__logger.info(f"Loading configuration. Filename: '{filename}'") cls._python_config = cls._serializer._read(filename) cls._compile_configs() cls.__logger.info(f"Configuration '{filename}' successfully loaded.") @classmethod def export(cls, filename): """Export a configuration. The export is done in a toml file. The exported configuration is taken from the Python code configuration. Parameters: filename (Union[str, Path]): The path of the file to export. Note: If *filename* already exists, it is overwritten. """ cls._serializer._write(cls._python_config, filename) @classmethod def backup(cls, filename): """Backup a configuration. The backup is done in a toml file. The backed up configuration is a compilation from the three possible methods to configure the application: the Python code configuration, the file configuration and the environment configuration. Parameters: filename (Union[str, Path]): The path of the file to export. Note: If *filename* already exists, it is overwritten. """ cls._serializer._write(cls._applied_config, filename) @classmethod @_ConfigBlocker._check() def restore(cls, filename): """Restore a configuration file and replace the current applied configuration. Parameters: filename (Union[str, Path]): The path of the toml configuration file to load. """ cls.__logger.info(f"Restoring configuration. Filename: '{filename}'") cls._applied_config = cls._serializer._read(filename) cls.__logger.info(f"Configuration '{filename}' successfully restored.") @classmethod @_ConfigBlocker._check() def override(cls, filename): """Load a configuration from a file and override the current config. Parameters: filename (Union[str, Path]): The path of the toml configuration file to load. """ cls.__logger.info(f"Loading configuration. Filename: '{filename}'") cls._file_config = cls._serializer._read(filename) cls.__logger.info("Overriding configuration.") cls._compile_configs() cls.__logger.info(f"Configuration '{filename}' successfully loaded.") @classmethod def block_update(cls): """Block update on the configuration singleton.""" _ConfigBlocker._block() @classmethod def unblock_update(cls): """Unblock update on the configuration singleton.""" _ConfigBlocker._unblock() @classmethod @_ConfigBlocker._check() def configure_global_app(cls, **properties) -> GlobalAppConfig: """Configure the global application. Parameters: **properties (Dict[str, Any]): A dictionary of additional properties. Returns: The global application configuration. """ glob_cfg = GlobalAppConfig(**properties) if cls._python_config._global_config is None: cls._python_config._global_config = glob_cfg else: cls._python_config._global_config._update(glob_cfg._to_dict()) cls._compile_configs() return cls._applied_config._global_config @classmethod def check(cls) -> IssueCollector: """Check configuration. This method logs issue messages and returns an issue collector. Returns: Collector containing the info, warning and error issues. """ cls._collector = _Checker._check(cls._applied_config) cls.__log_message(cls) return cls._collector @classmethod @_ConfigBlocker._check() def _register_default(cls, default_section: Section): if isinstance(default_section, UniqueSection): if cls._default_config._unique_sections.get(default_section.name, None): cls._default_config._unique_sections[default_section.name]._update(default_section._to_dict()) else: cls._default_config._unique_sections[default_section.name] = default_section else: if def_sections := cls._default_config._sections.get(default_section.name, None): def_sections[default_section.id] = default_section else: cls._default_config._sections[default_section.name] = {default_section.id: default_section} cls._serializer._section_class[default_section.name] = default_section.__class__ # type: ignore cls.__json_serializer._section_class[default_section.name] = default_section.__class__ # type: ignore cls._compile_configs() @classmethod @_ConfigBlocker._check() def _register(cls, section): if isinstance(section, UniqueSection): if cls._python_config._unique_sections.get(section.name, None): cls._python_config._unique_sections[section.name]._update(section._to_dict()) else: cls._python_config._unique_sections[section.name] = section else: if sections := cls._python_config._sections.get(section.name, None): if sections.get(section.id, None): sections[section.id]._update(section._to_dict()) else: sections[section.id] = section else: cls._python_config._sections[section.name] = {section.id: section} cls._serializer._section_class[section.name] = section.__class__ cls.__json_serializer._section_class[section.name] = section.__class__ cls._compile_configs() @classmethod def _override_env_file(cls): if config_filename := os.environ.get(cls._ENVIRONMENT_VARIABLE_NAME_WITH_CONFIG_PATH): cls.__logger.info(f"Loading configuration provided by environment variable. Filename: '{config_filename}'") cls._env_file_config = cls._serializer._read(config_filename) cls.__logger.info(f"Configuration '{config_filename}' successfully loaded.") @classmethod def _compile_configs(cls): Config._override_env_file() cls._applied_config._clean() if cls._default_config: cls._applied_config._update(cls._default_config) if cls._python_config: cls._applied_config._update(cls._python_config) if cls._file_config: cls._applied_config._update(cls._file_config) if cls._env_file_config: cls._applied_config._update(cls._env_file_config) @classmethod def __log_message(cls, config): for issue in config._collector._warnings: cls.__logger.warning(str(issue)) for issue in config._collector._infos: cls.__logger.info(str(issue)) for issue in config._collector._errors: cls.__logger.error(str(issue)) if len(config._collector._errors) != 0: raise SystemExit("Configuration errors found. Please check the error log for more information.") @classmethod def _to_json(cls, _config: _Config) -> str: return cls.__json_serializer._serialize(_config) @classmethod def _from_json(cls, config_as_str: str) -> _Config: return cls.__json_serializer._deserialize(config_as_str) Config._override_env_file()
|
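The `Config` singleton above compiles four layers in `_compile_configs`: the defaults, the Python-code configuration, a file loaded with `override()`, and finally the file named by the `TAIPY_CONFIG_PATH` environment variable, with later layers taking precedence. The sketch below exercises the public methods documented in the row; the `.toml` file names and the `environment` property are placeholders.

```python
# Export/backup/check round-trip for the Config methods shown above.
from taipy.config import Config

Config.configure_global_app(environment="dev")  # the Python-code layer

Config.export("python_config.toml")    # writes only what was configured in Python code
Config.backup("applied_config.toml")   # writes the compiled result of every layer

# Config.override("overrides.toml")    # a file merged on top of the Python-code layer
Config.check()                         # logs issues and returns an IssueCollector
```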
import json import os def _get_version(): with open(f"{os.path.dirname(os.path.abspath(__file__))}{os.sep}version.json") as version_file: version = json.load(version_file) version_string = f'{version.get("major", 0)}.{version.get("minor", 0)}.{version.get("patch", 0)}' if vext := version.get("ext"): version_string = f"{version_string}.{vext}" return version_string
|
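`_get_version()` above builds the package version string from a `version.json` sitting next to the module. For instance, assuming a file with the hypothetical content below, the function would return `2.3.1.dev0`:

```python
# Worked example of the string _get_version() assembles from version.json.
import json

version = json.loads('{"major": 2, "minor": 3, "patch": 1, "ext": "dev0"}')  # hypothetical content
version_string = f'{version["major"]}.{version["minor"]}.{version["patch"]}'
if ext := version.get("ext"):
    version_string += f".{ext}"
print(version_string)  # 2.3.1.dev0
```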
from .config import Config from .common.frequency import Frequency from .common.scope import Scope
|
"""# Taipy Config The Taipy Config package is a Python library designed to configure a Taipy application. The main entrypoint is the `Config^` singleton class. It exposes some methods to configure the Taipy application and some attributes to retrieve the configuration values. """ from ._init import * from typing import List from .checker.issue import Issue from .checker.issue_collector import IssueCollector from .global_app.global_app_config import GlobalAppConfig from .section import Section from .unique_section import UniqueSection from .version import _get_version __version__ = _get_version() def _config_doc(func): def func_with_doc(section, attribute_name, default, configuration_methods, add_to_unconflicted_sections=False): import os if os.environ.get("GENERATING_TAIPY_DOC", None) and os.environ["GENERATING_TAIPY_DOC"] == "true": with open("config_doc.txt", "a") as f: from inspect import signature for exposed_configuration_method, configuration_method in configuration_methods: annotation = " @staticmethod\n" sign = " def " + exposed_configuration_method + str(signature(configuration_method)) + ":\n" doc = ' """' + configuration_method.__doc__ + '"""\n' content = " pass\n\n" f.write(annotation + sign + doc + content) return func(section, attribute_name, default, configuration_methods, add_to_unconflicted_sections) return func_with_doc @_config_doc def _inject_section( section_clazz, attribute_name: str, default: Section, configuration_methods: List[tuple], add_to_unconflicted_sections: bool = False, ): Config._register_default(default) if issubclass(section_clazz, UniqueSection): setattr(Config, attribute_name, Config.unique_sections[section_clazz.name]) elif issubclass(section_clazz, Section): setattr(Config, attribute_name, Config.sections[section_clazz.name]) else: raise TypeError if add_to_unconflicted_sections: Config._comparator._add_unconflicted_section(section_clazz.name) for exposed_configuration_method, configuration_method in configuration_methods: setattr(Config, exposed_configuration_method, configuration_method)
|
from abc import abstractmethod from typing import Any, Dict, Optional from .common._config_blocker import _ConfigBlocker from .common._template_handler import _TemplateHandler as _tpl from .common._validate_id import _validate_id class Section: """A Section as a consistent part of the Config. A section is defined by the section name (representing the type of objects that are configured) and a section id. """ _DEFAULT_KEY = "default" _ID_KEY = "id" def __init__(self, id, **properties): self.id = _validate_id(id) self._properties = properties or dict() @abstractmethod def __copy__(self): raise NotImplementedError @property @abstractmethod def name(self): raise NotImplementedError @abstractmethod def _clean(self): raise NotImplementedError @abstractmethod def _to_dict(self): raise NotImplementedError @classmethod @abstractmethod def _from_dict(cls, config_as_dict: Dict[str, Any], id, config): raise NotImplementedError @abstractmethod def _update(self, config_as_dict, default_section=None): raise NotImplementedError def __getattr__(self, item: str) -> Optional[Any]: return self._replace_templates(self._properties.get(item, None)) @property def properties(self): return {k: _tpl._replace_templates(v) for k, v in self._properties.items()} @properties.setter # type: ignore @_ConfigBlocker._check() def properties(self, val): self._properties = val def _replace_templates(self, value): return _tpl._replace_templates(value)
|
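`Section` above is the base class for every configuration node: the id is validated, arbitrary keyword properties are stored in `_properties`, `__getattr__` resolves `ENV[...]` templates on read, and the `properties` setter is guarded by `_ConfigBlocker`. The sketch below defines a throwaway subclass just to show that behaviour; `DemoSection`, its `DEMO` name and the `retries` property are all invented for the example, and the abstract methods are stubbed only as far as needed.

```python
# Hypothetical Section subclass illustrating property storage and template resolution.
import os

from taipy.config.section import Section


class DemoSection(Section):
    name = "DEMO"  # node name, chosen arbitrarily for this sketch

    def __copy__(self):
        return DemoSection(self.id, **self._properties)

    def _clean(self):
        self._properties.clear()

    def _to_dict(self):
        return dict(self._properties)

    @classmethod
    def _from_dict(cls, config_as_dict, id, config):
        return cls(id, **config_as_dict)

    def _update(self, config_as_dict, default_section=None):
        self._properties.update(config_as_dict)


os.environ["RETRIES"] = "3"  # placeholder environment variable
section = DemoSection("demo_1", retries="ENV[RETRIES]:int", label="plain value")
print(section.retries)     # 3 -- resolved through __getattr__ and the template handler
print(section.properties)  # {'retries': 3, 'label': 'plain value'}
```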
from abc import ABC from .common._validate_id import _validate_id from .section import Section class UniqueSection(Section, ABC): """A UniqueSection is a configuration `Section^` that can have only one instance. A UniqueSection is only defined by the section name. """ def __init__(self, **properties): super().__init__(self.name, **properties)
|
from copy import copy from typing import Dict from .global_app.global_app_config import GlobalAppConfig from .section import Section from .unique_section import UniqueSection class _Config: DEFAULT_KEY = "default" def __init__(self): self._sections: Dict[str, Dict[str, Section]] = {} self._unique_sections: Dict[str, UniqueSection] = {} self._global_config: GlobalAppConfig = GlobalAppConfig() def _clean(self): self._global_config._clean() for unique_section in self._unique_sections.values(): unique_section._clean() for sections in self._sections.values(): for section in sections.values(): section._clean() @classmethod def _default_config(cls): config = _Config() config._global_config = GlobalAppConfig.default_config() return config def _update(self, other_config): self._global_config._update(other_config._global_config._to_dict()) if other_config._unique_sections: for section_name, other_section in other_config._unique_sections.items(): if section := self._unique_sections.get(section_name, None): section._update(other_section._to_dict()) else: self._unique_sections[section_name] = copy(other_config._unique_sections[section_name]) if other_config._sections: for section_name, other_non_unique_sections in other_config._sections.items(): if non_unique_sections := self._sections.get(section_name, None): self.__update_sections(non_unique_sections, other_non_unique_sections) else: self._sections[section_name] = {} self.__add_sections(self._sections[section_name], other_non_unique_sections) def __add_sections(self, entity_config, other_entity_configs): for cfg_id, sub_config in other_entity_configs.items(): entity_config[cfg_id] = copy(sub_config) self.__point_nested_section_to_self(sub_config) def __update_sections(self, entity_config, other_entity_configs): if self.DEFAULT_KEY in other_entity_configs: if self.DEFAULT_KEY in entity_config: entity_config[self.DEFAULT_KEY]._update(other_entity_configs[self.DEFAULT_KEY]._to_dict()) else: entity_config[self.DEFAULT_KEY] = other_entity_configs[self.DEFAULT_KEY] for cfg_id, sub_config in other_entity_configs.items(): if cfg_id != self.DEFAULT_KEY: if cfg_id in entity_config: entity_config[cfg_id]._update(sub_config._to_dict(), entity_config.get(self.DEFAULT_KEY)) else: entity_config[cfg_id] = copy(sub_config) entity_config[cfg_id]._update(sub_config._to_dict(), entity_config.get(self.DEFAULT_KEY)) self.__point_nested_section_to_self(sub_config) def __point_nested_section_to_self(self, section): """Loop through attributes of a Section to find if any attribute has a list of Section as value. If there is, update each nested Section by the corresponding instance in self. Args: section (Section): The Section to search for nested sections. """ for _, attr_value in vars(section).items(): # ! This will fail if an attribute is a dictionary, or nested list of Sections. if not isinstance(attr_value, list): continue for index, item in enumerate(attr_value): if not isinstance(item, Section): continue if sub_item := self._sections.get(item.name, {}).get(item.id, None): attr_value[index] = sub_item
|
import toml # type: ignore from .._config import _Config from ..exceptions.exceptions import LoadingError from ._base_serializer import _BaseSerializer class _TomlSerializer(_BaseSerializer): """Convert configuration from TOML representation to Python Dict and reciprocally.""" @classmethod def _write(cls, configuration: _Config, filename: str): with open(filename, "w") as fd: toml.dump(cls._str(configuration), fd) @classmethod def _read(cls, filename: str) -> _Config: try: config_as_dict = cls._pythonify(dict(toml.load(filename))) return cls._from_dict(config_as_dict) except toml.TomlDecodeError as e: error_msg = f"Can not load configuration {e}" raise LoadingError(error_msg) @classmethod def _serialize(cls, configuration: _Config) -> str: return toml.dumps(cls._str(configuration)) @classmethod def _deserialize(cls, config_as_string: str) -> _Config: return cls._from_dict(cls._pythonify(dict(toml.loads(config_as_string))))
|
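`_TomlSerializer` above is the serializer behind `Config.load()`/`Config.export()` (the `Config` row earlier instantiates it as `_serializer`). A quick way to see the TOML it produces is to serialize the current Python-code configuration; the `flavor` property is an arbitrary demo value, and the private import path is an assumption based on the layout shown in these rows.

```python
# Peek at the TOML text the serializer would write for the Python-code configuration.
from taipy.config import Config
from taipy.config._serializer._toml_serializer import _TomlSerializer

Config.configure_global_app(flavor="demo")
print(_TomlSerializer._serialize(Config._python_config))
# Roughly:
# [TAIPY]
# flavor = "demo"
```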
# # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License.
|
import inspect import re import types from abc import abstractmethod from datetime import datetime, timedelta from typing import Any, Dict, Optional from .._config import _Config from ..common._template_handler import _TemplateHandler from ..common._validate_id import _validate_id from ..common.frequency import Frequency from ..common.scope import Scope from ..exceptions.exceptions import LoadingError from ..global_app.global_app_config import GlobalAppConfig from ..section import Section from ..unique_section import UniqueSection class _BaseSerializer(object): """Base serializer class for taipy configuration.""" _GLOBAL_NODE_NAME = "TAIPY" _section_class = {_GLOBAL_NODE_NAME: GlobalAppConfig} @classmethod @abstractmethod def _write(cls, configuration: _Config, filename: str): raise NotImplementedError @classmethod def _str(cls, configuration: _Config): config_as_dict = {cls._GLOBAL_NODE_NAME: configuration._global_config._to_dict()} for u_sect_name, u_sect in configuration._unique_sections.items(): config_as_dict[u_sect_name] = u_sect._to_dict() for sect_name, sections in configuration._sections.items(): config_as_dict[sect_name] = cls._to_dict(sections) return cls._stringify(config_as_dict) @classmethod def _to_dict(cls, sections: Dict[str, Any]): return {section_id: section._to_dict() for section_id, section in sections.items()} @classmethod def _stringify(cls, as_dict): if as_dict is None: return None if isinstance(as_dict, Section): return as_dict.id + ":SECTION" if isinstance(as_dict, Scope): return as_dict.name + ":SCOPE" if isinstance(as_dict, Frequency): return as_dict.name + ":FREQUENCY" if isinstance(as_dict, bool): return str(as_dict) + ":bool" if isinstance(as_dict, int): return str(as_dict) + ":int" if isinstance(as_dict, float): return str(as_dict) + ":float" if isinstance(as_dict, datetime): return as_dict.isoformat() + ":datetime" if isinstance(as_dict, timedelta): return cls._timedelta_to_str(as_dict) + ":timedelta" if inspect.isfunction(as_dict) or isinstance(as_dict, types.BuiltinFunctionType): return as_dict.__module__ + "." + as_dict.__name__ + ":function" if inspect.isclass(as_dict): return as_dict.__module__ + "." + as_dict.__qualname__ + ":class" if isinstance(as_dict, dict): return {str(key): cls._stringify(val) for key, val in as_dict.items()} if isinstance(as_dict, list): return [cls._stringify(val) for val in as_dict] if isinstance(as_dict, tuple): return [cls._stringify(val) for val in as_dict] return as_dict @staticmethod def _extract_node(config_as_dict, cls_config, node, config: Optional[Any]) -> Dict[str, Section]: res = {} for key, value in config_as_dict.get(node, {}).items(): # my_task, {input=[], output=[my_data_node], ...} key = _validate_id(key) res[key] = cls_config._from_dict(value, key, config) # if config is None else cls_config._from_dict(key, # value, config) return res @classmethod def _from_dict(cls, as_dict) -> _Config: config = _Config() config._global_config = GlobalAppConfig._from_dict(as_dict.get(cls._GLOBAL_NODE_NAME, {})) for section_name, sect_as_dict in as_dict.items(): if section_class := cls._section_class.get(section_name, None): if issubclass(section_class, UniqueSection): config._unique_sections[section_name] = section_class._from_dict( sect_as_dict, None, None ) # type: ignore elif issubclass(section_class, Section): config._sections[section_name] = cls._extract_node(as_dict, section_class, section_name, config) return config @classmethod def _pythonify(cls, val): match = re.fullmatch(_TemplateHandler._PATTERN, str(val)) if not match: if isinstance(val, str): TYPE_PATTERN = ( r"^(.+):(\bbool\b|\bstr\b|\bint\b|\bfloat\b|\bdatetime\b||\btimedelta\b|" r"\bfunction\b|\bclass\b|\bSCOPE\b|\bFREQUENCY\b|\bSECTION\b)?$" ) match = re.fullmatch(TYPE_PATTERN, str(val)) if match: actual_val = match.group(1) dynamic_type = match.group(2) if dynamic_type == "SECTION": return actual_val if dynamic_type == "FREQUENCY": return Frequency[actual_val] if dynamic_type == "SCOPE": return Scope[actual_val] if dynamic_type == "bool": return _TemplateHandler._to_bool(actual_val) elif dynamic_type == "int": return _TemplateHandler._to_int(actual_val) elif dynamic_type == "float": return _TemplateHandler._to_float(actual_val) elif dynamic_type == "datetime": return _TemplateHandler._to_datetime(actual_val) elif dynamic_type == "timedelta": return _TemplateHandler._to_timedelta(actual_val) elif dynamic_type == "function": return _TemplateHandler._to_function(actual_val) elif dynamic_type == "class": return _TemplateHandler._to_class(actual_val) elif dynamic_type == "str": return actual_val else: error_msg = f"Error loading toml configuration at {val}. {dynamic_type} type is not supported." raise LoadingError(error_msg) if isinstance(val, dict): return {str(k): cls._pythonify(v) for k, v in val.items()} if isinstance(val, list): return [cls._pythonify(v) for v in val] return val @classmethod def _timedelta_to_str(cls, obj: timedelta) -> str: total_seconds = obj.total_seconds() return ( f"{int(total_seconds // 86400)}d" f"{int(total_seconds % 86400 // 3600)}h" f"{int(total_seconds % 3600 // 60)}m" f"{int(total_seconds % 60)}s" )
|
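The `_stringify`/`_pythonify` pair above defines the typed-string encoding used in the exported files: scalars carry a `:type` suffix, enums become `NAME:SCOPE` or `NAME:FREQUENCY`, nested sections are written as `id:SECTION`, and functions or classes are stored as dotted paths. The private classmethods can be poked at directly to see the encoding; the import paths below are assumptions based on the layout shown in these rows.

```python
# Round-trip through the typed-string encoding implemented above.
from taipy.config import Scope
from taipy.config._serializer._toml_serializer import _TomlSerializer

print(_TomlSerializer._stringify(True))             # 'True:bool'
print(_TomlSerializer._stringify(5))                # '5:int'
print(_TomlSerializer._stringify(Scope.SCENARIO))   # 'SCENARIO:SCOPE'

print(_TomlSerializer._pythonify("5:int"))           # 5
print(_TomlSerializer._pythonify("True:bool"))       # True
print(_TomlSerializer._pythonify("SCENARIO:SCOPE"))  # Scope.SCENARIO
```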