# Create app to read and display data from Excel file
import pandas as pd
from taipy import Gui

# ---- READ EXCEL ----
df = pd.read_excel(
    io="data/supermarkt_sales.xlsx",
    engine="openpyxl",
    sheet_name="Sales",
    skiprows=3,
    usecols="B:R",
    nrows=1000,
)

# Add 'hour' column to dataframe
df["hour"] = pd.to_datetime(df["Time"], format="%H:%M:%S").dt.hour

# initialization of variables
cities = list(df["City"].unique())
types = list(df["Customer_type"].unique())
genders = list(df["Gender"].unique())
city = cities
customer_type = types
gender = genders

layout = {"margin": {"l": 220}}

# Markdown for the entire page
## NOTE: {: .orange} references a color from main.css used to style my text
## <text|
## |text>
## "text" here is just a name given to my part/my section
## it has no meaning in the code
page = """<|toggle|theme|>

<|layout|columns=20 80|gap=30px|
<sidebar|
## Please **filter**{: .orange} here:

<|{city}|selector|lov={cities}|multiple|label=Select the City|dropdown|on_change=on_filter|width=100%|>

<|{customer_type}|selector|lov={types}|multiple|label=Select the Customer Type|dropdown|on_change=on_filter|width=100%|>

<|{gender}|selector|lov={genders}|multiple|label=Select the Gender|dropdown|on_change=on_filter|width=100%|>
|sidebar>

<main_page|
# 📊 **Sales**{: .orange} Dashboard

<|layout|columns=1 1 1|
<total_sales|
## **Total**{: .orange} sales:
### US $ <|{int(df_selection["Total"].sum())}|>
|total_sales>

<average_rating|
## **Average**{: .orange} Rating:
### <|{round(df_selection["Rating"].mean(), 1)}|> <|{"⭐" * int(round(round(df_selection["Rating"].mean(), 1), 0))}|>
|average_rating>

<average_sale|
## Average Sales Per **Transaction**{: .orange}:
### US $ <|{round(df_selection["Total"].mean(), 2)}|>
|average_sale>
|>

<br/>

Display df_selection in an expandable
<|Sales Table|expandable|expanded=False|
<|{df_selection}|table|width=100%|page_size=5|rebuild|class_name=table|>
|>

<charts|
<|{sales_by_hour}|chart|x=Hour|y=Total|type=bar|title=Sales by Hour|color=#ff462b|>

<|{sales_by_product_line}|chart|x=Total|y=Product|type=bar|orientation=h|title=Sales by Product|layout={layout}|color=#ff462b|>
|charts>
|main_page>
|>

Code from [Coding is Fun](https://github.com/Sven-Bo)

Get the Taipy Code [here](https://github.com/Avaiga/demo-sales-dashboard) and the original code [here](https://github.com/Sven-Bo/streamlit-sales-dashboard)
"""


def filter(city, customer_type, gender):
    df_selection = df[
        df["City"].isin(city)
        & df["Customer_type"].isin(customer_type)
        & df["Gender"].isin(gender)
    ]

    # SALES BY PRODUCT LINE [BAR CHART]
    sales_by_product_line = (
        df_selection[["Product line", "Total"]]
        .groupby(by=["Product line"])
        .sum()[["Total"]]
        .sort_values(by="Total")
    )
    sales_by_product_line["Product"] = sales_by_product_line.index

    # SALES BY HOUR [BAR CHART]
    sales_by_hour = (
        df_selection[["hour", "Total"]].groupby(by=["hour"]).sum()[["Total"]]
    )
    sales_by_hour["Hour"] = sales_by_hour.index

    return df_selection, sales_by_product_line, sales_by_hour


def on_filter(state):
    state.df_selection, state.sales_by_product_line, state.sales_by_hour = filter(
        state.city, state.customer_type, state.gender
    )


if __name__ == "__main__":
    # initialize dataframes
    df_selection, sales_by_product_line, sales_by_hour = filter(
        city, customer_type, gender
    )

    # run the app
    Gui(page).run()
|
# Create an app with slider and chart
from taipy.gui import Gui
from math import cos, exp

value = 10

page = """
Markdown
# Taipy *Demo*

Value: <|{value}|text|>

<|{value}|slider|on_change=on_slider|>

<|{data}|chart|>
"""

def compute_data(decay: int) -> list:
    return [cos(i/6) * exp(-i*decay/600) for i in range(100)]

def on_slider(state):
    state.data = compute_data(state.value)

data = compute_data(value)

Gui(page).run(use_reloader=True, port=5002)
|
# Create app to predict covid in the world
from taipy.gui import Gui
import taipy as tp

from pages.country.country import country_md
from pages.world.world import world_md
from pages.map.map import map_md
from pages.predictions.predictions import predictions_md, selected_scenario
from pages.root import root, selected_country, selector_country

from config.config import Config

pages = {
    '/': root,
    "Country": country_md,
    "World": world_md,
    "Map": map_md,
    "Predictions": predictions_md
}

gui_multi_pages = Gui(pages=pages)

if __name__ == '__main__':
    tp.Core().run()
    gui_multi_pages.run(title="Covid Dashboard")
|
# Create app for finance data analysis
import yfinance as yf
from taipy.gui import Gui
from taipy.gui.data.decimator import MinMaxDecimator, RDP, LTTB

df_AAPL = yf.Ticker("AAPL").history(interval="1d", period="100Y")
df_AAPL["DATE"] = df_AAPL.index.astype("int64").astype(float)

n_out = 500
decimator_instance = MinMaxDecimator(n_out=n_out)

decimate_data_count = len(df_AAPL)

page = """
# Decimator

From a data length of <|{len(df_AAPL)}|> to <|{n_out}|>

## Without decimator
<|{df_AAPL}|chart|x=DATE|y=Open|>

## With decimator
<|{df_AAPL}|chart|x=DATE|y=Open|decimator=decimator_instance|>
"""

gui = Gui(page)
gui.run(port=5026)
|
# Create an app to upload a csv and display it in a table
from taipy.gui import Gui
import pandas as pd

data = []
data_path = ""

def data_upload(state):
    state.data = pd.read_csv(state.data_path)

page = """
<|{data_path}|file_selector|on_action=data_upload|>

<|{data}|table|>
"""

Gui(page).run()
|
# Create an app to visualize sin and amp with slider and chart
from taipy.gui import Gui
from math import sin

amp = 1

def compute_data(amp):
    x = [i/10 for i in range(100)]
    return [sin(i) * amp for i in x]

def update(state):
    state.data = compute_data(state.amp)

data = compute_data(amp)

page = """
Amplitude: <|{amp}|slider|on_change=update|>

<|{data}|chart|>
"""

Gui(page).run()
|
# Create an app to visualize sin, cos with slider and chart
from taipy.gui import Gui
from math import sin, cos, exp, pi

frequency = 1
decay = 0.01

def compute_data(frequency, decay):
    x = [i/10 for i in range(100)]
    y1 = [sin(i*frequency*2*pi) * exp(-i*decay) for i in x]
    y2 = [cos(i*frequency*2*pi) * exp(-i*decay) for i in x]
    return {"x": x, "Sine": y1, "Cosine": y2}

data = compute_data(frequency, decay)

page = """
# Sine and Cosine Functions

Frequency: <|{frequency}|slider|min=0|max=10|step=0.1|on_change=update|>

Decay: <|{decay}|slider|min=0|max=1|step=0.01|on_change=update|>

<|{data}|chart|x=x|y[1]=Sine|y[2]=Cosine|>
"""

def update(state):
    state.data = compute_data(state.frequency, state.decay)

Gui(page).run(use_reloader=True)
|
# Create app to visualize country population
import numpy as np
import pandas as pd
from taipy.gui import Gui, Markdown

from data.data import data

selected_country = 'France'
data_country_date = None

# countries available in the selector (derived from the dataset)
selector_country = list(data["Country/Region"].unique())

representation_selector = ['Cumulative', 'Density']
selected_representation = representation_selector[0]

layout = {'barmode': 'stack', "hovermode": "x"}
options = {"unselected": {"marker": {"opacity": 0.5}}}


def initialize_case_evolution(data, selected_country='France'):
    # Aggregation of the dataframe to erase the regions that will not be used here
    data_country_date = data.groupby(["Country/Region", 'Date'])\
                            .sum()\
                            .reset_index()
    # a country is selected, here France by default
    data_country_date = data_country_date.loc[data_country_date['Country/Region'] == selected_country]
    return data_country_date


data_country_date = initialize_case_evolution(data)

pie_chart = pd.DataFrame({"labels": ["Deaths", "Recovered", "Confirmed"],
                          "values": [data_country_date.iloc[-1, 6],
                                     data_country_date.iloc[-1, 5],
                                     data_country_date.iloc[-1, 4]]})


def convert_density(state):
    if state.selected_representation == 'Density':
        df_temp = state.data_country_date.copy()
        df_temp['Deaths'] = df_temp['Deaths'].diff().fillna(0)
        df_temp['Recovered'] = df_temp['Recovered'].diff().fillna(0)
        df_temp['Confirmed'] = df_temp['Confirmed'].diff().fillna(0)
        state.data_country_date = df_temp
    else:
        state.data_country_date = initialize_case_evolution(data, state.selected_country)


def on_change_country(state):
    # state contains all the Gui variables and it is through this state variable that we can update the Gui
    # state.selected_country, state.data_country_date, ...
    # update data_country_date with the right country (use initialize_case_evolution)
    print("Chosen country: ", state.selected_country)
    state.data_country_date = initialize_case_evolution(data, state.selected_country)
    state.pie_chart = pd.DataFrame({"labels": ["Deaths", "Recovered", "Confirmed"],
                                    "values": [state.data_country_date.iloc[-1, 6],
                                               state.data_country_date.iloc[-1, 5],
                                               state.data_country_date.iloc[-1, 4]]})
    convert_density(state)


page = """
# **Country**{: .color-primary} Statistics

<|layout|columns=1 1 1|
<|{selected_country}|selector|lov={selector_country}|on_change=on_change_country|dropdown|label=Country|>

<|{selected_representation}|toggle|lov={representation_selector}|on_change=convert_density|>
|>

<br/>

<|layout|columns=1 1 1 1|gap=50px|
<|card|
**Deaths**{: .color-primary}
<|{'{:,}'.format(int(data_country_date.iloc[-1]['Deaths'])).replace(',', ' ')}|text|class_name=h2|>
|>

<|card|
**Recovered**{: .color-primary}
<|{'{:,}'.format(int(data_country_date.iloc[-1]['Recovered'])).replace(',', ' ')}|text|class_name=h2|>
|>

<|card|
**Confirmed**{: .color-primary}
<|{'{:,}'.format(int(data_country_date.iloc[-1]['Confirmed'])).replace(',', ' ')}|text|class_name=h2|>
|>
|>

<br/>

<|layout|columns=2 1|
<|{data_country_date}|chart|type=bar|x=Date|y[3]=Deaths|y[2]=Recovered|y[1]=Confirmed|layout={layout}|options={options}|title=Covid Evolution|>

<|{pie_chart}|chart|type=pie|values=values|labels=labels|title=Distribution between cases|>
|>
"""

Gui(page).run(use_reloader=True)
|
# Create Taipy app to generate mandelbrot fractals
from taipy import Gui
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt

WINDOW_SIZE = 500
cm = plt.cm.get_cmap("viridis")


def generate_mandelbrot(
    center: int = WINDOW_SIZE / 2,
    dx_range: int = 1000,
    dx_start: float = -0.12,
    dy_range: float = 1000,
    dy_start: float = -0.82,
    iterations: int = 50,
    max_value: int = 200,
    i: int = 0,
) -> str:
    mat = np.zeros((WINDOW_SIZE, WINDOW_SIZE))
    for y in range(WINDOW_SIZE):
        for x in range(WINDOW_SIZE):
            dx = (x - center) / dx_range + dx_start
            dy = (y - center) / dy_range + dy_start
            a = dx
            b = dy
            for t in range(iterations):
                d = (a * a) - (b * b) + dx
                b = 2 * (a * b) + dy
                a = d
                h = d > max_value
                if h is True:
                    mat[x, y] = t

    colored_mat = cm(mat / mat.max())
    im = Image.fromarray((colored_mat * 255).astype(np.uint8))
    path = f"mandelbrot_{i}.png"
    im.save(path)
    return path


def generate(state):
    state.i = state.i + 1
    state.path = generate_mandelbrot(
        dx_start=-state.dx_start / 100,
        dy_start=(state.dy_start - 100) / 100,
        iterations=state.iterations,
        i=state.i,
    )


i = 0
dx_start = 11
dy_start = 17
iterations = 50

path = generate_mandelbrot(
    dx_start=-dx_start / 100,
    dy_start=(dy_start - 100) / 100,
)

page = """
# Mandelbrot Generator

<|layout|columns=35 65|
Display image from path
<|{path}|image|width=500px|height=500px|class_name=img|>

Iterations:<br />
Create a slider to select iterations
<|{iterations}|slider|min=10|max=50|continuous=False|on_change=generate|><br />
X Position:<br />
<|{dy_start}|slider|min=0|max=100|continuous=False|on_change=generate|><br />
Y Position:<br />
Slider dx_start
<|{dx_start}|slider|min=0|max=100|continuous=False|on_change=generate|><br />
|>
"""

Gui(page).run(title="Mandelbrot Generator")
|
# Create app to auto generate Twitter status
import logging
import random
import re

# Import from 3rd party libraries
from taipy.gui import Gui, State, notify

# Import modules
import oai

# Configure logger
logging.basicConfig(format="\n%(asctime)s\n%(message)s", level=logging.INFO, force=True)


def error_prompt_flagged(state, prompt):
    """Notify user that a prompt has been flagged."""
    notify(state, "error", "Prompt flagged as inappropriate.")
    logging.info(f"Prompt flagged as inappropriate: {prompt}")


def error_too_many_requests(state):
    """Notify user that too many requests have been made."""
    notify(
        state,
        "error",
        "Too many requests. Please wait a few seconds before generating another text or image.",
    )
    logging.info(f"Session request limit reached: {state.n_requests}")
    state.n_requests = 1


# Define functions
def generate_text(state):
    """Generate Tweet text."""
    state.tweet = ""
    state.image = None

    # Check the number of requests done by the user
    if state.n_requests >= 5:
        error_too_many_requests(state)
        return

    # Check if the user has put a topic
    if state.topic == "":
        notify(state, "error", "Please enter a topic")
        return

    # Create the prompt and add a style or not
    if state.style:
        state.prompt = (
            f"Write a {state.mood} Tweet about {state.topic} in less than 120 characters "
            f"and with the style of {state.style}:\n\n\n\n"
        )
    else:
        state.prompt = f"Write a {state.mood} Tweet about {state.topic} in less than 120 characters:\n\n"

    # openai configured and check if text is flagged
    openai = oai.Openai()
    flagged = openai.moderate(state.prompt)

    if flagged:
        error_prompt_flagged(state, f"Prompt: {state.prompt}\n")
        return
    else:
        # Generate the tweet
        state.n_requests += 1
        state.tweet = openai.complete(state.prompt).strip().replace('"', "")

        # Notify the user in console and in the GUI
        logging.info(
            f"Topic: {state.prompt}{state.mood}{state.style}\n"
            f"Tweet: {state.tweet}"
        )
        notify(state, "success", "Tweet created!")


def generate_image(state):
    """Generate Tweet image."""
    notify(state, "info", "Generating image...")

    # Check the number of requests done by the user
    if state.n_requests >= 5:
        error_too_many_requests(state)
        return

    state.image = None

    # Creates the prompt
    prompt_wo_hashtags = re.sub("#[A-Za-z0-9_]+", "", state.prompt)
    processing_prompt = (
        "Create a detailed but brief description of an image that captures "
        f"the essence of the following text:\n{prompt_wo_hashtags}\n\n"
    )

    # Openai configured and check if text is flagged
    openai = oai.Openai()
    flagged = openai.moderate(processing_prompt)

    if flagged:
        error_prompt_flagged(state, processing_prompt)
        return
    else:
        state.n_requests += 1

        # Generate the prompt that will create the image
        processed_prompt = (
            openai.complete(prompt=processing_prompt, temperature=0.5, max_tokens=40)
            .strip()
            .replace('"', "")
            .split(".")[0]
            + "."
        )

        # Generate the image
        state.image = openai.image(processed_prompt)

        # Notify the user in console and in the GUI
        logging.info(f"Tweet: {state.prompt}\nImage prompt: {processed_prompt}")
        notify(state, "success", "Image created!")


def feeling_lucky(state):
    """Generate a feeling-lucky tweet."""
    with open("moods.txt") as f:
        sample_moods = f.read().splitlines()
    state.topic = "an interesting topic"
    state.mood = random.choice(sample_moods)
    state.style = ""
    generate_text(state)


# Variables
tweet = ""
prompt = ""
n_requests = 0
topic = "AI"
mood = "inspirational"
style = "elonmusk"
image = None


# Called whenever there is a problem
def on_exception(state, function_name: str, ex: Exception):
    logging.error(f"Problem {ex} \nin {function_name}")
    notify(state, "error", f"Problem {ex} \nin {function_name}")


def update_documents(state: State, docs: list[dict]) -> None:
    """
    Updates a partial with a list of documents

    Args:
        state: The state of the GUI
        docs: A list of documents
    """
    updated_part = ""
    for doc in docs:
        title = doc["title"]
        summary = doc["summary"]
        link = doc["link"]
        updated_part += f"""
<a href="{link}" target="_blank">
<h3>{title}</h3>
</a>
<p>{summary}</p>
<br/>
"""
    state.p.update_content(state, updated_part)


# Markdown for the entire page
## <text|
## |text>
## "text" here is just a name given to my part/my section
## it has no meaning in the code
page = """
<|container|
# **Generate**{: .color-primary} Tweets

This mini-app generates Tweets using OpenAI's GPT-3 based [Davinci model](https://beta.openai.com/docs/models/overview) for texts and [DALL·E](https://beta.openai.com/docs/guides/images) for images. You can find the code on [GitHub](https://github.com/Avaiga/demo-tweet-generation) and the original author on [Twitter](https://twitter.com/kinosal).

<br/>

<|layout|columns=1 1 1|gap=30px|class_name=card|
<topic|
## **Topic**{: .color-primary} (or hashtag)
<|{topic}|input|label=Topic (or hashtag)|>
|topic>

<mood|
## **Mood**{: .color-primary}
<|{mood}|input|label=Mood (e.g. inspirational, funny, serious) (optional)|>
|mood>

<style|
## Twitter **account**{: .color-primary}
<|{style}|input|label=Twitter account handle to style-copy recent Tweets (optional)|>
|style>

Create a Generate text button
<|Generate text|button|on_action=generate_text|label=Generate text|>

<|Feeling lucky|button|on_action=feeling_lucky|label=Feeling Lucky|>
|>

<br/>

---

<br/>

### Generated **Tweet**{: .color-primary}

Create a text input for the tweet
<|{tweet}|input|multiline|label=Resulting tweet|class_name=fullwidth|>

<center><|Generate image|button|on_action=generate_image|label=Generate image|active={prompt!="" and tweet!=""}|></center>

<image|part|render={prompt != "" and tweet != "" and image is not None}|class_name=card|
### **Image**{: .color-primary} from Dall-e
Display image
<center><|{image}|image|height=400px|></center>
|image>

Break line
<br/>

**Code from [@kinosal](https://twitter.com/kinosal)**

Original code can be found [here](https://github.com/kinosal/tweet)
|>
"""

if __name__ == "__main__":
    Gui(page).run(dark_mode=False, port=5089)
|
# Create app for py2jsonl3.py
import os
import json
EXCLUDED_FILES = ["CODE_OF_CONDUCT.md", "CONTRIBUTING.md", "INSTALLATION.md", "README.md"]
def find_files(directory, extensions):
for root, dirs, files in os.walk(directory):
for file in files:
if file.endswith(extensions) and file not in EXCLUDED_FILES:
yield os.path.join(root, file)
def extract_content(file_path):
with open(file_path, 'r', encoding='utf-8') as file:
return file.read()
def write_to_jsonl(output_file, data):
with open(output_file, 'a', encoding='utf-8') as file:
json_record = json.dumps(data)
file.write(json_record + '\n')
def main(directory, output_file):
for file_path in find_files(directory, ('.py', '.md')):
file_content = extract_content(file_path)
file_comment = f"# Create app for {os.path.basename(file_path)}"
data = {"text": file_comment + '\n' + file_content}
write_to_jsonl(output_file, data)
directory = 'taipy_repos3' # Replace with the path to your directory
output_file = 'output.jsonl' # Name of the output JSONL file
main(directory, output_file)
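# A minimal sketch (not part of the original script) showing how the generated
# output.jsonl could be read back; it only assumes the {"text": ...} records
# written by write_to_jsonl above.
def read_jsonl(path):
    with open(path, 'r', encoding='utf-8') as file:
        return [json.loads(line) for line in file]
# Example: records = read_jsonl(output_file); print(records[0]["text"].splitlines()[0])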
|
# Create app for demo-remove-background main.py
from taipy.gui import Gui, notify
from rembg import remove
from PIL import Image
from io import BytesIO
path_upload = ""
path_download = "fixed_img.png"
original_image = None
fixed_image = None
fixed = False
page = """<|toggle|theme|>
<page|layout|columns=300px 1fr|
<|sidebar|
### Removing **Background**{: .color-primary} from your image
<br/>
Upload and download
<|{path_upload}|file_selector|on_action=fix_image|extensions=.png,.jpg|label=Upload original image|>
<br/>
Download it here
<|{path_download}|file_download|label=Download fixed image|active={fixed}|>
|>
<|container|
# Image Background **Eliminator**{: .color-primary}
🐶 Give it a try by uploading an image to witness the seamless removal of the background. You can download images in full quality from the sidebar.
This code is open source and accessible on [GitHub](https://github.com/Avaiga/demo-remove-background).
<br/>
<images|layout|columns=1 1|
<col1|card text-center|part|render={fixed}|
### Original Image 📷
<|{original_image}|image|>
|col1>
<col2|card text-center|part|render={fixed}|
### Fixed Image 🔧
<|{fixed_image}|image|>
|col2>
|images>
|>
|page>
"""
def convert_image(img):
buf = BytesIO()
img.save(buf, format="PNG")
byte_im = buf.getvalue()
return byte_im
def fix_image(state):
notify(state, 'info', 'Uploading original image...')
image = Image.open(state.path_upload)
notify(state, 'info', 'Removing the background...')
fixed_image = remove(image)
fixed_image.save("fixed_img.png")
notify(state, 'success', 'Background removed successfully!')
state.original_image = convert_image(image)
state.fixed_image = convert_image(fixed_image)
state.fixed = True
if __name__ == "__main__":
Gui(page=page).run(margin="0px", title='Background Remover')
|
# Create app for demo-tweet-generation oai.py
"""OpenAI API connector."""
# Import from standard library
import os
import logging
# Import from 3rd party libraries
import openai
# Assign credentials from environment variable or streamlit secrets dict
openai.api_key = os.environ.get("OPENAI_API_KEY", "Enter your token here")
# Suppress openai request/response logging
# Handle by manually changing the respective APIRequestor methods in the openai package
# Does not work hosted on Streamlit since all packages are re-installed by Poetry
# Alternatively (affects all messages from this logger):
logging.getLogger("openai").setLevel(logging.WARNING)
class Openai:
"""OpenAI Connector."""
@staticmethod
def moderate(prompt: str) -> bool:
"""Call OpenAI GPT Moderation with text prompt.
Args:
prompt: text prompt
Return: boolean if flagged
"""
try:
response = openai.Moderation.create(prompt)
return response["results"][0]["flagged"]
except Exception as e:
logging.error(f"OpenAI API error: {e}")
@staticmethod
def complete(prompt: str, temperature: float = 0.9, max_tokens: int = 50) -> str:
"""Call OpenAI GPT Completion with text prompt.
Args:
prompt: text prompt
Return: predicted response text
"""
kwargs = {
"engine": "text-davinci-003",
"prompt": prompt,
"temperature": temperature,
"max_tokens": max_tokens,
"top_p": 1, # default
"frequency_penalty": 0, # default,
"presence_penalty": 0, # default
}
try:
response = openai.Completion.create(**kwargs)
return response["choices"][0]["text"]
except Exception as e:
logging.error(f"OpenAI API error: {e}")
@staticmethod
def image(prompt: str) -> str:
"""Call OpenAI Image Create with text prompt.
Args:
prompt: text prompt
Return: image url
"""
try:
response = openai.Image.create(
prompt=prompt,
n=1,
size="512x512",
response_format="url",
)
return response["data"][0]["url"]
except Exception as e:
logging.error(f"OpenAI API error: {e}")
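# A minimal, hypothetical usage sketch of the connector above (assumes a valid
# OpenAI API key is configured); not part of the original demo module.
if __name__ == "__main__":
    client = Openai()
    demo_prompt = "Write a short, friendly Tweet about open-source software:\n\n"
    if not client.moderate(demo_prompt):
        print(client.complete(demo_prompt))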
|
# Create app for demo-tweet-generation main.py
# Import from standard library
import logging
import random
import re
# Import from 3rd party libraries
from taipy.gui import Gui, notify
# Import modules
import oai
# Configure logger
logging.basicConfig(format="\n%(asctime)s\n%(message)s", level=logging.INFO, force=True)
def error_prompt_flagged(state, prompt):
"""Notify user that a prompt has been flagged."""
notify(state, "error", "Prompt flagged as inappropriate.")
logging.info(f"Prompt flagged as inappropriate: {prompt}")
def error_too_many_requests(state):
"""Notify user that too many requests have been made."""
notify(state, "error", "Too many requests. Please wait a few seconds before generating another text or image.")
logging.info(f"Session request limit reached: {state.n_requests}")
state.n_requests = 1
# Define functions
def generate_text(state):
"""Generate Tweet text."""
state.tweet = ""
state.image = None
# Check the number of requests done by the user
if state.n_requests >= 5:
error_too_many_requests(state)
return
# Check if the user has put a topic
if state.topic == "":
notify(state, "error", "Please enter a topic")
return
# Create the prompt and add a style or not
    if state.style:
        state.prompt = (
            f"Write a {state.mood} Tweet about {state.topic} in less than 120 characters "
            f"and with the style of {state.style}:\n\n\n\n"
        )
    else:
        state.prompt = f"Write a {state.mood} Tweet about {state.topic} in less than 120 characters:\n\n"
# openai configured and check if text is flagged
openai = oai.Openai()
flagged = openai.moderate(state.prompt)
if flagged:
error_prompt_flagged(state, f"Prompt: {state.prompt}\n")
return
else:
# Generate the tweet
state.n_requests += 1
state.tweet = (
openai.complete(state.prompt).strip().replace('"', "")
)
# Notify the user in console and in the GUI
logging.info(
f"Topic: {state.prompt}{state.mood}{state.style}\n"
f"Tweet: {state.tweet}"
)
notify(state, "success", "Tweet created!")
def generate_image(state):
"""Generate Tweet image."""
notify(state, "info", "Generating image...")
# Check the number of requests done by the user
if state.n_requests >= 5:
error_too_many_requests(state)
return
state.image = None
# Creates the prompt
prompt_wo_hashtags = re.sub("#[A-Za-z0-9_]+", "", state.prompt)
processing_prompt = (
"Create a detailed but brief description of an image that captures "
f"the essence of the following text:\n{prompt_wo_hashtags}\n\n"
)
# Openai configured and check if text is flagged
openai = oai.Openai()
flagged = openai.moderate(processing_prompt)
if flagged:
error_prompt_flagged(state, processing_prompt)
return
else:
state.n_requests += 1
# Generate the prompt that will create the image
processed_prompt = (
openai.complete(
prompt=processing_prompt, temperature=0.5, max_tokens=40
)
.strip()
.replace('"', "")
.split(".")[0]
+ "."
)
# Generate the image
state.image = openai.image(processed_prompt)
# Notify the user in console and in the GUI
logging.info(f"Tweet: {state.prompt}\nImage prompt: {processed_prompt}")
notify(state, "success", f"Image created!")
# Variables
tweet = ""
prompt = ""
n_requests = 0
topic = "AI"
mood = "inspirational"
style = "elonmusk"
image = None
# Called whenever there is a problem
def on_exception(state, function_name: str, ex: Exception):
logging.error(f"Problem {ex} \nin {function_name}")
notify(state, 'error', f"Problem {ex} \nin {function_name}")
# Markdown for the entire page
## <text|
## |text>
## "text" here is just a name given to my part/my section
## it has no meaning in the code
page = """
<|container|
# **Generate**{: .color-primary} Tweets
This mini-app generates Tweets using OpenAI's GPT-3 based [Davinci model](https://beta.openai.com/docs/models/overview) for texts and [DALL·E](https://beta.openai.com/docs/guides/images) for images. You can find the code on [GitHub](https://github.com/Avaiga/demo-tweet-generation) and the original author on [Twitter](https://twitter.com/kinosal).
<br/>
<|layout|columns=1 1 1|gap=30px|class_name=card|
<topic|
## **Topic**{: .color-primary} (or hashtag)
<|{topic}|input|label=Topic (or hashtag)|>
|topic>
<mood|
## **Mood**{: .color-primary}
<|{mood}|input|label=Mood (e.g. inspirational, funny, serious) (optional)|>
|mood>
<style|
## Twitter **account**{: .color-primary}
<|{style}|input|label=Twitter account handle to style-copy recent Tweets (optional)|>
|style>
<|Generate text|button|on_action=generate_text|label=Generate text|>
|>
<br/>
---
<br/>
### Generated **Tweet**{: .color-primary}
<|{tweet}|input|multiline|label=Resulting tweet|class_name=fullwidth|>
<center><|Generate image|button|on_action=generate_image|label=Generate image|active={prompt!="" and tweet!=""}|></center>
<image|part|render={prompt != "" and tweet != "" and image is not None}|class_name=card|
### **Image**{: .color-primary} from Dall-e
<center><|{image}|image|height=400px|></center>
|image>
<br/>
**Code from [@kinosal](https://twitter.com/kinosal)**
Original code can be found [here](https://github.com/kinosal/tweet)
|>
"""
if __name__ == "__main__":
Gui(page).run(title='Tweet Generation')
|
# Create app for demo-realtime-pollution sender.py
# echo-client.py
import math
import time
import socket
import pickle
import numpy as np
HOST = "127.0.0.1"
PORT = 65432
init_lat = 49.247
init_long = 1.377
factory_lat = 49.246
factory_long = 1.369
diff_lat = abs(init_lat - factory_lat) * 15
diff_long = abs(init_long - factory_long) * 15
lats_unique = np.arange(init_lat - diff_lat, init_lat + diff_lat, 0.001)
longs_unique = np.arange(init_long - diff_long, init_long + diff_long, 0.001)
countdown = 20
def pollution(lat: float, long: float):
"""
Return pollution level in percentage
Pollution should be centered around the factory
Pollution should decrease with distance to factory
Pollution should have an added random component
Args:
- lat: latitude
- long: longitude
Returns:
- pollution level
"""
global countdown
return 80 * (0.5 + 0.5 * math.sin(countdown / 20)) * math.exp(
-(0.8 * (lat - factory_lat) ** 2 + 0.2 * (long - factory_long) ** 2) / 0.00005
) + np.random.randint(0, 50)
lats = []
longs = []
pollutions = []
for lat in lats_unique:
for long in longs_unique:
lats.append(lat)
longs.append(long)
pollutions.append(pollution(lat, long))
def update():
"""
Update the pollution levels
"""
for i, _ in enumerate(lats):
pollutions[i] = pollution(lats[i], longs[i])
return pollutions
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.connect((HOST, PORT))
while True:
data = pickle.dumps(pollutions)
s.sendall(data)
print(f"Sent Data: {pollutions[:5]}")
pollutions = update()
countdown += 5
time.sleep(5)
|
# Create app for demo-realtime-pollution receiver.py
import socket
import pickle
import math
from threading import Thread
from taipy.gui import Gui, State, invoke_callback, get_state_id
import numpy as np
import pandas as pd
init_lat = 49.247
init_long = 1.377
factory_lat = 49.246
factory_long = 1.369
diff_lat = abs(init_lat - factory_lat) * 15
diff_long = abs(init_long - factory_long) * 15
lats_unique = np.arange(init_lat - diff_lat, init_lat + diff_lat, 0.001)
longs_unique = np.arange(init_long - diff_long, init_long + diff_long, 0.001)
countdown = 20
periods = 0
line_data = pd.DataFrame({"Time": [], "Max AQI": []})
drone_data = pd.DataFrame(
{
"Drone ID": [43, 234, 32, 23, 5, 323, 12, 238, 21, 84],
"Battery Level": [
"86%",
"56%",
"45%",
"12%",
"85%",
"67%",
"34%",
"78%",
"90%",
"100%",
],
"AQI": [40, 34, 24, 22, 33, 45, 23, 34, 23, 34],
"Status": [
"Moving",
"Measuring",
"Measuring",
"Stopped",
"Measuring",
"Moving",
"Moving",
"Measuring",
"Measuring",
"Measuring",
],
}
)
HOST = "127.0.0.1"
PORT = 65432
layout_map = {
"mapbox": {
"style": "open-street-map",
"center": {"lat": init_lat, "lon": init_long},
"zoom": 13,
},
"dragmode": "false",
"margin": {"l": 0, "r": 0, "b": 0, "t": 0},
}
layout_line = {
"title": "Max Measured AQI over Time",
"yaxis": {"range": [0, 150]},
}
options = {
"opacity": 0.8,
"colorscale": "Bluered",
"zmin": 0,
"zmax": 140,
"colorbar": {"title": "AQI"},
"hoverinfo": "none",
}
config = {"scrollZoom": False, "displayModeBar": False}
def pollution(lat: float, long: float):
"""
Return pollution level in percentage
Pollution should be centered around the factory
Pollution should decrease with distance to factory
Pollution should have an added random component
Args:
- lat: latitude
- long: longitude
Returns:
- pollution level
"""
global countdown
return 80 * (0.5 + 0.5 * math.sin(countdown / 20)) * math.exp(
-(0.8 * (lat - factory_lat) ** 2 + 0.2 * (long - factory_long) ** 2) / 0.00005
) + np.random.randint(0, 50)
lats = []
longs = []
pollutions = []
times = []
max_pollutions = []
for lat in lats_unique:
for long in longs_unique:
lats.append(lat)
longs.append(long)
pollutions.append(pollution(lat, long))
data_province_displayed = pd.DataFrame(
{
"Latitude": lats,
"Longitude": longs,
"Pollution": pollutions,
}
)
max_pollution = data_province_displayed["Pollution"].max()
# Socket handler
def client_handler(gui: Gui, state_id_list: list):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((HOST, PORT))
s.listen()
conn, _ = s.accept()
while True:
if data := conn.recv(1024 * 1024):
pollutions = pickle.loads(data)
print(f"Data received: {pollutions[:5]}")
if hasattr(gui, "_server") and state_id_list:
invoke_callback(
gui,
state_id_list[0],
update_pollutions,
[pollutions],
)
else:
print("Connection closed")
break
# Gui declaration
state_id_list = []
Gui.add_shared_variable("pollutions")
def on_init(state: State):
    if (state_id := get_state_id(state)) is not None and state_id != "":
state_id_list.append(state_id)
update_pollutions(state, pollutions)
def update_pollutions(state: State, val):
state.pollutions = val
state.data_province_displayed = pd.DataFrame(
{
"Latitude": lats,
"Longitude": longs,
"Pollution": state.pollutions,
}
)
# Add an hour to the time
state.periods = state.periods + 1
state.max_pollutions = state.max_pollutions + [max(state.pollutions)]
state.times = pd.date_range(
"2020-11-04", periods=len(state.max_pollutions), freq="H"
)
state.line_data = pd.DataFrame(
{
"Time": state.times,
"Max AQI": state.max_pollutions,
}
)
page = """
<|{data_province_displayed}|chart|type=densitymapbox|plot_config={config}|options={options}|lat=Latitude|lon=Longitude|layout={layout_map}|z=Pollution|mode=markers|class_name=map|height=40vh|>
<|layout|columns=1 2 2|
<|part|class_name=card|
**Max Measured AQI:**<br/><br/><br/>
<|{int(data_province_displayed["Pollution"].max())}|indicator|value={int(data_province_displayed["Pollution"].max())}|min=140|max=0|>
<br/><br/>
**Average Measured AQI:**<br/><br/><br/>
<|{int(data_province_displayed["Pollution"].mean())}|indicator|value={int(data_province_displayed["Pollution"].mean())}|min=140|max=0|>
|>
<|part|class_name=card|
<|{drone_data}|table|show_all=True|>
|>
<|part|class_name=card|
<|{line_data[-30:]}|chart|type=lines|x=Time|y=Max AQI|layout={layout_line}|height=40vh|>
|>
|>
"""
gui = Gui(page=page)
t = Thread(
target=client_handler,
args=(
gui,
state_id_list,
),
)
t.start()
gui.run(run_browser=False)
|
# Create app for demo-pyspark-penguin-app config.py
### app/config.py
import datetime as dt
import os
import subprocess
import sys
from pathlib import Path
import pandas as pd
import taipy as tp
from taipy import Config
SCRIPT_DIR = Path(__file__).parent
SPARK_APP_PATH = SCRIPT_DIR / "penguin_spark_app.py"
input_csv_path = str(SCRIPT_DIR / "penguins.csv")
# -------------------- Data Nodes --------------------
input_csv_path_cfg = Config.configure_data_node(id="input_csv_path", default_data=input_csv_path)
# Path to save the csv output of the spark app
output_csv_path_cfg = Config.configure_data_node(id="output_csv_path")
processed_penguin_df_cfg = Config.configure_parquet_data_node(
id="processed_penguin_df", validity_period=dt.timedelta(days=1)
)
species_cfg = Config.configure_data_node(id="species") # "Adelie", "Chinstrap", "Gentoo"
island_cfg = Config.configure_data_node(id="island") # "Biscoe", "Dream", "Torgersen"
sex_cfg = Config.configure_data_node(id="sex") # "male", "female"
output_cfg = Config.configure_json_data_node(
id="output",
)
# -------------------- Tasks --------------------
def spark_process(input_csv_path: str, output_csv_path: str) -> pd.DataFrame:
proc = subprocess.Popen(
[
str(Path(sys.executable).with_name("spark-submit")),
str(SPARK_APP_PATH),
"--input-csv-path",
input_csv_path,
"--output-csv-path",
output_csv_path,
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
try:
outs, errs = proc.communicate(timeout=15)
except subprocess.TimeoutExpired:
proc.kill()
outs, errs = proc.communicate()
if proc.returncode != os.EX_OK:
raise Exception("Spark training failed")
df = pd.read_csv(output_csv_path)
return df
def filter(penguin_df: pd.DataFrame, species: str, island: str, sex: str) -> dict:
df = penguin_df[(penguin_df.species == species) & (penguin_df.island == island) & (penguin_df.sex == sex)]
output = df[["bill_length_mm", "bill_depth_mm", "flipper_length_mm", "body_mass_g"]].to_dict(orient="records")
return output[0] if output else dict()
spark_process_task_cfg = Config.configure_task(
id="spark_process",
function=spark_process,
skippable=True,
input=[input_csv_path_cfg, output_csv_path_cfg],
output=processed_penguin_df_cfg,
)
filter_task_cfg = Config.configure_task(
id="filter",
function=filter,
skippable=True,
input=[processed_penguin_df_cfg, species_cfg, island_cfg, sex_cfg],
output=output_cfg,
)
scenario_cfg = Config.configure_scenario(
id="scenario", task_configs=[spark_process_task_cfg, filter_task_cfg]
)
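# A minimal, hypothetical sketch of driving this configuration without the GUI
# (assumes penguins.csv is present and spark-submit is on the PATH); it only uses
# the scenario_cfg declared above and standard Taipy Core calls.
if __name__ == "__main__":
    tp.Core().run()
    scenario = tp.create_scenario(scenario_cfg)
    scenario.output_csv_path.write(str(SCRIPT_DIR / "output.csv"))
    scenario.species.write("Adelie")
    scenario.island.write("Biscoe")
    scenario.sex.write("male")
    tp.submit(scenario)
    print(scenario.output.read())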
|
# Create app for demo-pyspark-penguin-app main.py
### app/main.py
from pathlib import Path
from typing import Optional
import taipy as tp
from config import scenario_cfg
from taipy.gui import Gui, notify
valid_features: dict[str, list[str]] = {
"species": ["Adelie", "Chinstrap", "Gentoo"],
"island": ["Torgersen", "Biscoe", "Dream"],
"sex": ["Male", "Female"],
}
selected_species = valid_features["species"][0]
selected_island = valid_features["island"][0]
selected_sex = valid_features["sex"][0]
selected_scenario: Optional[tp.Scenario] = None
data_dir = Path(__file__).with_name("data")
data_dir.mkdir(exist_ok=True)
def scenario_on_creation(state, id, payload):
_ = payload["config"]
date = payload["date"]
label = payload["label"]
properties = payload["properties"]
# Create scenario with selected configuration
scenario = tp.create_scenario(scenario_cfg, creation_date=date, name=label)
scenario.properties.update(properties)
# Write the selected GUI values to the scenario
scenario.species.write(state.selected_species)
scenario.island.write(state.selected_island)
scenario.sex.write(state.selected_sex.lower())
output_csv_file = data_dir / f"{scenario.id}.csv"
scenario.output_csv_path.write(str(output_csv_file))
notify(state, "S", f"Created {scenario.id}")
return scenario
def scenario_on_submission_change(state, submittable, details):
"""When the selected_scenario's submission status changes, reassign selected_scenario to force a GUI refresh."""
state.selected_scenario = submittable
selected_data_node = None
main_md = """
<|layout|columns=1 4|gap=1.5rem|
<lhs|part|
# Spark with **Taipy**{: .color-primary}
## Scenario
<|{selected_scenario}|scenario_selector|on_creation=scenario_on_creation|>
----------
## Scenario info
<|{selected_scenario}|scenario|on_submission_change=scenario_on_submission_change|>
|lhs>
<rhs|part|render={selected_scenario}|
## Selections
<selections|layout|columns=1 1 1 2|gap=1.5rem|
<|{selected_species}|selector|lov={valid_features["species"]}|dropdown|label=Species|>
<|{selected_island}|selector|lov={valid_features["island"]}|dropdown|label=Island|>
<|{selected_sex}|selector|lov={valid_features["sex"]}|dropdown|label=Sex|>
|selections>
----------
## Output
**<|{str(selected_scenario.output.read()) if selected_scenario and selected_scenario.output.is_ready_for_reading else 'Submit the scenario using the left panel.'}|text|raw|class_name=color-primary|>**
## Data node inspector
<|{selected_data_node}|data_node_selector|display_cycles=False|>
**Data node value:**
<|{str(selected_data_node.read()) if selected_data_node and selected_data_node.is_ready_for_reading else None}|>
<br/>
----------
## DAG
<|Scenario DAG|expandable|
<|{selected_scenario}|scenario_dag|>
|>
|rhs>
|>
"""
def on_change(state, var_name: str, var_value):
if var_name == "selected_species":
state.selected_scenario.species.write(var_value)
elif var_name == "selected_island":
state.selected_scenario.island.write(var_value)
elif var_name == "selected_sex":
state.selected_scenario.sex.write(var_value.lower())
if __name__ == "__main__":
tp.Core().run()
gui = Gui(main_md)
gui.run(title="Spark with Taipy")
|
# Create app for demo-pyspark-penguin-app penguin_spark_app.py
### app/penguin_spark_app.py
import argparse
import os
import sys
parser = argparse.ArgumentParser()
parser.add_argument("--input-csv-path", required=True, help="Path to the input penguin CSV file.")
parser.add_argument("--output-csv-path", required=True, help="Path to save the output CSV file.")
args = parser.parse_args()
import pyspark.pandas as ps
from pyspark.sql import SparkSession
def read_penguin_df(csv_path: str):
penguin_df = ps.read_csv(csv_path)
return penguin_df
def clean(df: ps.DataFrame) -> ps.DataFrame:
return df[df.sex.isin(["male", "female"])].dropna()
def process(df: ps.DataFrame) -> ps.DataFrame:
"""The mean of measured penguin values, grouped by island and sex."""
mean_df = df.groupby(by=["species", "island", "sex"]).agg("mean").drop(columns="year").reset_index()
return mean_df
if __name__ == "__main__":
spark = SparkSession.builder.appName("Mean Penguin").getOrCreate()
penguin_df = read_penguin_df(args.input_csv_path)
cleaned_penguin_df = clean(penguin_df)
processed_penguin_df = process(cleaned_penguin_df)
processed_penguin_df.to_pandas().to_csv(args.output_csv_path, index=False)
sys.exit(os.EX_OK)
|
# Create app for demo-dask-customer-analysis config.py
from taipy import Config
from algos.algo import (
preprocess_and_score,
featurization_and_segmentation,
segment_analysis,
high_value_cust_summary_statistics,
)
# -------------------- Data Nodes --------------------
path_to_data_cfg = Config.configure_data_node(id="path_to_data", default_data="data/customers_data.csv")
scored_df_cfg = Config.configure_data_node(id="scored_df")
payment_threshold_cfg = Config.configure_data_node(id="payment_threshold", default_data=1000)
score_threshold_cfg = Config.configure_data_node(id="score_threshold", default_data=1.5)
segmented_customer_df_cfg = Config.configure_data_node(id="segmented_customer_df")
metric_cfg = Config.configure_data_node(id="metric", default_data="mean")
segment_result_cfg = Config.configure_data_node(id="segment_result")
summary_statistic_type_cfg = Config.configure_data_node(id="summary_statistic_type", default_data="median")
high_value_summary_df_cfg = Config.configure_data_node(id="high_value_summary_df")
# -------------------- Tasks --------------------
preprocess_and_score_task_cfg = Config.configure_task(
id="preprocess_and_score",
function=preprocess_and_score,
skippable=True,
input=[path_to_data_cfg],
output=[scored_df_cfg],
)
featurization_and_segmentation_task_cfg = Config.configure_task(
id="featurization_and_segmentation",
function=featurization_and_segmentation,
skippable=True,
input=[scored_df_cfg, payment_threshold_cfg, score_threshold_cfg],
output=[segmented_customer_df_cfg],
)
segment_analysis_task_cfg = Config.configure_task(
id="segment_analysis",
function=segment_analysis,
skippable=True,
input=[segmented_customer_df_cfg, metric_cfg],
output=[segment_result_cfg],
)
high_value_cust_summary_statistics_task_cfg = Config.configure_task(
id="high_value_cust_summary_statistics",
function=high_value_cust_summary_statistics,
skippable=True,
    input=[segmented_customer_df_cfg, segment_result_cfg, summary_statistic_type_cfg],  # order matches high_value_cust_summary_statistics(df, segment_analysis, summary_statistic_type)
output=[high_value_summary_df_cfg],
)
scenario_cfg = Config.configure_scenario(
id="scenario_1",
task_configs=[
preprocess_and_score_task_cfg,
featurization_and_segmentation_task_cfg,
segment_analysis_task_cfg,
high_value_cust_summary_statistics_task_cfg,
],
)
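# A minimal, hypothetical sketch of running this scenario headlessly (assumes
# data/customers_data.csv exists); it only uses the configs declared above and
# standard Taipy Core calls.
if __name__ == "__main__":
    import taipy as tp
    tp.Core().run()
    scenario = tp.create_scenario(scenario_cfg)
    tp.submit(scenario)
    print(scenario.high_value_summary_df.read())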
|
# Create app for demo-dask-customer-analysis algo.py
import time
import dask.dataframe as dd
import pandas as pd
def preprocess_and_score(path_to_original_data: str):
print("__________________________________________________________")
print("1. TASK 1: DATA PREPROCESSING AND CUSTOMER SCORING ...")
start_time = time.perf_counter() # Start the timer
# Step 1: Read data using Dask
df = dd.read_csv(path_to_original_data)
# Step 2: Simplify the customer scoring formula
df["CUSTOMER_SCORE"] = (
0.5 * df["TotalPurchaseAmount"] / 1000 + 0.3 * df["NumberOfPurchases"] / 10 + 0.2 * df["AverageReviewScore"]
)
# Save all customers to a new CSV file
scored_df = df[["CUSTOMER_SCORE", "TotalPurchaseAmount", "NumberOfPurchases", "TotalPurchaseTime"]]
pd_df = scored_df.compute()
end_time = time.perf_counter() # Stop the timer
execution_time = (end_time - start_time) * 1000 # Calculate the time in milliseconds
print(f"Time of Execution: {execution_time:.4f} ms")
return pd_df
def featurization_and_segmentation(scored_df, payment_threshold, score_threshold):
print("__________________________________________________________")
print("2. TASK 2: FEATURE ENGINEERING AND SEGMENTATION ...")
# payment_threshold, score_threshold = float(payment_threshold), float(score_threshold)
start_time = time.perf_counter() # Start the timer
df = scored_df
# Feature: Indicator if customer's total purchase is above the payment threshold
df["HighSpender"] = (df["TotalPurchaseAmount"] > payment_threshold).astype(int)
# Feature: Average time between purchases
df["AverageTimeBetweenPurchases"] = df["TotalPurchaseTime"] / df["NumberOfPurchases"]
# Additional computationally intensive features
df["Interaction1"] = df["TotalPurchaseAmount"] * df["NumberOfPurchases"]
df["Interaction2"] = df["TotalPurchaseTime"] * df["CUSTOMER_SCORE"]
df["PolynomialFeature"] = df["TotalPurchaseAmount"] ** 2
# Segment customers based on the score_threshold
df["ValueSegment"] = ["High Value" if score > score_threshold else "Low Value" for score in df["CUSTOMER_SCORE"]]
end_time = time.perf_counter() # Stop the timer
execution_time = (end_time - start_time) * 1000 # Calculate the time in milliseconds
print(f"Time of Execution: {execution_time:.4f} ms")
return df
def segment_analysis(df: pd.DataFrame, metric):
print("__________________________________________________________")
print("3. TASK 3: SEGMENT ANALYSIS ...")
start_time = time.perf_counter() # Start the timer
# Detailed analysis for each segment: mean/median of various metrics
segment_analysis = (
df.groupby("ValueSegment")
.agg(
{
"CUSTOMER_SCORE": metric,
"TotalPurchaseAmount": metric,
"NumberOfPurchases": metric,
"TotalPurchaseTime": metric,
"HighSpender": "sum", # Total number of high spenders in each segment
"AverageTimeBetweenPurchases": metric,
}
)
.reset_index()
)
end_time = time.perf_counter() # Stop the timer
execution_time = (end_time - start_time) * 1000 # Calculate the time in milliseconds
print(f"Time of Execution: {execution_time:.4f} ms")
return segment_analysis
def high_value_cust_summary_statistics(df: pd.DataFrame, segment_analysis: pd.DataFrame, summary_statistic_type: str):
print("__________________________________________________________")
print("4. TASK 4: ADDITIONAL ANALYSIS BASED ON SEGMENT ANALYSIS ...")
start_time = time.perf_counter() # Start the timer
# Filter out the High Value customers
high_value_customers = df[df["ValueSegment"] == "High Value"]
# Use summary_statistic_type to calculate different types of summary statistics
if summary_statistic_type == "mean":
average_purchase_high_value = high_value_customers["TotalPurchaseAmount"].mean()
elif summary_statistic_type == "median":
average_purchase_high_value = high_value_customers["TotalPurchaseAmount"].median()
elif summary_statistic_type == "max":
average_purchase_high_value = high_value_customers["TotalPurchaseAmount"].max()
elif summary_statistic_type == "min":
average_purchase_high_value = high_value_customers["TotalPurchaseAmount"].min()
median_score_high_value = high_value_customers["CUSTOMER_SCORE"].median()
# Fetch the summary statistic for 'TotalPurchaseAmount' for High Value customers from segment_analysis
segment_statistic_high_value = segment_analysis.loc[
segment_analysis["ValueSegment"] == "High Value", "TotalPurchaseAmount"
].values[0]
# Create a DataFrame to hold the results
result_df = pd.DataFrame(
{
"SummaryStatisticType": [summary_statistic_type],
"AveragePurchaseHighValue": [average_purchase_high_value],
"MedianScoreHighValue": [median_score_high_value],
"SegmentAnalysisHighValue": [segment_statistic_high_value],
}
)
end_time = time.perf_counter() # Stop the timer
execution_time = (end_time - start_time) * 1000 # Calculate the time in milliseconds
print(f"Time of Execution: {execution_time:.4f} ms")
return result_df
if __name__ == "__main__":
t1 = preprocess_and_score("data/customers_data.csv")
t2 = featurization_and_segmentation(t1, 1500, 1.5)
t3 = segment_analysis(t2, "mean")
t4 = high_value_cust_summary_statistics(t2, t3, "mean")
print(t4)
|
# Create app for demo-taipy-gui-starter-1 main.py
from taipy.gui import Gui
from math import cos, exp
page = """
# This is *Taipy* GUI
A value: <|{decay}|>.
A slider: <br/>
<|{decay}|slider|>
My chart:
<|{data}|chart|>
"""
def compute_data(decay):
return [cos(i/16) * exp(-i*decay/6000) for i in range(720)]
def on_change(state, var_name, var_value):
if var_name == 'decay':
state.data = compute_data(var_value)
decay = 10
data = compute_data(decay)
Gui(page=page).run(title='Taipy Demo GUI 1',
dark_mode=False)
|
# Create app for demo-churn-classification main.py
import pandas as pd
import taipy as tp
from taipy.gui import Gui, Icon, navigate
from config.config import scenario_cfg
from taipy.config import Config
from pages.main_dialog import *
import warnings
with warnings.catch_warnings():
warnings.simplefilter(action='ignore', category=FutureWarning)
# Load configuration
Config.load('config/config.toml')
scenario_cfg = Config.scenarios['churn_classification']
# Execute the scenario
tp.Core().run()
def create_first_scenario(scenario_cfg):
"""Create and submit the first scenario."""
scenario = tp.create_scenario(scenario_cfg)
tp.submit(scenario)
return scenario
scenario = create_first_scenario(scenario_cfg)
# Read datasets
train_dataset = scenario.train_dataset.read()
test_dataset = scenario.test_dataset.read()
roc_dataset = scenario.roc_data_ml.read()
# Process test dataset columns
test_dataset.columns = [str(column).upper() for column in test_dataset.columns]
# Prepare data for visualization
select_x = test_dataset.drop('EXITED',axis=1).columns.tolist()
x_selected = select_x[0]
select_y = select_x
y_selected = select_y[1]
# Read results and create charts
values = scenario.results_ml.read()
forecast_series = values['Forecast']
scatter_dataset_pred = creation_scatter_dataset_pred(test_dataset, forecast_series)
histo_full_pred = creation_histo_full_pred(test_dataset, forecast_series)
histo_full = creation_histo_full(test_dataset)
scatter_dataset = creation_scatter_dataset(test_dataset)
features_table = scenario.feature_importance_ml.read()
accuracy_graph, f1_score_graph, score_auc_graph = compare_models_baseline(scenario, ['ml', 'baseline'])
def create_charts(model_type):
"""Create pie charts and metrics for the given model type."""
metrics = c_update_metrics(scenario, model_type)
(number_of_predictions, accuracy, f1_score, score_auc,
number_of_good_predictions, number_of_false_predictions,
fp_, tp_, fn_, tn_) = metrics
pie_plotly = pd.DataFrame({
"values": [number_of_good_predictions, number_of_false_predictions],
"labels": ["Correct predictions", "False predictions"]
})
distrib_class = pd.DataFrame({
"values": [len(values[values["Historical"]==0]), len(values[values["Historical"]==1])],
"labels": ["Stayed", "Exited"]
})
score_table = pd.DataFrame({
"Score": ["Predicted stayed", "Predicted exited"],
"Stayed": [tn_, fp_],
"Exited": [fn_, tp_]
})
pie_confusion_matrix = pd.DataFrame({
"values": [tp_, tn_, fp_, fn_],
"labels": ["True Positive", "True Negative", "False Positive", "False Negative"]
})
return (number_of_predictions, number_of_false_predictions, number_of_good_predictions,
accuracy, f1_score, score_auc, pie_plotly, distrib_class, score_table, pie_confusion_matrix)
# Initialize charts
chart_metrics = create_charts('ml')
(number_of_predictions, number_of_false_predictions, number_of_good_predictions,
accuracy, f1_score, score_auc, pie_plotly, distrib_class, score_table, pie_confusion_matrix) = chart_metrics
def on_change(state, var_name, var_value):
"""Handle variable changes in the GUI."""
if var_name in ['x_selected', 'y_selected']:
update_histogram_and_scatter(state)
elif var_name == 'mm_algorithm_selected':
update_variables(state, var_value.lower())
elif var_name in ['mm_algorithm_selected', 'db_table_selected']:
handle_temp_csv_path(state)
# GUI initialization
menu_lov = [
("Data Visualization", Icon('images/histogram_menu.svg', 'Data Visualization')),
("Model Manager", Icon('images/model.svg', 'Model Manager')),
("Compare Models", Icon('images/compare.svg', 'Compare Models')),
('Databases', Icon('images/Datanode.svg', 'Databases'))
]
root_md = """
<|toggle|theme|>
<|menu|label=Menu|lov={menu_lov}|on_action=menu_fct|>
"""
page = "Data Visualization"
def menu_fct(state, var_name, var_value):
"""Function that is called when there is a change in the menu control."""
state.page = var_value['args'][0]
navigate(state, state.page.replace(" ", "-"))
def update_variables(state, model_type):
"""Update the different variables and dataframes used in the application."""
global scenario
state.values = scenario.data_nodes[f'results_{model_type}'].read()
state.forecast_series = state.values['Forecast']
metrics = c_update_metrics(scenario, model_type)
(state.number_of_predictions, state.accuracy, state.f1_score, state.score_auc,
number_of_good_predictions, number_of_false_predictions, fp_, tp_, fn_, tn_) = metrics
update_charts(state, model_type, number_of_good_predictions, number_of_false_predictions, fp_, tp_, fn_, tn_)
def update_charts(state, model_type, number_of_good_predictions, number_of_false_predictions, fp_, tp_, fn_, tn_):
"""This function updates all the charts of the GUI.
Args:
state: object containing all the variables used in the GUI
model_type (str): the name of the model_type shown
number_of_good_predictions (int): number of good predictions
number_of_false_predictions (int): number of false predictions
fp_ (float): false positive rate
tp_ (float): true positive rate
fn_ (float): false negative rate
tn_ (float): true negative rate
"""
state.roc_dataset = scenario.data_nodes[f'roc_data_{model_type}'].read()
state.features_table = scenario.data_nodes[f'feature_importance_{model_type}'].read()
state.score_table = pd.DataFrame({"Score":["Predicted stayed", "Predicted exited"],
"Stayed": [tn_, fp_],
"Exited" : [fn_, tp_]})
state.pie_confusion_matrix = pd.DataFrame({"values": [tp_, tn_, fp_, fn_],
"labels" : ["True Positive", "True Negative", "False Positive", "False Negative"]})
state.scatter_dataset_pred = creation_scatter_dataset_pred(test_dataset, state.forecast_series)
state.histo_full_pred = creation_histo_full_pred(test_dataset, state.forecast_series)
# pie charts
state.pie_plotly = pd.DataFrame({"values": [number_of_good_predictions, number_of_false_predictions],
"labels": ["Correct predictions", "False predictions"]})
state.distrib_class = pd.DataFrame({"values": [len(state.values[state.values["Historical"]==0]),
len(state.values[state.values["Historical"]==1])],
"labels" : ["Stayed", "Exited"]})
def on_init(state):
update_histogram_and_scatter(state)
# Define pages
pages = {
"/": root_md + dialog_md,
"Data-Visualization": dv_data_visualization_md,
"Model-Manager": mm_model_manager_md,
"Compare-Models": cm_compare_models_md,
"Databases": db_databases_md,
}
# Run the GUI
if __name__ == '__main__':
gui = Gui(pages=pages)
gui.run(title="Churn classification", dark_mode=False, port=8494)
|
# Create app for demo-churn-classification config.py
from algos.algos import *
from taipy import Config, Scope
##############################################################################################################################
# Creation of the datanodes
##############################################################################################################################
# How to connect to the database
path_to_csv = 'data/churn.csv'
# path for csv and file_path for pickle
initial_dataset_cfg = Config.configure_data_node(id="initial_dataset",
path=path_to_csv,
storage_type="csv",
has_header=True)
date_cfg = Config.configure_data_node(id="date", default_data="None")
preprocessed_dataset_cfg = Config.configure_data_node(id="preprocessed_dataset")
# the final datanode that contains the processed data
train_dataset_cfg = Config.configure_data_node(id="train_dataset")
# the final datanode that contains the processed data
trained_model_ml_cfg = Config.configure_data_node(id="trained_model_ml")
trained_model_baseline_cfg= Config.configure_data_node(id="trained_model_baseline")
# the final datanode that contains the processed data
test_dataset_cfg = Config.configure_data_node(id="test_dataset")
forecast_dataset_ml_cfg = Config.configure_data_node(id="forecast_dataset_ml")
forecast_dataset_baseline_cfg = Config.configure_data_node(id="forecast_dataset_baseline")
roc_data_ml_cfg = Config.configure_data_node(id="roc_data_ml")
roc_data_baseline_cfg = Config.configure_data_node(id="roc_data_baseline")
score_auc_ml_cfg = Config.configure_data_node(id="score_auc_ml")
score_auc_baseline_cfg = Config.configure_data_node(id="score_auc_baseline")
metrics_ml_cfg = Config.configure_data_node(id="metrics_ml")
metrics_baseline_cfg = Config.configure_data_node(id="metrics_baseline")
feature_importance_ml_cfg = Config.configure_data_node(id="feature_importance_ml")
feature_importance_baseline_cfg = Config.configure_data_node(id="feature_importance_baseline")
results_ml_cfg = Config.configure_data_node(id="results_ml")
results_baseline_cfg = Config.configure_data_node(id="results_baseline")
##############################################################################################################################
# Creation of the tasks
##############################################################################################################################
# the task will make the link between the input data node
# and the output data node while executing the function
# initial_dataset --> preprocess dataset --> preprocessed_dataset
task_preprocess_dataset_cfg = Config.configure_task(id="preprocess_dataset",
input=[initial_dataset_cfg,date_cfg],
function=preprocess_dataset,
output=preprocessed_dataset_cfg)
# preprocessed_dataset --> create train data --> train_dataset, test_dataset
task_create_train_test_cfg = Config.configure_task(id="create_train_and_test_data",
input=preprocessed_dataset_cfg,
function=create_train_test_data,
output=[train_dataset_cfg, test_dataset_cfg])
# train_dataset --> create train_model data --> trained_model
task_train_model_baseline_cfg = Config.configure_task(id="train_model_baseline",
input=train_dataset_cfg,
function=train_model_baseline,
output=[trained_model_baseline_cfg,feature_importance_baseline_cfg])
# train_dataset --> create train_model data --> trained_model
task_train_model_ml_cfg = Config.configure_task(id="train_model_ml",
input=train_dataset_cfg,
function=train_model_ml,
output=[trained_model_ml_cfg,feature_importance_ml_cfg])
# test_dataset --> forecast --> forecast_dataset
task_forecast_baseline_cfg = Config.configure_task(id="predict_the_test_data_baseline",
input=[test_dataset_cfg, trained_model_baseline_cfg],
function=forecast,
output=forecast_dataset_baseline_cfg)
# test_dataset --> forecast --> forecast_dataset
task_forecast_ml_cfg = Config.configure_task(id="predict_the_test_data_ml",
input=[test_dataset_cfg, trained_model_ml_cfg],
function=forecast,
output=forecast_dataset_ml_cfg)
task_roc_ml_cfg = Config.configure_task(id="task_roc_ml",
input=[forecast_dataset_ml_cfg, test_dataset_cfg],
function=roc_from_scratch,
output=[roc_data_ml_cfg,score_auc_ml_cfg])
task_roc_baseline_cfg = Config.configure_task(id="task_roc_baseline",
input=[forecast_dataset_baseline_cfg, test_dataset_cfg],
function=roc_from_scratch,
output=[roc_data_baseline_cfg,score_auc_baseline_cfg])
task_create_metrics_baseline_cfg = Config.configure_task(id="task_create_metrics_baseline",
input=[forecast_dataset_baseline_cfg,test_dataset_cfg],
function=create_metrics,
output=metrics_baseline_cfg)
task_create_metrics_ml_cfg = Config.configure_task(id="task_create_metrics",
input=[forecast_dataset_ml_cfg,test_dataset_cfg],
function=create_metrics,
output=metrics_ml_cfg)
task_create_results_baseline_cfg = Config.configure_task(id="task_create_results_baseline",
input=[forecast_dataset_baseline_cfg,test_dataset_cfg],
function=create_results,
output=results_baseline_cfg)
task_create_results_ml_cfg = Config.configure_task(id="task_create_results_ml",
input=[forecast_dataset_ml_cfg,test_dataset_cfg],
function=create_results,
output=results_ml_cfg)
##############################################################################################################################
# Creation of the scenario
##############################################################################################################################
scenario_cfg = Config.configure_scenario(id="churn_classification",
task_configs=[task_create_metrics_baseline_cfg,
task_create_metrics_ml_cfg,
task_create_results_baseline_cfg,
task_create_results_ml_cfg,
task_forecast_baseline_cfg,
task_forecast_ml_cfg,
task_roc_ml_cfg,
task_roc_baseline_cfg,
task_train_model_baseline_cfg,
task_train_model_ml_cfg,
task_preprocess_dataset_cfg,
task_create_train_test_cfg])
Config.export('config/config.toml')
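# --- Hedged usage sketch (assumption, not part of the original config.py) ---
# A minimal example of how this configuration is typically consumed, following the
# same pattern as the other demos in this document: start the Core service, create
# a scenario from scenario_cfg, submit it, then read one of the result data nodes.
if __name__ == '__main__':
    import taipy as tp
    tp.Core().run()
    scenario = tp.create_scenario(scenario_cfg)
    tp.submit(scenario)
    # 'metrics_ml' is one of the data node ids configured above
    print(scenario.data_nodes['metrics_ml'].read())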
|
# Create app for demo-churn-classification algos.py
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_auc_score
import datetime as dt
import pandas as pd
import numpy as np
##############################################################################################################################
# Function used in the tasks
##############################################################################################################################
def preprocess_dataset(initial_dataset: pd.DataFrame, date: dt.datetime="None"):
"""This function preprocess the dataset to be used in the model
Args:
initial_dataset (pd.DataFrame): the raw format when we first read the data
Returns:
pd.DataFrame: the preprocessed dataset for classification
"""
print("\n Preprocessing the dataset...")
#We filter the dataframe on the date
if date != "None":
initial_dataset['Date'] = pd.to_datetime(initial_dataset['Date'])
processed_dataset = initial_dataset[initial_dataset['Date'] <= date]
print(len(processed_dataset))
else:
processed_dataset = initial_dataset
processed_dataset = processed_dataset[['CreditScore','Geography','Gender','Age','Tenure','Balance','NumOfProducts','HasCrCard','IsActiveMember','EstimatedSalary','Exited']]
processed_dataset = pd.get_dummies(processed_dataset)
if 'Gender_Female' in processed_dataset.columns:
processed_dataset.drop('Gender_Female',axis=1,inplace=True)
processed_dataset = processed_dataset.apply(pd.to_numeric)
columns_to_select = ['CreditScore', 'Age', 'Tenure', 'Balance', 'NumOfProducts', 'HasCrCard',
'IsActiveMember', 'EstimatedSalary', 'Geography_France', 'Geography_Germany',
'Geography_Spain', 'Gender_Male','Exited']
processed_dataset = processed_dataset[[col for col in columns_to_select if col in processed_dataset.columns]]
print(" Preprocessing done!\n")
return processed_dataset
def create_train_test_data(preprocessed_dataset: pd.DataFrame):
"""This function will create the train data by segmenting the dataset
Args:
preprocessed_dataset (pd.DataFrame): the preprocessed dataset
Returns:
pd.DataFrame: the training dataset
"""
print("\n Creating the training and testing dataset...")
X_train, X_test, y_train, y_test = train_test_split(preprocessed_dataset.iloc[:,:-1],preprocessed_dataset.iloc[:,-1],test_size=0.2,random_state=42)
train_data = pd.concat([X_train,y_train],axis=1)
test_data = pd.concat([X_test,y_test],axis=1)
print(" Creating done!")
return train_data, test_data
def train_model_baseline(train_dataset: pd.DataFrame):
"""Function to train the Logistic Regression model
Args:
train_dataset (pd.DataFrame): the training dataset
Returns:
model (LogisticRegression): the fitted model
"""
print(" Training the model...\n")
X,y = train_dataset.iloc[:,:-1],train_dataset.iloc[:,-1]
model_fitted = LogisticRegression().fit(X,y)
print("\n ",model_fitted," is trained!")
importance_dict = {'Features' : X.columns, 'Importance':model_fitted.coef_[0]}
importance = pd.DataFrame(importance_dict).sort_values(by='Importance',ascending=True)
return model_fitted, importance
def train_model_ml(train_dataset: pd.DataFrame):
"""Function to train the Logistic Regression model
Args:
train_dataset (pd.DataFrame): the training dataset
Returns:
model (RandomForest): the fitted model
"""
print(" Training the model...\n")
X,y = train_dataset.iloc[:,:-1],train_dataset.iloc[:,-1]
model_fitted = RandomForestClassifier().fit(X,y)
print("\n ",model_fitted," is trained!")
importance_dict = {'Features' : X.columns, 'Importance':model_fitted.feature_importances_}
importance = pd.DataFrame(importance_dict).sort_values(by='Importance',ascending=True)
return model_fitted, importance
def forecast(test_dataset: pd.DataFrame, trained_model: RandomForestClassifier):
"""Function to forecast the test dataset
Args:
test_dataset (pd.DataFrame): the test dataset
trained_model (LogisticRegression): the fitted model
Returns:
forecast (pd.DataFrame): the forecasted dataset
"""
print(" Forecasting the test dataset...")
X,y = test_dataset.iloc[:,:-1],test_dataset.iloc[:,-1]
#predictions = trained_model.predict(X)
predictions = trained_model.predict_proba(X)[:, 1]
print(" Forecasting done!")
return predictions
def roc_from_scratch(probabilities, test_dataset, partitions=100):
print(" Calculation of the ROC curve...")
y_test = test_dataset.iloc[:,-1]
roc = np.array([])
for i in range(partitions + 1):
threshold_vector = np.greater_equal(probabilities, i / partitions).astype(int)
tpr, fpr = true_false_positive(threshold_vector, y_test)
roc = np.append(roc, [fpr, tpr])
roc_np = roc.reshape(-1, 2)
roc_data = pd.DataFrame({"False positive rate": roc_np[:, 0], "True positive rate": roc_np[:, 1]})
print(" Calculation done")
print(" Scoring...")
score_auc = roc_auc_score(y_test, probabilities)
print(" Scoring done\n")
return roc_data, score_auc
def true_false_positive(threshold_vector:np.array, y_test:np.array):
"""Function to calculate the true positive rate and the false positive rate
Args:
threshold_vector (np.array): the test dataset
y_test (np.array): the fitted model
Returns:
tpr (pd.DataFrame): the forecasted dataset
fpr (pd.DataFrame): the forecasted dataset
"""
true_positive = np.equal(threshold_vector, 1) & np.equal(y_test, 1)
true_negative = np.equal(threshold_vector, 0) & np.equal(y_test, 0)
false_positive = np.equal(threshold_vector, 1) & np.equal(y_test, 0)
false_negative = np.equal(threshold_vector, 0) & np.equal(y_test, 1)
tpr = true_positive.sum() / (true_positive.sum() + false_negative.sum())
fpr = false_positive.sum() / (false_positive.sum() + true_negative.sum())
return tpr, fpr
def create_metrics(predictions:np.array, test_dataset:pd.DataFrame):
print(" Creating the metrics...")
threshold = 0.5
threshold_vector = np.greater_equal(predictions, threshold).astype(int)
y_test = test_dataset.iloc[:,-1]
true_positive = (np.equal(threshold_vector, 1) & np.equal(y_test, 1)).sum()
true_negative = (np.equal(threshold_vector, 0) & np.equal(y_test, 0)).sum()
false_positive = (np.equal(threshold_vector, 1) & np.equal(y_test, 0)).sum()
false_negative = (np.equal(threshold_vector, 0) & np.equal(y_test, 1)).sum()
f1_score = np.around(2*true_positive/(2*true_positive+false_positive+false_negative), decimals=2)
accuracy = np.around((true_positive+true_negative)/(true_positive+true_negative+false_positive+false_negative), decimals=2)
dict_ftpn = {"tp": true_positive, "tn": true_negative, "fp": false_positive, "fn": false_negative}
number_of_good_predictions = true_positive + true_negative
number_of_false_predictions = false_positive + false_negative
metrics = {"f1_score": f1_score,
"accuracy": accuracy,
"dict_ftpn": dict_ftpn,
'number_of_predictions': len(predictions),
'number_of_good_predictions':number_of_good_predictions,
'number_of_false_predictions':number_of_false_predictions}
return metrics
def create_results(forecast_values,test_dataset):
forecast_series_proba = pd.Series(np.around(forecast_values,decimals=2), index=test_dataset.index, name='Probability')
forecast_series = pd.Series((forecast_values>0.5).astype(int), index=test_dataset.index, name='Forecast')
true_series = pd.Series(test_dataset.iloc[:,-1], name="Historical",index=test_dataset.index)
index_series = pd.Series(range(len(true_series)), index=test_dataset.index, name="Id")
results = pd.concat([index_series, forecast_series_proba, forecast_series, true_series], axis=1)
return results
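# --- Hedged self-test (not in the original algos.py) ---
# A small synthetic check of roc_from_scratch and create_metrics, assuming a test
# dataset whose last column is the binary 'Exited' target, as the functions above expect.
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    fake_test = pd.DataFrame({'CreditScore': rng.normal(650, 50, size=200),
                              'Exited': rng.integers(0, 2, size=200)})
    fake_probabilities = rng.random(200)
    roc_data, score_auc = roc_from_scratch(fake_probabilities, fake_test, partitions=20)
    metrics = create_metrics(fake_probabilities, fake_test)
    print(score_auc, metrics['accuracy'], metrics['f1_score'])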
|
# Create app for demo-churn-classification main_dialog.py
from pages.compare_models_md import *
from pages.data_visualization_md import *
from pages.databases_md import *
from pages.model_manager_md import *
dr_show_roc = False
dialog_md = """
<|dialog|open={dr_show_roc}|title=ROC Curve|on_action={lambda s: s.assign("dr_show_roc", False)}|labels=Close|width=1000px|
<|{roc_dataset}|chart|x=False positive rate|y[1]=True positive rate|label[1]=True positive rate|height=500px|width=900px|type=scatter|>
|>
"""
|
# Create app for demo-churn-classification databases_md.py
import pathlib
# This path is used to create a temporary CSV file from which the table can be downloaded
tempdir = pathlib.Path(".tmp")
tempdir.mkdir(exist_ok=True)
PATH_TO_TABLE = str(tempdir / "table.csv")
# Selector to select the table to show
db_table_selector = ['Training Dataset', 'Test Dataset', 'Forecast Dataset', 'Confusion Matrix']
db_table_selected = db_table_selector[0]
def handle_temp_csv_path(state):
"""This function checks if the temporary csv file exists. If it does, it is deleted. Then, the temporary csv file
is created for the right table
Args:
state: object containing all the variables used in the GUI
"""
if state.db_table_selected == 'Test Dataset':
state.test_dataset.to_csv(PATH_TO_TABLE, sep=';')
if state.db_table_selected == 'Confusion Matrix':
state.score_table.to_csv(PATH_TO_TABLE, sep=';')
if state.db_table_selected == "Training Dataset":
state.train_dataset.to_csv(PATH_TO_TABLE, sep=';')
if state.db_table_selected == "Forecast Dataset":
state.values.to_csv(PATH_TO_TABLE, sep=';')
# Aggregation of the strings to create the complete page
db_databases_md = """
# Data**bases**{: .color-primary}
<|layout|columns=2 2 1|
<|{mm_algorithm_selected}|selector|lov={mm_algorithm_selector}|dropdown|label=Algorithm|active=False|>
<|{db_table_selected}|selector|lov={db_table_selector}|dropdown|label=Table|>
<|{PATH_TO_TABLE}|file_download|name=table.csv|label=Download table|>
|>
<Confusion|part|render={db_table_selected=='Confusion Matrix'}|
<|{score_table}|table|width=fit-content|show_all|class_name=ml-auto mr-auto|>
|Confusion>
<Training|part|render={db_table_selected=='Training Dataset'}|
<|{train_dataset}|table|>
|Training>
<Forecast|part|render={db_table_selected=='Forecast Dataset'}|
<|{values}|table|width=fit-content|style={lambda s,i,r: 'red_color' if r['Historical']!=r['Forecast'] else 'green_color'}|class_name=ml-auto mr-auto|>
|Forecast>
<test_dataset|part|render={db_table_selected=='Test Dataset'}|
<|{test_dataset}|table|>
|test_dataset>
"""
|
# Create app for demo-churn-classification data_visualization_md.py
import pandas as pd
import numpy as np
dv_graph_selector = ['Histogram','Scatter']
dv_graph_selected = dv_graph_selector[0]
# Histograms dialog
properties_histo_full = {}
properties_scatter_dataset = {}
def creation_scatter_dataset(test_dataset:pd.DataFrame):
"""This function creates the dataset for the scatter plot. For every column (except Exited), scatter_dataset will have a positive and negative version.
The positive column will have NaN when the Exited is zero and the negative column will have NaN when the Exited is one.
Args:
test_dataset (pd.DataFrame): the test dataset
Returns:
pd.DataFrame: the datafram
"""
scatter_dataset = test_dataset.copy()
for column in scatter_dataset.columns:
if column != 'EXITED' :
column_neg = str(column)+'_neg'
column_pos = str(column)+'_pos'
scatter_dataset[column_neg] = scatter_dataset[column]
scatter_dataset[column_pos] = scatter_dataset[column]
scatter_dataset.loc[(scatter_dataset['EXITED'] == 1),column_neg] = np.NaN
scatter_dataset.loc[(scatter_dataset['EXITED'] == 0),column_pos] = np.NaN
return scatter_dataset
def creation_histo_full(test_dataset:pd.DataFrame):
"""This function creates the dataset for the histogram plot. For every column (except Exited), histo_full will have a positive and negative version.
The positive column will have NaN when the Exited is zero and the negative column will have NaN when the Exited is one.
Args:
test_dataset (pd.DataFrame): the test dataset
Returns:
pd.DataFrame: the Dataframe used to display the Histogram
"""
histo_full = test_dataset.copy()
for column in histo_full.columns:
column_neg = str(column)+'_neg'
histo_full[column_neg] = histo_full[column]
histo_full.loc[(histo_full['EXITED'] == 1),column_neg] = np.NaN
histo_full.loc[(histo_full['EXITED'] == 0),column] = np.NaN
return histo_full
def update_histogram_and_scatter(state):
global x_selected, y_selected
x_selected = state.x_selected
y_selected = state.y_selected
state.properties_scatter_dataset = {"x":x_selected,
"y[1]":y_selected+'_pos',
"y[2]":y_selected+'_neg'}
# self-assignment notifies Taipy that these DataFrames must be refreshed on the client
state.scatter_dataset = state.scatter_dataset
state.scatter_dataset_pred = state.scatter_dataset_pred
state.properties_histo_full = {"x[1]":x_selected,
"x[2]":x_selected+'_neg'}
state.histo_full = state.histo_full
state.histo_full_pred = state.histo_full_pred
dv_data_visualization_md = """
# Data **Visualization**{: .color-primary}
<|{dv_graph_selected}|toggle|lov={dv_graph_selector}|>
--------------------------------------------------------------------
<|part|render={dv_graph_selected == 'Histogram'}|
### Histogram
<|{x_selected}|selector|lov={select_x}|dropdown=True|label=Select x|>
<|{histo_full}|chart|type=histogram|properties={properties_histo_full}|rebuild|y=EXITED|label=EXITED|color[1]=red|color[2]=green|name[1]=Exited|name[2]=Stayed|height=600px|>
|>
<|part|render={dv_graph_selected == 'Scatter'}|
### Scatter
<|layout|columns= 1 2|
<|{x_selected}|selector|lov={select_x}|dropdown|label=Select x|>
<|{y_selected}|selector|lov={select_y}|dropdown|label=Select y|>
|>
<|{scatter_dataset}|chart|properties={properties_scatter_dataset}|rebuild|color[1]=red|color[2]=green|name[1]=Exited|name[2]=Stayed|mode=markers|type=scatter|height=600px|>
|>
"""
|
# Create app for demo-churn-classification compare_models_md.py
import numpy as np
import pandas as pd
from sklearn.metrics import f1_score
cm_height_histo = "100%"
cm_dict_barmode = {"barmode": "stack","margin":{"t":30}}
cm_options_md = "height={cm_height_histo}|width={cm_height_histo}|layout={cm_dict_barmode}"
cm_compare_models_md = """
# Model comparison
----
<br/>
<br/>
<br/>
<|layout|columns= 1 1 1|columns[mobile]=1|
<|{accuracy_graph}|chart|type=bar|x=Model Type|y[1]=Accuracy Model|y[2]=Accuracy Baseline|title=Accuracy|""" + cm_options_md + """|>
<|{f1_score_graph}|chart|type=bar|x=Model Type|y[1]=F1 Score Model|y[2]=F1 Score Baseline|title=F1 Score|""" + cm_options_md + """|>
<|{score_auc_graph}|chart|type=bar|x=Model Type|y[1]=AUC Score Model|y[2]=AUC Score Baseline|title=AUC Score|""" + cm_options_md + """|>
|>
"""
def compare_charts(accuracies, f1_scores, scores_auc, names):
"""This funcion creates the pandas Dataframes (charts) used in the model comparison page
Args:
accuracies (list): list of accuracies
f1_scores (list): list of f1 scores
scores_auc (list): list of auc scores
names (list): list of scenario names
Returns:
pd.DataFrame: the resulting three pd.DataFrame
"""
accuracy_graph = pd.DataFrame(create_metric_dict(accuracies, "Accuracy", names))
f1_score_graph = pd.DataFrame(create_metric_dict(f1_scores, "F1 Score", names))
score_auc_graph = pd.DataFrame(create_metric_dict(scores_auc, "AUC Score", names))
return accuracy_graph, f1_score_graph, score_auc_graph
def compare_models_baseline(scenario,model_types):
"""This function creates the objects for the model comparison
Args:
scenario (scenario): the selected scenario
model_types (list): the model types to compare (e.g. "ml" and "baseline")
Returns:
pd.DataFrame: the resulting three pd.DataFrame
"""
accuracies = []
f1_scores = []
scores_auc = []
names = []
for model_type in model_types:
(_,accuracy,f1_score,score_auc,_,_,_,_,_,_) = c_update_metrics(scenario, model_type)
accuracies.append(accuracy)
f1_scores.append(f1_score)
scores_auc.append(score_auc)
names.append('Model' if model_type != "baseline" else "Baseline")
accuracy_graph,f1_score_graph, score_auc_graph = compare_charts(accuracies, f1_scores, scores_auc, names)
return accuracy_graph, f1_score_graph, score_auc_graph
def create_metric_dict(metric, metric_name, names):
"""This function creates a dictionary of metrics for multiple models that will be used in a Dataframe shown on the Gui
Args:
metric (list): the value of the metric
metric_name (str): the name of the metric
names (list): list of scenario names
Returns:
dict: dictionary used to build a pandas DataFrame
"""
metric_dict = {}
initial_list = [0]*len(names)
metric_dict["Model Type"] = names
for i in range(len(names)):
current_list = initial_list.copy()
current_list[i] = metric[i]
metric_dict[metric_name +" "+ names[i].capitalize()] = current_list
return metric_dict
def c_update_metrics(scenario, model_type):
"""This function updates the metrics of a scenario using a model
Args:
scenario (scenario): the selected scenario
model_type (str): the name of the selected model_type
Returns:
tuple: number of predictions, accuracy, F1 score, AUC score, counts of good and false predictions, and the confusion matrix terms (fp, tp, fn, tn)
"""
metrics = scenario.data_nodes[f'metrics_{model_type}'].read()
number_of_predictions = metrics['number_of_predictions']
number_of_good_predictions = metrics['number_of_good_predictions']
number_of_false_predictions = metrics['number_of_false_predictions']
accuracy = np.around(metrics['accuracy'], decimals=2)
f1_score = np.around(metrics['f1_score'], decimals=2)
score_auc = np.around(scenario.data_nodes[f'score_auc_{model_type}'].read(), decimals=2)
dict_ftpn = metrics['dict_ftpn']
fp_ = dict_ftpn['fp']
tp_ = dict_ftpn['tp']
fn_ = dict_ftpn['fn']
tn_ = dict_ftpn['tn']
return number_of_predictions, accuracy, f1_score, score_auc, number_of_good_predictions, number_of_false_predictions, fp_, tp_, fn_, tn_
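# --- Hedged illustration (not in the original file) ---
# What create_metric_dict produces for two models: one column per model with the
# metric value on the diagonal and zeros elsewhere, so the chart can stack the bars.
if __name__ == '__main__':
    example = create_metric_dict([0.81, 0.79], "Accuracy", ["Model", "Baseline"])
    print(pd.DataFrame(example))
    # rows: ('Model', 0.81, 0.0) and ('Baseline', 0.0, 0.79)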
|
# Create app for demo-churn-classification model_manager_md.py
import pandas as pd
import numpy as np
mm_graph_selector_scenario = ['Metrics', 'Features', 'Histogram','Scatter']
mm_graph_selected_scenario = mm_graph_selector_scenario[0]
mm_algorithm_selector = ['Baseline', 'ML']
mm_algorithm_selected = 'ML'
mm_pie_color_dict_2 = {"piecolorway":["#00D08A","#FE913C"]}
mm_pie_color_dict_4 = {"piecolorway":["#00D08A","#81F1A0","#F3C178","#FE913C"]}
mm_margin_features = {'margin': {'l': 150}}
def creation_scatter_dataset_pred(test_dataset:pd.DataFrame, forecast_series:pd.Series):
"""This function creates the dataset for the scatter plot for the predictions. For every column (except EXITED) will have a positive and negative version.
EXITED is here a binary indicating if the prediction is good or bad.
The positive column will have NaN when the Exited is zero and the negative column will have NaN when the Exited is one.
Args:
test_dataset (pd.DataFrame): the test dataset
forecast_series (pd.DataFrame): the forecast dataset
Returns:
pd.DataFrame: the Dataframe used to display the Histogram
"""
scatter_dataset = test_dataset.copy()
scatter_dataset['EXITED'] = (scatter_dataset['EXITED']!=forecast_series.to_numpy()).astype(int)
for column in scatter_dataset.columns:
if column != 'EXITED' :
column_neg = str(column)+'_neg'
column_pos = str(column)+'_pos'
scatter_dataset[column_neg] = scatter_dataset[column]
scatter_dataset[column_pos] = scatter_dataset[column]
scatter_dataset.loc[(scatter_dataset['EXITED'] == 1),column_neg] = np.NaN
scatter_dataset.loc[(scatter_dataset['EXITED'] == 0),column_pos] = np.NaN
return scatter_dataset
def creation_histo_full_pred(test_dataset:pd.DataFrame,forecast_series:pd.Series):
"""This function creates the dataset for the histogram plot for the predictions. For every column (except PREDICTION) will have a positive and negative version.
PREDICTION is a binary indicating if the prediction is good or bad.
The positive column will have NaN when the PREDICTION is zero and the negative column will have NaN when the PREDICTION is one.
Args:
test_dataset (pd.DataFrame): the test dataset
forecast_series (pd.DataFrame): the forecast dataset
Returns:
pd.DataFrame: the Dataframe used to display the Histogram
"""
histo_full = test_dataset.copy()
histo_full['EXITED'] = (histo_full['EXITED']!=forecast_series.to_numpy()).astype(int)
histo_full.columns = histo_full.columns.str.replace('EXITED', 'PREDICTION')
for column in histo_full.columns:
column_neg = str(column)+'_neg'
histo_full[column_neg] = histo_full[column]
histo_full.loc[(histo_full['PREDICTION'] == 1),column_neg] = np.NaN
histo_full.loc[(histo_full['PREDICTION'] == 0),column] = np.NaN
return histo_full
mm_model_manager_md = """
# **Model**{: .color-primary} Manager
<|layout|columns=3 2 2 2|
<|{mm_graph_selected_scenario}|toggle|lov={mm_graph_selector_scenario}|>
<|{mm_algorithm_selected}|selector|lov={mm_algorithm_selector}|dropdown|label=Algorithm|>
<|show roc|button|on_action={lambda s: s.assign("dr_show_roc", True)}|>
<br/> **Number of predictions:** <|{number_of_predictions}|>
|>
-----------------------------------------------------------------
<Metrics|part|render={mm_graph_selected_scenario == 'Metrics'}|
### Metrics
<|layout|columns=1 1 1|columns[mobile]=1|
<accuracy|
<|{accuracy}|indicator|value={accuracy}|min=0|max=1|>
**Model accuracy**
{: .text-center}
<|{pie_plotly}|chart|title=Accuracy of predictions model|values=values|labels=labels|type=pie|layout={mm_pie_color_dict_2}|>
|accuracy>
<score_auc|
<|{score_auc}|indicator|value={score_auc}|min=0|max=1|>
**Model AUC**
{: .text-center}
<|{pie_confusion_matrix}|chart|title=Confusion Matrix|values=values|labels=labels|type=pie|layout={mm_pie_color_dict_4}|>
|score_auc>
<f1_score|
<|{f1_score}|indicator|value={f1_score}|min=0|max=1|>
**Model F1-score**
{: .text-center}
<|{distrib_class}|chart|title=Distribution between Exited and Stayed|values=values|labels=labels|type=pie|layout={mm_pie_color_dict_2}|>
|f1_score>
|>
|Metrics>
<Features|part|render={mm_graph_selected_scenario == 'Features'}|
### Features
<|{features_table}|chart|type=bar|y=Features|x=Importance|orientation=h|layout={mm_margin_features}|>
|Features>
<Histogram|part|render={mm_graph_selected_scenario == 'Histogram'}|
### Histogram
<|{x_selected}|selector|lov={select_x}|dropdown|label=Select x|>
<|{histo_full_pred}|chart|type=histogram|properties={properties_histo_full}|rebuild|y=PREDICTION|label=PREDICTION|color[1]=red|color[2]=green|name[1]=Good Predictions|name[2]=Bad Predictions|height=600px|>
|Histogram>
<Scatter|part|render={mm_graph_selected_scenario == 'Scatter'}|
### Scatter
<|layout|columns=1 2|
<|{x_selected}|selector|lov={select_x}|dropdown|label=Select x|>
<|{y_selected}|selector|lov={select_y}|dropdown=True|label=Select y|>
|>
<|{scatter_dataset_pred}|chart|properties={properties_scatter_dataset}|rebuild|color[1]=red|color[2]=green|name[1]=Bad prediction|name[2]=Good prediction|mode=markers|type=scatter|height=600px|>
|Scatter>
"""
|
# Create app for demo-stock-visualization main.py
from taipy.gui import Gui, notify
from datetime import date
import yfinance as yf
from prophet import Prophet
import pandas as pd
# Parameters for retrieving the stock data
start_date = "2015-01-01"
end_date = date.today().strftime("%Y-%m-%d")
selected_stock = 'AAPL'
n_years = 1
def get_stock_data(ticker, start, end):
ticker_data = yf.download(ticker, start, end) # downloading the stock data from START to TODAY
ticker_data.reset_index(inplace=True) # put date in the first column
ticker_data['Date'] = pd.to_datetime(ticker_data['Date']).dt.tz_localize(None)
return ticker_data
def get_data_from_range(state):
print("GENERATING HIST DATA")
start_date = state.start_date if type(state.start_date)==str else state.start_date.strftime("%Y-%m-%d")
end_date = state.end_date if type(state.end_date)==str else state.end_date.strftime("%Y-%m-%d")
state.data = get_stock_data(state.selected_stock, start_date, end_date)
if len(state.data) == 0:
notify(state, "error", f"Not able to download data {state.selected_stock} from {start_date} to {end_date}")
return
notify(state, 's', 'Historical data has been updated!')
notify(state, 'w', 'Deleting previous predictions...')
state.forecast = pd.DataFrame(columns=['Date', 'Lower', 'Upper'])
def generate_forecast_data(data, n_years):
# FORECASTING
df_train = data[['Date', 'Close']]
df_train = df_train.rename(columns={"Date": "ds", "Close": "y"}) # This is the format that Prophet accepts
m = Prophet()
m.fit(df_train)
future = m.make_future_dataframe(periods=n_years * 365)
fc = m.predict(future)[['ds', 'yhat_lower', 'yhat_upper']].rename(columns={"ds": "Date", "yhat_lower": "Lower", "yhat_upper": "Upper"})
print("Process Completed!")
return fc
def forecast_display(state):
notify(state, 'i', 'Predicting...')
state.forecast = generate_forecast_data(state.data, state.n_years)
notify(state, 's', 'Prediction done! Forecast data has been updated!')
#### Get the data, make the initial forecast and build a front-end web app with Taipy GUI
data = get_stock_data(selected_stock, start_date, end_date)
forecast = generate_forecast_data(data, n_years)
show_dialog = False
partial_md = "<|{forecast}|table|>"
dialog_md = "<|{show_dialog}|dialog|partial={partial}|title=Forecast Data|on_action={lambda state: state.assign('show_dialog', False)}|>"
page = dialog_md + """<|toggle|theme|>
<|container|
# Stock Price **Analysis**{: .color-primary} Dashboard
<|layout|columns=1 2 1|gap=40px|class_name=card p2|
<dates|
#### Selected **Period**{: .color-primary}
From:
<|{start_date}|date|on_change=get_data_from_range|>
To:
<|{end_date}|date|on_change=get_data_from_range|>
|dates>
<ticker|
#### Selected **Ticker**{: .color-primary}
Please enter a valid ticker:
<|{selected_stock}|input|label=Stock|on_action=get_data_from_range|>
or choose a popular one
<|{selected_stock}|toggle|lov=MSFT;GOOG;AAPL;AMZN;META;COIN;AMC;PYPL|on_change=get_data_from_range|>
|ticker>
<years|
#### Prediction **years**{: .color-primary}
Select number of prediction years: <|{n_years}|>
<|{n_years}|slider|min=1|max=5|>
<|PREDICT|button|on_action=forecast_display|class_name={'plain' if len(forecast)==0 else ''}|>
|years>
|>
<|Historical Data|expandable|expanded=False|
<|layout|columns=1 1|
<|
### Historical **closing**{: .color-primary} price
<|{data}|chart|mode=line|x=Date|y[1]=Open|y[2]=Close|>
|>
<|
### Historical **daily**{: .color-primary} trading volume
<|{data}|chart|mode=line|x=Date|y=Volume|>
|>
|>
### **Whole**{: .color-primary} historical data: <|{selected_stock}|text|raw|>
<|{data}|table|>
<br/>
|>
### **Forecast**{: .color-primary} Data
<|{forecast}|chart|mode=line|x=Date|y[1]=Lower|y[2]=Upper|>
<br/>
<|More info|button|on_action={lambda s: s.assign("show_dialog", True)}|>
{: .text-center}
|>
<br/>
"""
# Run Taipy GUI
gui = Gui(page)
partial = gui.add_partial(partial_md)
gui.run(dark_mode=False, title="Stock Visualization")
|
# Create app for demo-movie-genre main.py
import taipy as tp
import pandas as pd
from taipy import Config, Scope, Gui
# Create a Taipy App that will output the 7 best movies for a genre
# Taipy Core - backend definition
# Filter function for Task
def filtering_genre(initial_dataset: pd.DataFrame, selected_genre):
filtered_dataset = initial_dataset[initial_dataset['genres'].str.contains(selected_genre)]
filtered_data = filtered_dataset.nlargest(7, 'Popularity %')
return filtered_data
# Input Data Nodes configuration
initial_dataset_cfg = Config.configure_data_node(id="initial_dataset",
storage_type="csv",
path="data.csv",
scope=Scope.GLOBAL)
selected_genre_cfg = Config.configure_data_node(id="selected_genre_node",
default_data="ACTION",
scope=Scope.GLOBAL)
# Output Data Node configuration
filtered_data_cfg = Config.configure_data_node(id="filtered_data",
scope=Scope.GLOBAL)
# Task configuration
filter_task_cfg = Config.configure_task(id="filter_genre",
function=filtering_genre,
input=[initial_dataset_cfg, selected_genre_cfg],
output=filtered_data_cfg,
skippable=True)
# Pipeline configuration
pipeline_cfg = Config.configure_pipeline(id="pipeline",
task_configs=[filter_task_cfg])
# Scenario configuration
scenario_cfg = Config.configure_scenario(id="scenario", pipeline_configs=[pipeline_cfg])
# Run of the Taipy Core service
tp.Core().run()
# Creation of my scenario
scenario = tp.create_scenario(scenario_cfg)
# Taipy GUI- front end definition
# Callback definition
def modify_df(state):
scenario.selected_genre_node.write(state.selected_genre)
tp.submit(scenario)
state.df = scenario.filtered_data.read()
# Get list of genres
list_genres = ['Action', 'Adventure', 'Animation', 'Children', 'Comedy', 'Fantasy', 'IMAX', 'Romance',
'Sci-Fi', 'Western', 'Crime', 'Mystery', 'Drama', 'Horror', 'Thriller', 'Film-Noir',
'War', 'Musical', 'Documentary']
# Initialization of variables
df = pd.DataFrame(columns=['Title', 'Popularity %'])
selected_genre = None
# movie_genre_app
movie_genre_app = """
# Film recommendation
## Choose your favorite genre
<|{selected_genre}|selector|lov={list_genres}|on_change=modify_df|dropdown|>
## Here are the top 7 picks
<|{df}|chart|x=Title|y=Popularity %|type=bar|title=Film Popularity|>
"""
# run the app
Gui(page=movie_genre_app).run()
|
# Create app for demo-job-monitoring __init__.py
|
# Create app for demo-job-monitoring runtime.py
from taipy import run
class App:
"""A singleton class that provides the Taipy runtime objects."""
def __new__(cls):
if not hasattr(cls, "instance"):
cls.instance = super(App, cls).__new__(cls)
return cls.instance
@property
def gui(self):
return self.__gui
@property
def core(self):
return self.__core
@gui.setter
def gui(self, gui):
self.__gui = gui
@core.setter
def core(self, core):
self.__core = core
def start(self, **kwargs):
# Starts the app by calling `taipy.run` on the core and gui objects:
run(self.__gui, self.__core, **kwargs)
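# --- Hedged usage note (not in the original runtime.py) ---
# App is a singleton: every call to App() returns the same instance, which is how
# monitoring.py reaches the Gui object created in main.py.
if __name__ == '__main__':
    assert App() is App()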
|
# Create app for demo-job-monitoring main.py
from runtime import App
from pages import root, monitoring
import taipy
from taipy.config.config import Config
from taipy.gui import Gui
import os
# Variables for bindings
all_jobs = [['','','','']]
show_dialog_run_pipeline = False
selected_pipeline = None
show_details_pane = False
selected_job = None
if __name__ == "__main__":
# Initialize Taipy objects
Config.configure_job_executions(mode="standalone", nb_of_workers=4)
Config.load("app.config.toml")
App().core = taipy.Core()
App().gui = Gui(pages={"/": root.page, "monitoring": monitoring.page})
# Start the app
App().start(
title="Job Monitoring Demo",
port=os.environ.get("PORT", "8080"),
dark_mode=False,
css_file="app",
)
|
# Create app for demo-job-monitoring __init__.py
|
# Create app for demo-job-monitoring ml.py
from sklearn.linear_model import LogisticRegression
import pandas as pd
import numpy as np
# Test prediction with a Female, 19 years old, earning 20000
fixed_value = [1, 19, 20000]
def preprocess(df: pd.DataFrame) -> pd.DataFrame:
def _gender_to_int(gender):
if gender == "Female":
return 1
return 0
df["GenderNum"] = df["Gender"].apply(_gender_to_int)
return df
def train(dataset):
# X (features) are "GenderNum", "Age", "EstimatedSalary"
X = dataset[["GenderNum", "Age", "EstimatedSalary"]]
# Y is "Purchased"
Y = dataset[["Purchased"]]
# Let's split the dataset: the first 50 will be used for training,
# the rest will be for testing
split = 50
X_train, Y_train = X[:split], Y[:split]
X_test, Y_test = X[split:], Y[split:]
# Using scikit-learn default
regression = LogisticRegression(random_state=0, max_iter=10000).fit(
X_train.values, Y_train.values.ravel()
)
# Accuracy of our model:
print(f"intercept: {regression.intercept_} coefficients: {regression.coef_}")
print(f"train accuracy: {regression.score(X_train, Y_train)}")
print(f"test accuracy: {regression.score(X_test, Y_test)}") # We aim for > 0.8...
return regression
def predict(x, regression: LogisticRegression):
variables = np.array(x).reshape(1, -1)
result = regression.predict(variables)
print(f"for: {variables}, the prediction is {result}")
return result
if __name__ == "__main__":
# Testing
df = pd.read_csv("data/data.csv")
df = preprocess(df)
model = train(df)
print(predict(fixed_value, model))
|
# Create app for demo-job-monitoring debug.py
import time
def long_running(anything):
print("Waiting 20 seconds...")
time.sleep(20)
print("Done!")
return anything
def raise_exception(anything):
print("Waiting 5 seconds before raising an exception...")
time.sleep(5)
raise Exception("A very expected error occured!")
|
# Create app for demo-job-monitoring monitoring.py
import taipy as tp
from taipy.gui import get_state_id, invoke_callback, Markdown
from taipy.config.config import Config
from taipy.core.job.job import Job
from runtime import App
def get_all_jobs():
"""Returns all the known jobs (as a array of fields)."""
def _job_to_fields(job: Job) -> list[str]:
return [
job.submit_id,
job.id,
job.creation_date.strftime("%b %d %Y %H:%M:%S"),
str(job.status),
]
return [_job_to_fields(job) for job in tp.get_jobs()]
def get_all_pipelines():
"""Returns all pipelines (as an array of ids)"""
return [
pipeline.id
for pipeline in Config.pipelines.values()
if pipeline.id != "default" # we explicitely get rid of the "default" pipeline
]
def get_job_by_id(id):
"""Return a job from its id"""
found = [job for job in tp.get_jobs() if job.id == id]
if found:
return found[0]
return None
def get_job_by_index(index):
"""Return a job from its index"""
all_jobs = tp.get_jobs()
if len(all_jobs) > index:
return all_jobs[index]
return None
def get_status(job: Job):
"""Get the status of the given job as string."""
if not job:
return None
return job.status.name.lower()
# -----------------------------------------------------------------------------
# Callbacks / UI function
def on_style(state, index, row):
status_index = 3
if 'RUNNING' in row[status_index]:
return 'blue'
if 'COMPLETED' in row[status_index]:
return 'green'
if 'BLOCKED' in row[status_index]:
return 'orange'
if 'FAILED' in row[status_index]:
return 'red'
def refresh_job_list(state):
"""Refresh the job list"""
state.all_jobs = get_all_jobs()
def job_updated(state_id, pipeline, job):
"""Callback called when a job has been updated."""
# invoke_callback allows running a function with a GUI _state_.
invoke_callback(App().gui, state_id, refresh_job_list, args=[])
def open_run_pipeline_dialog(state):
"""Opens the 'Run pipeline...' dialog."""
state.show_dialog_run_pipeline = True
def close_run_pipeline_dialog(state):
"""Closes the 'Run pipeline...' dialog."""
state.show_dialog_run_pipeline = False
def run_pipeline(state):
"""Runs a pipeline action."""
# We need to pass the state ID so that it can be restored in the job_updated listener:
state_id = get_state_id(state)
# Get selected pipeline config:
selected = state.selected_pipeline
pipeline_config = Config.pipelines[selected]
if not pipeline_config:
raise Exception(f"unknown pipeline config: {selected}")
# Close the dialog
close_run_pipeline_dialog(state)
pipeline = tp.create_pipeline(pipeline_config)
tp.subscribe_pipeline(pipeline=pipeline, callback=job_updated, params=[state_id])
tp.submit(pipeline)
def on_table_click(state, table, action, payload):
job_index = payload["index"]
selected_job = get_job_by_index(job_index)
state.selected_job = selected_job
state.show_details_pane = True
def cancel_selected_job(state):
job_id = state.selected_job.id
tp.cancel_job(state.selected_job)
state.show_details_pane = False
refresh_job_list(state)
state.selected_job = get_job_by_id(job_id)
def delete_selected_job(state):
tp.delete_job(state.selected_job, force=True)
state.show_details_pane = False
refresh_job_list(state)
# -----------------------------------------------------------------------------
# UI Configuration
columns = {
"0": {"title": "Submit ID"},
"1": {"title": "Job ID"},
"2": {"title": "Creation Date"},
"3": {"title": "Status"},
}
# -----------------------------------------------------------------------------
# Page
page = Markdown("job_monitoring/pages/monitoring.md")
|
# Create app for demo-job-monitoring __init__.py
|
# Create app for demo-job-monitoring root.py
from taipy.gui import Markdown
content = """
# Job Monitoring Demo
"""
page = Markdown(content)
|
# Create app for demo-job-monitoring monitoring.md
<|{all_jobs}|table|columns={columns}|width='100%'|on_action={on_table_click}|style=on_style|>
<|Refresh List|button|on_action={refresh_job_list}|>
<|Run Pipeline...|button|on_action={open_run_pipeline_dialog}|>
<|{show_dialog_run_pipeline}|dialog|title=Run pipeline...|
<|{selected_pipeline}|selector|lov={get_all_pipelines()}|>
<|Run|button|on_action={run_pipeline}|>
<|Cancel|button|on_action={close_run_pipeline_dialog}|>
|>
<|{show_details_pane}|pane|
# Job Details <|Delete|button|on_action=delete_selected_job|> <|Cancel|button|on_action=cancel_selected_job|>
<|layout|columns=1 1|
<|part|class_name=card|
## Task
<|{selected_job.task.config_id}|>
|>
<|part|class_name=card|
## Status
<|{get_status(selected_job)}|>
|>
|>
<|part|class_name=card|
## ID
<|{selected_job.id}|>
|>
<|part|class_name=card|
## Submission ID
<|{selected_job.submit_id}|>
|>
<|part|class_name=card|
## Creation Date
<|{selected_job.creation_date.strftime("%b %d %y %H:%M:%S")}|>
|>
<|part|class_name=card|
## Stacktrace
<|{"\n".join(selected_job.stacktrace)}|class_name=code|>
|>
----
|>
|
# Create app for demo-fraud-detection charts.py
""" Prepare data for charts """
import pandas as pd
def gen_amt_data(transactions: pd.DataFrame) -> list:
"""
Create a list of amt values for fraudulent and non-fraudulent transactions
Args:
- transactions: the transactions dataframe
Returns:
- a list of two dictionaries containing the data for the two histograms
"""
amt_fraud = transactions[transactions["fraud"]]["amt"]
amt_no_fraud = transactions[~transactions["fraud"]]["amt"]
amt_data = [
{"Amount ($)": list(amt_no_fraud)},
{"Amount ($)": list(amt_fraud)},
]
return amt_data
def gen_gender_data(transactions: pd.DataFrame) -> pd.DataFrame:
"""
Create a dataframe containing the percentage of fraudulent transactions
per gender
Args:
- transactions: the transactions dataframe
Returns:
- the resulting dataframe
"""
male_fraud_percentage = len(
transactions[transactions["fraud"]].loc[transactions["gender"] == "M"]
) / len(transactions[transactions["fraud"]])
female_fraud_percentage = 1 - male_fraud_percentage
male_not_fraud_percentage = len(
transactions[~transactions["fraud"]].loc[transactions["gender"] == "M"]
) / len(transactions[~transactions["fraud"]])
female_not_fraud_percentage = 1 - male_not_fraud_percentage
gender_data = pd.DataFrame(
{
"Fraudulence": ["Not Fraud", "Fraud"],
"Male": [male_not_fraud_percentage, male_fraud_percentage],
"Female": [female_not_fraud_percentage, female_fraud_percentage],
}
)
return gender_data
def gen_cat_data(transactions: pd.DataFrame) -> pd.DataFrame:
"""
Generates a dataframe with the percentage difference
between fraudulent and non-fraudulent transactions per category
Args:
- transactions: the transactions dataframe
Returns:
- the resulting dataframe
"""
categories = transactions["category"].unique()
fraud_categories = [
len(
transactions[transactions["fraud"]].loc[
transactions["category"] == category
]
)
for category in categories
]
fraud_categories_norm = [
category / len(transactions[transactions["fraud"]])
for category in fraud_categories
]
not_fraud_categories = [
len(
transactions[~transactions["fraud"]].loc[
transactions["category"] == category
]
)
for category in categories
]
not_fraud_categories_norm = [
category / len(transactions[~transactions["fraud"]])
for category in not_fraud_categories
]
diff_categories = [
fraud_categories_norm[i] - not_fraud_categories_norm[i]
for i in range(len(categories))
]
cat_data = pd.DataFrame(
{
"Category": categories,
"Difference": diff_categories,
}
)
cat_data = cat_data.sort_values(by="Difference", ascending=False)
return cat_data
def gen_age_data(transactions: pd.DataFrame) -> pd.DataFrame:
"""
Generates a dataframe with the percentage of fraudulent transactions
per age
Args:
- transactions: the transactions dataframe
Returns:
- the resulting dataframe
"""
ages = range(111)
fraud_age = [
len(transactions[transactions["fraud"]].loc[transactions["age"] == age])
/ len(transactions[transactions["fraud"]])
for age in ages
]
not_fraud_age = [
len(transactions[~transactions["fraud"]].loc[transactions["age"] == age])
/ len(transactions[~transactions["fraud"]])
for age in ages
]
age_data = pd.DataFrame(
{
"Age": ages,
"Fraud": fraud_age,
"Not Fraud": not_fraud_age,
}
)
return age_data
def gen_hour_data(transactions: pd.DataFrame) -> pd.DataFrame:
"""
Generates a dataframe with the percentage of fraudulent transactions
per hour
Args:
- transactions: the transactions dataframe
Returns:
- the resulting dataframe
"""
hours = range(1, 25)
fraud_hours = [
len(transactions[transactions["fraud"]].loc[transactions["hour"] == hour])
/ len(transactions[transactions["fraud"]])
for hour in hours
]
not_fraud_hours = [
len(transactions[~transactions["fraud"]].loc[transactions["hour"] == hour])
/ len(transactions[~transactions["fraud"]])
for hour in hours
]
hour_data = pd.DataFrame(
{
"Hour": hours,
"Fraud": fraud_hours,
"Not Fraud": not_fraud_hours,
}
)
return hour_data
def gen_day_data(transactions: pd.DataFrame) -> pd.DataFrame:
"""
Generates a dataframe with the percentage of fraudulent transactions
per weekday
Args:
- transactions: the transactions dataframe
Returns:
- the resulting dataframe
"""
days = range(7)
days_names = [
"Monday",
"Tuesday",
"Wednesday",
"Thursday",
"Friday",
"Saturday",
"Sunday",
]
fraud_days = [
len(transactions[transactions["fraud"]].loc[transactions["day"] == day])
/ len(transactions[transactions["fraud"]])
for day in days
]
not_fraud_days = [
len(transactions[~transactions["fraud"]].loc[transactions["day"] == day])
/ len(transactions[~transactions["fraud"]])
for day in days
]
day_data = pd.DataFrame(
{
"Day": days_names,
"Fraud": fraud_days,
"Not Fraud": not_fraud_days,
}
)
return day_data
def gen_month_data(transactions: pd.DataFrame) -> pd.DataFrame:
"""
Generates a dataframe with the percentage of fraudulent transactions
per month
Args:
- transactions: the transactions dataframe
Returns:
- the resulting dataframe
"""
months = range(1, 13)
months_names = [
"January",
"February",
"March",
"April",
"May",
"June",
"July",
"August",
"September",
"October",
"November",
"December",
]
fraud_months = [
len(transactions[transactions["fraud"]].loc[transactions["month"] == month])
/ len(transactions[transactions["fraud"]])
for month in months
]
not_fraud_months = [
len(transactions[~transactions["fraud"]].loc[transactions["month"] == month])
/ len(transactions[~transactions["fraud"]])
for month in months
]
month_data = pd.DataFrame(
{
"Month": months_names,
"Fraud": fraud_months,
"Not Fraud": not_fraud_months,
}
)
return month_data
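# --- Hedged illustration (not in the original charts.py) ---
# A toy transactions frame with just the columns these two helpers expect
# ('fraud', 'amt', 'gender'), to show the shape of the returned chart data.
if __name__ == '__main__':
    toy = pd.DataFrame({
        "fraud": [True, False, False, True],
        "amt": [500.0, 20.0, 35.0, 900.0],
        "gender": ["M", "F", "M", "F"],
    })
    print(gen_amt_data(toy))
    print(gen_gender_data(toy))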
|
# Create app for demo-fraud-detection utils.py
""" Data Manipulation and Callbacks """
import datetime as dt
import numpy as np
import pandas as pd
from taipy.gui import State, navigate, notify
import xgboost as xgb
from shap import Explainer, Explanation
from sklearn.metrics import confusion_matrix
column_names = [
"amt",
"zip",
"city_pop",
"age",
"hour",
"day",
"month",
"category_food_dining",
"category_gas_transport",
"category_grocery_net",
"category_grocery_pos",
"category_health_fitness",
"category_home",
"category_kids_pets",
"category_misc_net",
"category_misc_pos",
"category_personal_care",
"category_shopping_net",
"category_shopping_pos",
"category_travel",
]
def explain_pred(state: State, _: str, payload: dict) -> None:
"""
When a transaction is selected in the table
Explain the prediction using SHAP, update the waterfall chart
Args:
- state: the state of the app
- payload: the payload of the event containing the index of the transaction
"""
idx = payload["index"]
exp = state.explaination[idx]
feature_values = [-value for value in list(exp.values)]
data_values = list(exp.data)
for i, value in enumerate(data_values):
if isinstance(value, float):
value = round(value, 2)
data_values[i] = value
names = [f"{name}: {value}" for name, value in zip(column_names, data_values)]
exp_data = pd.DataFrame({"Feature": names, "Influence": feature_values})
exp_data["abs_importance"] = exp_data["Influence"].abs()
exp_data = exp_data.sort_values(by="abs_importance", ascending=False)
exp_data = exp_data.drop(columns=["abs_importance"])
exp_data = exp_data[:5]
state.exp_data = exp_data
if state.transactions.iloc[idx]["fraud"]:
state.fraud_text = "Why is this transaction fraudulent?"
else:
state.fraud_text = "Why is this transaction not fraudulent?"
first = state.transactions.iloc[idx]["first"]
last = state.transactions.iloc[idx]["last"]
state.specific_transactions = state.transactions[
(state.transactions["first"] == first) & (state.transactions["last"] == last)
]
state.selected_transaction = state.transactions.loc[[idx]]
state.selected_client = f"{first} {last}"
navigate(state, "Analysis")
def generate_transactions(
state: State,
df: pd.DataFrame,
model: xgb.XGBRegressor,
threshold: float,
start_date="2020-06-21",
end_date="2030-01-01",
) -> tuple[pd.DataFrame, Explanation]:
"""
Generates a DataFrame of transactions with the fraud prediction
Args:
- state: the state of the app
- df: the DataFrame containing the transactions
- model: the model used to predict the fraud
- threshold: the threshold used to determine if a transaction is fraudulent
- start_date: the start date of the transactions
- end_date: the end date of the transactions
Returns:
- a DataFrame of transactions with the fraud prediction
"""
start_date = str(start_date)
end_date = str(end_date)
start_date_dt = dt.datetime.strptime(start_date, "%Y-%m-%d")
end_date_dt = dt.datetime.strptime(end_date, "%Y-%m-%d")
# Make sure the dates are separated by at least one day
if (end_date_dt - start_date_dt).days < 1:
notify(state, "error", "The start date must be before the end date")
raise Exception("The start date must be before the end date")
# Make sure that start_date is between 2020-06-21 and 2020-06-30
if not (dt.datetime(2020, 6, 21) <= start_date_dt <= dt.datetime(2020, 6, 30)):
notify(
state, "error", "The start date must be between 2020-06-21 and 2020-06-30"
)
raise Exception("The start date must be between 2020-06-21 and 2020-06-30")
df["age"] = dt.date.today().year - pd.to_datetime(df["dob"]).dt.year
df["hour"] = pd.to_datetime(df["trans_date_trans_time"]).dt.hour
df["day"] = pd.to_datetime(df["trans_date_trans_time"]).dt.dayofweek
df["month"] = pd.to_datetime(df["trans_date_trans_time"]).dt.month
test = df[
[
"category",
"amt",
"zip",
"city_pop",
"age",
"hour",
"day",
"month",
"is_fraud",
]
]
test = pd.get_dummies(test, drop_first=True)
test = test[df["trans_date_trans_time"].between(str(start_date), str(end_date))]
X_test = test.drop("is_fraud", axis="columns")
X_test_values = X_test.values
transactions = df[
df["trans_date_trans_time"].between(str(start_date), str(end_date))
]
raw_results = model.predict(X_test_values)
results = [str(min(1, round(result, 2))) for result in raw_results]
transactions.insert(0, "fraud_value", results)
# Low if under 0.2, Medium if under 0.5, High if over 0.5
results = ["Low" if float(result) < 0.2 else "Medium" for result in raw_results]
for i, result in enumerate(results):
if result == "Medium" and float(raw_results[i]) > 0.5:
results[i] = "High"
transactions.insert(0, "fraud_confidence", results)
results = [float(result) > threshold for result in raw_results]
transactions.insert(0, "fraud", results)
explainer = Explainer(model)
sv = explainer(X_test)
explaination = Explanation(sv, sv.base_values, X_test, feature_names=X_test.columns)
# Drop Unnamed: 0 column if it exists
if "Unnamed: 0" in transactions.columns:
transactions = transactions.drop(columns=["Unnamed: 0"])
return transactions, explaination
def update_threshold(state: State) -> None:
"""
Change the threshold used to determine if a transaction is fraudulent
Generate the confusion matrix
Args:
- state: the state of the app
"""
threshold = float(state.threshold)
results = [
float(result) > threshold for result in state.transactions["fraud_value"]
]
state.transactions["fraud"] = results
state.transactions = state.transactions
results = [
float(result) > threshold
for result in state.original_transactions["fraud_value"]
]
state.original_transactions["fraud"] = results
state.original_transactions = state.original_transactions
y_pred = results
y_true = state.original_transactions["is_fraud"]
cm = confusion_matrix(y_true, y_pred)
cm = cm.astype("float") / cm.sum(axis=1)[:, np.newaxis]
tp, tn, fp, fn = cm[1][1], cm[0][0], cm[0][1], cm[1][0]
dataset = state.original_transactions[:10000]
state.true_positives = dataset[
(dataset["is_fraud"] == True) & (dataset["fraud"] == True)
]
state.true_negatives = dataset[
(dataset["is_fraud"] == False) & (dataset["fraud"] == False)
]
state.false_positives = dataset[
(dataset["is_fraud"] == False) & (dataset["fraud"] == True)
]
state.false_negatives = dataset[
(dataset["is_fraud"] == True) & (dataset["fraud"] == False)
]
data = {
"Values": [
[fn, tp],
[tn, fp],
],
"Actual": ["Fraud", "Not Fraud"],
"Predicted": ["Not Fraud", "Fraud"],
}
layout = {
"annotations": [],
"xaxis": {"ticks": "", "side": "top"},
"yaxis": {"ticks": "", "ticksuffix": " "},
}
predicted = data["Predicted"]
actuals = data["Actual"]
for actual, _ in enumerate(actuals):
for pred, _ in enumerate(predicted):
value = data["Values"][actual][pred]
annotation = {
"x": predicted[pred],
"y": actuals[actual],
"text": f"{str(round(value, 3)*100)[:4]}%",
"font": {"color": "white" if value < 0.5 else "black", "size": 30},
"showarrow": False,
}
layout["annotations"].append(annotation)
state.confusion_data = data
state.confusion_layout = layout
update_table(state)
return (
state.true_positives,
state.true_negatives,
state.false_positives,
state.false_negatives,
)
def update_table(state: State) -> None:
"""
Updates the table of transactions displayed
Args:
- state: the state of the app
"""
if state.selected_table == "True Positives":
state.displayed_table = state.true_positives
elif state.selected_table == "False Positives":
state.displayed_table = state.false_positives
elif state.selected_table == "True Negatives":
state.displayed_table = state.true_negatives
elif state.selected_table == "False Negatives":
state.displayed_table = state.false_negatives
|
# Create app for demo-fraud-detection main.py
""" Fraud Detection App """
import pickle
import numpy as np
import pandas as pd
from taipy.gui import Gui, Icon, State, navigate, notify
from utils import (
explain_pred,
generate_transactions,
update_threshold,
update_table,
)
from charts import *
DATA_POINTS = 30000
threshold = "0.5"
threshold_lov = np.arange(0, 1, 0.01)
confusion_text = "Confusion Matrix"
fraud_text = "No row selected"
exp_data = pd.DataFrame({"Feature": [], "Influence": []})
df = pd.read_csv("data/fraud_data.csv")
df["merchant"] = df["merchant"].str[6:]
model = pickle.load(open("model.pkl", "rb"))
transactions, explaination = generate_transactions(None, df, model, float(threshold))
original_transactions = transactions
original_explaination = explaination
specific_transactions = transactions
selected_client = "No client selected"
start_date = "2020-06-21"
end_date = "2020-06-22"
selected_table = "True Positives"
true_positives = None
false_positives = None
true_negatives = None
false_negatives = None
displayed_table = None
selected_transaction = None
def fraud_style(_: State, index: int, values: list) -> str:
"""
Style the transactions table rows: red for high fraud confidence, orange for medium
Args:
- state: the state of the app
- index: the index of the row
- values: the values of the row
Returns:
- the style of the row
"""
if values["fraud_confidence"] == "High":
return "red-row"
elif values["fraud_confidence"] == "Medium":
return "orange-row"
return ""
amt_data = gen_amt_data(transactions)
gender_data = gen_gender_data(transactions)
cat_data = gen_cat_data(transactions)
age_data = gen_age_data(transactions)
hour_data = gen_hour_data(transactions)
day_data = gen_day_data(transactions)
month_data = gen_month_data(transactions)
df = df[:DATA_POINTS]
transactions = transactions[:DATA_POINTS]
waterfall_layout = {
"margin": {"b": 150},
}
amt_options = [
{
"marker": {"color": "#4A4", "opacity": 0.8},
"xbins": {"start": 0, "end": 2000, "size": 10},
"histnorm": "probability",
},
{
"marker": {"color": "#A33", "opacity": 0.8, "text": "Compare Data"},
"xbins": {"start": 0, "end": 2000, "size": 10},
"histnorm": "probability",
},
]
amt_layout = {
"barmode": "overlay",
"showlegend": True,
}
confusion_data = pd.DataFrame({"Predicted": [], "Actual": [], "Values": []})
confusion_layout = None
confusion_options = {"colorscale": "YlOrRd", "displayModeBar": False}
confusion_config = {"scrollZoom": False, "displayModeBar": False}
transactions = df
transactions = transactions.drop("Unnamed: 0", axis="columns")
def on_init(state: State) -> None:
"""
Generate the confusion matrix on start
Args:
- state: the state of the app
"""
update_transactions(state)
state.displayed_table = state.true_positives
(
state.true_positives,
state.true_negatives,
state.false_positives,
state.false_negatives,
) = update_threshold(state)
update_table(state)
def update_transactions(state: State) -> None:
"""
Detects frauds in the selected time period
Args:
- state: the state of the app
"""
notify(state, "info", "Predicting fraud...")
state.transactions, state.explaination = generate_transactions(
state, df, model, float(state.threshold), state.start_date, state.end_date
)
state.transactions.reset_index(inplace=True)
number_of_fraud = len(state.transactions[state.transactions["fraud"] == True])
notify(state, "success", f"Predicted {number_of_fraud} fraudulent transactions")
menu_lov = [
("Transactions", Icon("images/transactions.png", "Transactions")),
("Analysis", Icon("images/analysis.png", "Analysis")),
("Fraud Distribution", Icon("images/distribution.png", "Fraud Distribution")),
("Threshold Selection", Icon("images/threshold.png", "Threshold Selection")),
]
page = "Transactions"
def menu_fct(state, var_name, var_value):
"""Function that is called when there is a change in the menu control."""
state.page = var_value["args"][0]
navigate(state, state.page.replace(" ", "-"))
ROOT = """
<|menu|label=Menu|lov={menu_lov}|on_action=menu_fct|>
"""
TRANSACTIONS_PAGE = """
# List of **Transactions**{: .color-primary}
--------------------------------------------------------------------
## Select start and end date for a prediction
<|layout|columns=1 1 3|
Start Date: <|{start_date}|date|>
End Date (excluding): <|{end_date}|date|>
|>
<|Detect Frauds|button|on_action=update_transactions|>
## Select a transaction to explain the prediction
<|{transactions}|table|on_action=explain_pred|style=fraud_style|filter|rebuild|>
"""
ANALYSIS_PAGE = """
# Prediction **Analysis**{: .color-primary}
--------------------------------------------------------------------
<|layout|columns=2 3|
<|card|
## <|{fraud_text}|text|>
<|{exp_data}|chart|type=waterfall|x=Feature|y=Influence|layout={waterfall_layout}|>
|>
<|
## Selected Transaction:
<|{selected_transaction}|table|show_all=True|rebuild|style=fraud_style|>
## Transactions of client: **<|{selected_client}|text|raw|>**{: .color-primary}
<|{specific_transactions}|table|style=fraud_style|filter|on_action=explain_pred|>
|>
|>
"""
CHART_PAGE = """
# Fraud **Distribution**{: .color-primary}
--------------------------------------------------------------------
## Charts of fraud distribution by feature
<|{amt_data}|chart|type=histogram|title=Transaction Amount Distribution|color[2]=red|color[1]=green|name[2]=Fraud|name[1]=Not Fraud|options={amt_options}|layout={amt_layout}|>
<br/><|{gender_data}|chart|type=bar|x=Fraudulence|y[1]=Male|y[2]=Female|title=Distribution of Fraud by Gender|>
<br/><|{cat_data}|chart|type=bar|x=Category|y=Difference|orientation=v|title=Difference in Fraudulence by Category (Positive = Fraudulent)|>
<br/><|{hour_data}|chart|type=bar|x=Hour|y[1]=Not Fraud|y[2]=Fraud|title=Distribution of Fraud by Hour|>
<br/><|{day_data}|chart|type=bar|x=Day|y[1]=Not Fraud|y[2]=Fraud|title=Distribution of Fraud by Day|>
"""
THRESHOLD_PAGE = """
# Threshold **Selection**{: .color-primary}
--------------------------------------------------------------------
## Select a threshold of confidence to filter the transactions
<|{threshold}|slider|on_change=update_threshold|lov=0.05;0.1;0.15;0.2;0.25;0.3;0.35;0.4;0.45;0.5;0.55;0.6;0.65;0.7;0.75;0.8;0.85;0.9;0.95|>
<|layout|columns=1 2|
<|{confusion_data}|chart|type=heatmap|z=Values|x=Predicted|y=Actual|layout={confusion_layout}|options={confusion_options}|plot_config={confusion_config}|height=70vh|>
<|card|
<|{selected_table}|selector|lov=True Positives;False Positives;True Negatives;False Negatives|on_change=update_table|dropdown=True|>
<|{displayed_table}|table|style=fraud_style|filter|rebuild|>
|>
|>
"""
pages = {
"/": ROOT,
"Transactions": TRANSACTIONS_PAGE,
"Analysis": ANALYSIS_PAGE,
"Fraud-Distribution": CHART_PAGE,
"Threshold-Selection": THRESHOLD_PAGE,
}
Gui(pages=pages).run(title="Fraud Detection Demo", dark_mode=False, debug=True)
|
# Create app for dask_taipy_bigdata_DEMO algo.py
import time
import pandas as pd
import dask.dataframe as dd
def task1(path_to_original_data: str):
print("__________________________________________________________")
print("1. TASK 1: DATA PREPROCESSING AND CUSTOMER SCORING ...")
start_time = time.perf_counter() # Start the timer
# Step 1: Read data using Dask
df = dd.read_csv(path_to_original_data)
# Step 2: Simplify the customer scoring formula
df['CUSTOMER_SCORE'] = (
0.5 * df['TotalPurchaseAmount'] / 1000 +
0.3 * df['NumberOfPurchases'] / 10 +
0.2 * df['AverageReviewScore']
)
# Select the scored customer columns and compute the Dask dataframe into a pandas DataFrame
scored_df = df[["CUSTOMER_SCORE", "TotalPurchaseAmount", "NumberOfPurchases", "TotalPurchaseTime"]]
pd_df = scored_df.compute()
end_time = time.perf_counter() # Stop the timer
execution_time = (end_time - start_time) * 1000 # Calculate the time in milliseconds
print(f"Time of Execution: {execution_time:.4f} ms")
return pd_df
def task2(scored_df, payment_threshold, score_threshold):
print("__________________________________________________________")
print("2. TASK 2: FEATURE ENGINEERING AND SEGMENTATION ...")
payment_threshold, score_threshold = float(payment_threshold), float(score_threshold)
start_time = time.perf_counter() # Start the timer
df = scored_df
# Feature: Indicator if customer's total purchase is above the payment threshold
df['HighSpender'] = (df['TotalPurchaseAmount'] > payment_threshold).astype(int)
# Feature: Average time between purchases
df['AverageTimeBetweenPurchases'] = df['TotalPurchaseTime'] / df['NumberOfPurchases']
# Additional computationally intensive features
df['Interaction1'] = df['TotalPurchaseAmount'] * df['NumberOfPurchases']
df['Interaction2'] = df['TotalPurchaseTime'] * df['CUSTOMER_SCORE']
df['PolynomialFeature'] = df['TotalPurchaseAmount'] ** 2
# Segment customers based on the score_threshold
df['ValueSegment'] = ['High Value' if score > score_threshold else 'Low Value' for score in df['CUSTOMER_SCORE']]
end_time = time.perf_counter() # Stop the timer
execution_time = (end_time - start_time) * 1000 # Calculate the time in milliseconds
print(f"Time of Execution: {execution_time:.4f} ms")
return df
def task3(df: pd.DataFrame, metric):
print("__________________________________________________________")
print("3. TASK 3: SEGMENT ANALYSIS ...")
start_time = time.perf_counter() # Start the timer
# Detailed analysis for each segment: mean/median of various metrics
segment_analysis = df.groupby('ValueSegment').agg({
'CUSTOMER_SCORE': metric,
'TotalPurchaseAmount': metric,
'NumberOfPurchases': metric,
'TotalPurchaseTime': metric,
'HighSpender': 'sum', # Total number of high spenders in each segment
'AverageTimeBetweenPurchases': metric
}).reset_index()
end_time = time.perf_counter() # Stop the timer
execution_time = (end_time - start_time) * 1000 # Calculate the time in milliseconds
print(f"Time of Execution: {execution_time:.4f} ms")
return segment_analysis
def task4(df: pd.DataFrame, segment_analysis: pd.DataFrame, summary_statistic_type: str):
print("__________________________________________________________")
print("4. TASK 4: ADDITIONAL ANALYSIS BASED ON SEGMENT ANALYSIS ...")
start_time = time.perf_counter() # Start the timer
# Filter out the High Value customers
high_value_customers = df[df['ValueSegment'] == 'High Value']
# Use summary_statistic_type to calculate different types of summary statistics
if summary_statistic_type == 'mean':
average_purchase_high_value = high_value_customers['TotalPurchaseAmount'].mean()
elif summary_statistic_type == 'median':
average_purchase_high_value = high_value_customers['TotalPurchaseAmount'].median()
elif summary_statistic_type == 'max':
average_purchase_high_value = high_value_customers['TotalPurchaseAmount'].max()
elif summary_statistic_type == 'min':
average_purchase_high_value = high_value_customers['TotalPurchaseAmount'].min()
median_score_high_value = high_value_customers['CUSTOMER_SCORE'].median()
# Fetch the summary statistic for 'TotalPurchaseAmount' for High Value customers from segment_analysis
segment_statistic_high_value = segment_analysis.loc[segment_analysis['ValueSegment'] == 'High Value', 'TotalPurchaseAmount'].values[0]
# Create a DataFrame to hold the results
result_df = pd.DataFrame({
'SummaryStatisticType': [summary_statistic_type],
'AveragePurchaseHighValue': [average_purchase_high_value],
'MedianScoreHighValue': [median_score_high_value],
'SegmentAnalysisHighValue': [segment_statistic_high_value]
})
end_time = time.perf_counter() # Stop the timer
execution_time = (end_time - start_time) * 1000 # Calculate the time in milliseconds
print(f"Time of Execution: {execution_time:.4f} ms")
return result_df
if __name__ == "__main__":
t1 = task1("data/SMALL_amazon_customers_data.csv")
t2 = task2(t1, 1500, 1.5)
t3 = task3(t2, "mean")
t4 = task4(t2, t3, "mean")
print(t4)
|
# Create app for demo-image-classification-part-2 readme.md
# Image Classification Part 2 Using Taipy Core
## Usage
- [Usage](#usage)
- [Image Classification Part 2](#what-is-image-classification-part-2)
- [Directory Structure](#directory-structure)
- [License](#license)
- [Installation](#installation)
- [Contributing](#contributing)
- [Code of conduct](#code-of-conduct)
## What is Image Classification Part 2
Taipy is a Python library for creating Business Applications. More information on our
[website](https://www.taipy.io).
[Image Classification Part 2](https://github.com/Avaiga/image-classification-part-2) is about how to use Taipy Core and Taipy Studio to efficiently create and manage Data and ML pipelines.
### Demo Type
- **Level**: Intermediate
- **Topic**: Taipy-CORE
- **Components/Controls**:
- Taipy CORE: configs, Taipy Studio
## How to run
This demo works with Python 3.8 or newer. Install the dependencies from the *Pipfile* and run *main.py*.
## Introduction
This demo is the second part of the Image Classification App built with Taipy and TensorFlow; it is recommended to watch the first part or go through its repository to understand the main functions and tasks.
The YouTube video covers using Taipy Core and Taipy Studio to build pipelines and manage different scenarios. The demo covers copying the necessary functions into a script file, configuring data nodes, specifying functions for tasks, configuring pipelines, and executing the scenario with Taipy Studio and/or Taipy Core.
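As a reference, below is a minimal, hypothetical sketch of the pattern this demo follows with Taipy Core: a function is wrapped in a task, connected to data nodes, grouped into a pipeline, and run through a scenario. The names `double`, `my_input`, `my_output`, `double_task`, `my_pipeline`, and `my_scenario` are illustrative only and are not part of the demo.

```python
from taipy import Config
import taipy as tp


def double(x):
    # Hypothetical task function, used only for this illustration
    return x * 2


# Data node configs: one holding the input value, one receiving the task output
input_cfg = Config.configure_data_node("my_input", default_data=21)
output_cfg = Config.configure_data_node("my_output")

# Task config: wraps the Python function and links its input/output data nodes
task_cfg = Config.configure_task("double_task", double, input_cfg, output_cfg)

# Pipeline and scenario configs built from the task
pipeline_cfg = Config.configure_pipeline("my_pipeline", [task_cfg])
scenario_cfg = Config.configure_scenario("my_scenario", [pipeline_cfg])

if __name__ == "__main__":
    tp.Core().run()
    scenario = tp.create_scenario(scenario_cfg)
    tp.submit(scenario)
    # Should print 42 once the submitted jobs have completed
    print(scenario.my_output.read())
```

The demo's own `main.py` (shown later in this document) follows the same structure, with generic data nodes for the Keras models and tasks for building, training, and predicting.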
## Directory Structure
- `src/`: Contains the demo source code.
- `docs/`: Contains the images for the documentation.
- `CODE_OF_CONDUCT.md`: Code of conduct for members and contributors of _image-classification-part-2_.
- `CONTRIBUTING.md`: Instructions to contribute to _image-classification-part-2_.
- `INSTALLATION.md`: Instructions to install _image-classification-part-2_.
- `LICENSE`: The Apache 2.0 License.
- `Pipfile`: File used by the Pipenv virtual environment to manage project dependencies.
- `README.md`: Current file.
## License
Copyright 2022 Avaiga Private Limited
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
[http://www.apache.org/licenses/LICENSE-2.0](https://www.apache.org/licenses/LICENSE-2.0.txt)
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
## Installation
Want to install _image-classification-part-2_? Check out our [`INSTALLATION.md`](INSTALLATION.md) file.
## Contributing
Want to help build _image-classification-part-2_? Check out our [`CONTRIBUTING.md`](CONTRIBUTING.md) file.
## Code of conduct
Want to be part of the _image-classification-part-2_ community? Check out our [`CODE_OF_CONDUCT.md`](CODE_OF_CONDUCT.md) file.
|
# Create app for demo-image-classification-part-2 config_from_tp_studio.py
from main_functions import *
from taipy import Config
import taipy as tp
Config.load('built_with_tp_studio.toml')
scenario_cfg = Config.scenarios['testing_scenario']
tp.Core().run()
main_scenario = tp.create_scenario(scenario_cfg)
tp.submit(main_scenario)
|
# Create app for demo-image-classification-part-2 main_functions.py
import tensorflow as tf
from tensorflow.keras import layers, models
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.utils import to_categorical
import pandas as pd
import numpy as np
class_names = ['AIRPLANE', 'AUTOMOBILE', 'BIRD', 'CAT', 'DEER', 'DOG', 'FROG', 'HORSE', 'SHIP', 'TRUCK']
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()
x_train = x_train / 255.0
y_train = to_categorical(y_train, len(class_names))
x_test = x_test / 255.0
y_test = to_categorical(y_test, len(class_names))
def tf_read(path: str): return tf.keras.models.load_model(path)
def tf_write(model, path: str):model.save(path)
#Task 1.1: Building the base model
def initialize_model(loss_f):
# Creating model base
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu', padding='same', input_shape=(32, 32, 3)))
model.add(layers.Conv2D(32, (3, 3), activation='relu', padding='same'))
model.add(layers.MaxPool2D((2,2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu', padding='same',))
model.add(layers.Conv2D(64, (3, 3), activation='relu', padding='same',))
model.add(layers.MaxPool2D((2,2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu', padding='same',))
model.add(layers.Conv2D(128, (3, 3), activation='relu', padding='same',))
model.add(layers.MaxPool2D((2,2)))
model.add(layers.Flatten())
model.add(layers.Dense(128, activation='relu'))
model.add(layers.Dense(10, activation='softmax'))
model.compile(optimizer='adam',
loss=loss_f,
metrics=['accuracy'])
return model
#Task 1.2: Initial training with a fixed number of epochs
datagen = ImageDataGenerator(
horizontal_flip=True,
width_shift_range=3/32,
height_shift_range=3/32
)
def initial_model_training(n_epochs, model):
print("INITIAL MODEL TRAINING STARTED: ")
h = model.fit(
datagen.flow(x_train, y_train, batch_size=64),
epochs=n_epochs,
validation_data=(x_test, y_test))
training_result = pd.DataFrame.from_dict(h.history)
training_result["N_Epochs"] = range(1,len(training_result)+1)
return training_result, model
#Task 2.1: Merge train with a chosen number of epochs (training + validation set as training)
def merged_train(number_of_epochs,model):
print("MERGED TRAIN STARTED: ")
# merge the training and validation sets
x_all = np.concatenate((x_train, x_test))
y_all = np.concatenate((y_train, y_test))
h = model.fit(
datagen.flow(x_all, y_all, batch_size=64),
epochs=number_of_epochs)
training_result = pd.DataFrame.from_dict(h.history)
training_result["N_Epochs"] = range(1,len(training_result)+1)
return training_result, model
#Task 2.2: Predict image class
def predict_image(image_path, trained_model):
print("PREDICTION TASK STARTED: ")
img_array = tf.keras.utils.load_img(image_path, target_size=(32, 32))
image = tf.keras.utils.img_to_array(img_array)
image = np.expand_dims(image, axis=0) / 255.
prediction_result = class_names[np.argmax(trained_model.predict(image))]
print("Prediction result: {}".format(prediction_result))
return prediction_result
|
# Create app for demo-image-classification-part-2 main.py
from main_functions import *
from taipy import Config
import taipy as tp
#######################################################################################################
##############################################PIPELINE 1###############################################
#######################################################################################################
###TASK 1.1: Building the base model
#input dn
loss_fn_cfg = Config.configure_data_node("loss_fn", default_data='categorical_crossentropy')
#output dn
base_model_cfg = Config.configure_generic_data_node("base_model",
read_fct=tf_read, read_fct_params=('models/base_model',),
write_fct=tf_write, write_fct_params=('models/base_model',))
#task
BUILD_CNN_BASE_cfg = Config.configure_task("BUILD_CNN_BASE",
initialize_model,
loss_fn_cfg,
base_model_cfg)
###TASK 1.2: Initial training with a fixed number of epochs
#input dn
initial_n_epochs_cfg = Config.configure_data_node("initial_n_epochs", default_data=30)
#output dn
initial_train_perf_cfg = Config.configure_data_node("initial_train_perf")
trained_initial_model_cfg = Config.configure_generic_data_node("trained_initial_model",
read_fct=tf_read, read_fct_params=('models/trained_initial_model',),
write_fct=tf_write, write_fct_params=('models/trained_initial_model',))
#task
INITIAL_TRAIN_cfg = Config.configure_task("INITIAL_TRAIN",
initial_model_training,
[initial_n_epochs_cfg, base_model_cfg],
[initial_train_perf_cfg, trained_initial_model_cfg])
#pipeline
pipeline_1_cfg = Config.configure_pipeline("pipeline_1",
[BUILD_CNN_BASE_cfg,
INITIAL_TRAIN_cfg])
#######################################################################################################
##############################################PIPELINE 2###############################################
#######################################################################################################
###TASK 2.1: Merge train with a chosen number of epochs (training + validation set as training)
#input dn
optimal_n_epochs_cfg = Config.configure_data_node("optimal_n_epochs", default_data=13)
#output dn
merged_train_perf_cfg = Config.configure_data_node("merged_train_perf")
merged_trained_model_cfg = Config.configure_generic_data_node("merged_trained_model",
read_fct=tf_read, read_fct_params=('models/merged_trained_model',),
write_fct=tf_write, write_fct_params=('models/merged_trained_model',))
#task
MERGED_TRAIN_cfg = Config.configure_task("MERGED_TRAIN",
merged_train,
[optimal_n_epochs_cfg, base_model_cfg],
[merged_train_perf_cfg, merged_trained_model_cfg])
###TASK 2.2: Make a prediction from an image path
#input dn: the trained model datanode, already set up
image_path_dn_cfg = Config.configure_data_node("image_path_dn", default_data="test_images/dog.jpg")
#output dn
prediction_cfg = Config.configure_data_node("image_prediction")
#task
IMAGE_PREDICT_cfg = Config.configure_task("IMAGE_PREDICT", predict_image,
[image_path_dn_cfg, merged_trained_model_cfg],
[prediction_cfg])
#pipeline
pipeline_2_cfg = Config.configure_pipeline("pipeline_2",
[MERGED_TRAIN_cfg,
IMAGE_PREDICT_cfg])
#######################################################################################################
##############################################Scenario#################################################
#######################################################################################################
scenario_cfg = Config.configure_scenario("testing_scenario",
[pipeline_1_cfg, pipeline_2_cfg])
tp.Core().run()
main_scenario = tp.create_scenario(scenario_cfg)
tp.submit(main_scenario)
Config.export("tpcore.toml")
|
# Create app for demo-edit-log LICENSE.md
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2022 Avaiga Private Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
|
# Create app for demo-edit-log main.py
from taipy.gui import Gui
import taipy as tp
from taipy.gui import notify
from config.config import *
# Variables for bindings
all_scenarios = [] # List of scenarios
all_scenarios_configs = [] # List of scenario configs
all_data_nodes = [] # List of node IDs
current_scenario = None
current_data_node = None
current_scenario_config = None
scenario_name = None
edits = [["","",""]]
value = None
commit_message = ""
create_scenario_dialog_visible = False
set_value_dialog_visible = False
# ====================================================================
def on_init(state):
state.all_scenarios = [(sc.id, sc.name) for sc in tp.get_scenarios()]
state.all_scenarios_configs = [sc.id for sc in Config.scenarios.values()]
def on_change_current_scenario(state):
scenario = tp.get(state.current_scenario[0])
# Propagate to list of nodes:
state.all_data_nodes = [(dn.id, dn.config_id) for dn in scenario.data_nodes.values()]
def on_change(state, var_name: str, var_value):
if var_name == "all_data_nodes":
# Propagate to current data node (pick any...):
if var_value and len(var_value) > 0:
data_node = next(iter(var_value))
state.current_data_node = data_node
if var_name == "current_data_node":
# Propagate to list of edits:
refresh_edit_log(state)
def refresh_edit_log(state):
# Forces a refresh of the edit log:
if state.current_data_node:
data_node_id = state.current_data_node[0]
data_node = tp.get(data_node_id)
state.edits = get_edit_log(data_node) if data_node else []
def create_scenario_clicked(state):
state.scenario_name = None
state.create_scenario_dialog_visible = True
def get_edit_log(data_node):
def _get_edit_fields(edit):
return [str(edit.get("timestamp")), edit.get("job_id"), edit.get("message")]
return [_get_edit_fields(edit) for edit in data_node.edits] if data_node else []
def on_submit_button_clicked(state):
scenario_id = state.current_scenario[0]
scenario = tp.get(scenario_id)
tp.submit(scenario)
# Force refresh of current data node:
refresh_edit_log(state)
notify(state, message=f"Scenario {scenario.name} submitted!")
def on_set_value_clicked(state):
state.set_value_dialog_visible = True
def create_scenario_dialog_action(state, id, action, payload):
state.create_scenario_dialog_visible = False
btn_idx = payload["args"][0]
if btn_idx == 0: # OK button
scenario_cfg = Config.scenarios[state.current_scenario_config]
name = state.scenario_name
scenario = tp.create_scenario(config=scenario_cfg, name=name)
all_scenarios = state.all_scenarios
all_scenarios.append((scenario.id, scenario.name))
state.all_scenarios = all_scenarios
notify(state, message=f"Scenario {scenario.name} created!")
def set_value_dialog_action(state, id, action, payload):
btn_idx = payload["args"][0]
if btn_idx == 0: # OK button
data_node_id = state.current_data_node[0]
node = tp.get(data_node_id)
node.write(state.value, message=state.commit_message)
state.current_data_node = state.current_data_node
state.set_value_dialog_visible = False
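# Column definitions for the edit-log table: list index -> column header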
history_table_columns = {
"0": {"title": "Date"},
"1": {"title": "Job Id"},
"2": {"title": "Comments"},
}
scenario_manager_page = """
<|part|class_name=card|
## Data Node Selection
<|{current_scenario}|selector|lov={all_scenarios}|dropdown|label=<select a scenario>|on_change=on_change_current_scenario|>
<|{current_data_node}|selector|lov={all_data_nodes}|dropdown|label=<select a data node>|>
<|Create New Scenario...|button|on_action=create_scenario_clicked|>
<|Run Scenario|button|active={current_scenario is not None}|on_action=on_submit_button_clicked|>
|>
<|part|class_name=card|
## Data Node Edit Log
<|{edits}|table|columns={history_table_columns}|width=50vw|>
<|Refresh|button|on_action=refresh_edit_log|>
<|Set value...|button|active={len(edits) > 0}|on_action=on_set_value_clicked|>
|>
<|{create_scenario_dialog_visible}|dialog|title=Create Scenario|labels=OK;Cancel|on_action=create_scenario_dialog_action|
Select a scenario config:
<|{current_scenario_config}|selector|dropdown|lov={all_scenarios_configs}|>
Enter a name for your scenario:
<|{scenario_name}|input|change_delay=10|>
|>
<|{set_value_dialog_visible}|dialog|title=Set value|labels=OK;Cancel|change_delay=10|on_action=set_value_dialog_action|
<|{value}|input|label=Enter a value|>
<|Optional commit message|expandable|expanded=False|
<|{commit_message}|input|>
|>
|>
"""
if __name__ == "__main__":
gui = Gui(page=scenario_manager_page)
core = tp.Core()
tp.run(core, gui, port=8080, dark_mode=False)
|
# Create app for demo-edit-log config.py
from algos.algos import task_function
from taipy import Config
Config.configure_job_executions(mode="standalone", max_nb_of_workers=1)
node_start_cfg = Config.configure_data_node(
id="node_start", default_data=[1, 2], description="This is the initial data node."
)
node_end_cfg = Config.configure_data_node(id="node_end", description="This is the result data node.")
task_cfg = Config.configure_task(id="task", input=[node_start_cfg], output=node_end_cfg, function=task_function)
pipeline_cfg = Config.configure_pipeline(id="pipeline", task_configs=[task_cfg])
Config.configure_scenario("My_super_scenario", [pipeline_cfg])
|
# Create app for demo-edit-log algos.py
def task_function(data):
"""A dummy task function"""
print(f"Executing function: {data}")
return data
|
# Create app for demo-face-recognition LICENSE.md
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2022 Avaiga Private Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
|
# Create app for demo-face-recognition find_taipy_gui_dir.py
# This Python script tries to locate the taipy.gui package, and
# prints its absolute path if it finds it.
import importlib.util
import os
taipy_gui = importlib.util.find_spec("taipy.gui")
if taipy_gui is None:
print("Cannot find 'taipy.gui'\nPlease run 'pip install taipy-gui'.")
else:
print(f"Taipy GUI location: {os.path.dirname(taipy_gui.origin)}")
|
# Create app for demo-face-recognition GETTING_STARTED.md
# Getting Started
## Installation
First you need to install the dependencies and build the front-end. Please refer to [INSTALLATION.md](INSTALLATION.md).
## How to use the demo
Once you have started the application, your default Web browser should open automatically. If not, open this URL: [http://127.0.0.1:9090](http://127.0.0.1:9090).
The browser should ask you for authorization to use the camera. Press "Allow".
<p align="center">
<img src="first_startup.png" alt="drawing" width="400"/>
</p>
Once allowed, your camera should activate and you will see a live view of the video. Notice that your face may already be detected and the label "None" displayed. This is because the application does not know you yet.
<p align="center">
<img src="app_running.png" alt="drawing" width="400"/>
</p>
To train the app to recognize your face, press the "Capture" button. This will show a dialog with a captured image. Enter a name for that face and press "Validate".
The more training examples, the better, so add a few more captured images of your face.
Note that the case of the given name matters, so always use the same name for the captured images.
Example: "Greg" and "greg" will be considered two different names.
<p align="center">
<img src="captured_image.png" alt="drawing" width="400"/>
</p>
After, say, 6 different images, you can ask the system to learn from them by pressing the "Re-train" button.
Depending on the number of images to process, this can take from a second to a dozen seconds.
The application will then be able to recognize the new face, and the name should be displayed on screen!
<p align="center">
<img src="face_recognized.png" alt="drawing" width="400"/>
</p>
|
# Create app for demo-face-recognition main.py
from taipy.gui import Gui
from webcam import Webcam
import cv2
import PIL.Image
import io
import logging
import uuid
from pathlib import Path
from demo.faces import detect_faces, recognize_face, train_face_recognizer
logging.basicConfig(level=logging.DEBUG)
training_data_folder = Path("images")
show_capture_dialog = False
capture_image = False
show_add_captured_images_dialog = False
labeled_faces = [] # Contains rect with label (for UI component)
captured_image = None
captured_label = ""
def on_action_captured_image(state, id, action, payload):
print("Captured image")
choice = payload["args"][0]
if choice == 0:
# Add image to training data:
img = state.captured_image
file_name = str(uuid.uuid4()) + ".jpg"
label = state.captured_label
image_path = Path(training_data_folder, file_name)
with image_path.open("wb") as f:
f.write(img)
label_file_path = Path(training_data_folder, "data.csv")
with label_file_path.open("a") as f:
f.write(f"{file_name},{label}\n")
state.captured_image = None
state.captured_label = ""
state.show_capture_dialog = False
def process_image(state, frame):
print("Processing image...")
found = detect_faces(frame)
labeled_images = []
for rect, img in found:
(label, _) = recognize_face(img)
labeled_images.append((img, rect, label))
# Return this to the UI component so that it can display a rect around recognized faces:
state.labeled_faces = [str([*rect, label]) for (_, rect, label) in labeled_images]
# Capture image (actually we consider only the first detected face)
if state.capture_image and len(labeled_images) > 0:
img = labeled_images[0][0]
label = labeled_images[0][2]
state.captured_image = cv2.imencode(".jpg", img)[1].tobytes()
state.captured_label = label
state.show_capture_dialog = True
state.capture_image = False
def handle_image(state, action, args, value):
print("Handling image...")
payload = value["args"][0]
bytes = payload["data"]
logging.debug(f"Received data: {len(bytes)}")
temp_path = "temp.png"
# Write Data into temp file (OpenCV is unable to load from memory)
image = PIL.Image.open(io.BytesIO(bytes))
image.save(temp_path)
# Load image file
try:
img = cv2.imread(temp_path, cv2.IMREAD_UNCHANGED)
except cv2.error as e:
logging.error(f"Failed to read image file: {e}")
return
process_image(state, img)
# Finish. Clean up the temporary image file.
Path(temp_path).unlink(missing_ok=True)
def button_retrain_clicked(state):
print("Retraining...")
train_face_recognizer(training_data_folder)
webcam_md = """<|toggle|theme|>
<container|container|part|
# Face **recognition**{: .color-primary}
This demo shows how to use [Taipy](https://taipy.io/) with a [custom GUI component](https://docs.taipy.io/en/latest/manuals/gui/extension/) to capture video from your webcam and do realtime face detection. What this application demonstrates:
- How to build a complex custom UI component for Taipy.
- How to detect and recognize faces in the image in real time using [OpenCV](https://opencv.org/).
<br/>
<card|card p-half|part|
## **Webcam**{: .color-primary} component
<|text-center|part|
<|webcam.Webcam|faces={labeled_faces}|classname=face_detector|id=my_face_detector|on_data_receive=handle_image|sampling_rate=100|>
<|Capture|button|on_action={lambda s: s.assign("capture_image", True)}|>
<|RE-train|button|on_action=button_retrain_clicked|>
|>
|card>
|container>
<|{show_capture_dialog}|dialog|labels=Validate;Cancel|on_action=on_action_captured_image|title=Add new training image|
<|{captured_image}|image|width=300px|height=300px|>
<|{captured_label}|input|>
|>
"""
if __name__ == "__main__":
# Create dir where the pictures will be stored
if not training_data_folder.exists():
training_data_folder.mkdir()
train_face_recognizer(training_data_folder)
gui = Gui(webcam_md)
gui.add_library(Webcam())
gui.run(port=9090)
|
# Create app for demo-face-recognition faces.py
import cv2
from pathlib import Path
import os
import numpy as np
import logging
from .image import crop_image
import pandas as pd
logging.basicConfig(level=logging.DEBUG)
# Create our face detector. Both Haar and LBP classifiers are roughly equivalent and both give good results.
# Up to you to choose one or the other.
face_detector = cv2.CascadeClassifier("classifiers/haarcascade_frontalface_default.xml")
# face_cascade = cv2.CascadeClassifier("classifiers/lbpcascade_frontalface_improved.xml")
# Create our face recognizer
face_recognizer = cv2.face.LBPHFaceRecognizer_create()
# The subjects that can be recognized
subjects = {}
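# detectMultiScale parameters: image pyramid scale step and the minimum number of neighboring detections required to keep a face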
FACE_DETECTOR_SCALE_FACTOR = 1.1
FACE_DETECTOR_MIN_NEIGHBORS = 5
def detect_faces(image):
gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
detected_faces = face_detector.detectMultiScale(
gray_image,
scaleFactor=FACE_DETECTOR_SCALE_FACTOR,
minNeighbors=FACE_DETECTOR_MIN_NEIGHBORS,
)
if len(detected_faces) == 0:
return []
return [(rect, crop_image(image, rect)) for rect in detected_faces]
def recognize_face(image):
gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
if len(subjects) == 0:
# No subject, the model hasn't been trained, let's do nothing.
return (None, None)
try:
face = face_recognizer.predict(gray_image)
except Exception:
logging.warning("Could not run face recognizer", exc_info=True)
return (None, None)
# Return the name of the recognized subject and the confidence level
return (subjects[face[0]], face[1])
def train_face_recognizer(training_data_directory="images"):
data_file_path = Path(training_data_directory, "data.csv")
if not data_file_path.exists():
# Create file
with data_file_path.open("w") as f:
f.write("image,label\n")
# Load file as CSV file
data = pd.read_csv(data_file_path, delimiter=",", header=0).to_numpy()
# Subjects that can be recognized from these data:
identified_subjects = np.unique(data[:, 1])
global subjects
if len(identified_subjects) == 0:
# No subject... We stop here
subjects = {}
return
else:
# Update subjects (persons who can be recognized)
subjects = {e[0]: e[1] for e in enumerate(identified_subjects)}
# Prepare training data
faces, labels = [], []
for row in data:
file_name = row[0]
label = np.where(identified_subjects == row[1])[0][0]
file_path = Path(training_data_directory, file_name)
if os.path.exists(file_path):
img = cv2.imread(str(file_path), cv2.IMREAD_GRAYSCALE)
faces.append(img)
labels.append(label)
# Run training!
logging.debug(f"Run training for {subjects}...")
face_recognizer.train(faces, np.array(labels))
|
# Create app for demo-face-recognition __init__.py
|
# Create app for demo-face-recognition image.py
def crop_image(img, rect):
"""An utility function to crop an image to the given rect"""
x, y, w, h = rect
return img[y : y + h, x : x + w]
|
# Create app for demo-face-recognition __init__.py
from .webcam import Webcam
|
# Create app for demo-face-recognition webcam.py
from taipy.gui.extension import ElementLibrary, Element, ElementProperty, PropertyType
class Webcam(ElementLibrary):
def get_name(self) -> str:
return "webcam"
def get_elements(self) -> dict:
return {
"Webcam": Element(
"faces",
{
"faces": ElementProperty(PropertyType.dynamic_list),
"id": ElementProperty(PropertyType.string),
"classname": ElementProperty(PropertyType.dynamic_string),
"on_data_receive": ElementProperty(PropertyType.string),
"sampling_rate": ElementProperty(PropertyType.number),
},
react_component="Webcam",
)
}
def get_scripts(self) -> list[str]:
return ["webui/dist/webcam.js"]
|
# Create app for demo-taipy-gui-starter-2 main.py
from taipy.gui import Gui, notify
import pandas as pd
import webbrowser
import datetime
import os
DOWNLOAD_PATH = "data/download.csv"
upload_file = None
section_1 = """
<center>
<|navbar|lov={[("page1", "This Page"), ("https://docs.taipy.io/en/latest/manuals/about/", "Taipy Docs"), ("https://docs.taipy.io/en/latest/getting_started/", "Getting Started")]}|>
</center>
Data Dashboard with Taipy
=========================
<|layout|columns=1 3|
<|
### Let's create a simple Data Dashboard!
<br/>
<center>
<|{upload_file}|file_selector|label=Upload Dataset|>
</center>
|>
<|
<center>
<|{logo}|image|height=250px|width=250px|on_action=image_action|>
</center>
|>
|>
"""
section_2 = """
## Data Visualization
<|{dataset}|chart|mode=lines|x=Date|y[1]=MinTemp|y[2]=MaxTemp|color[1]=blue|color[2]=red|>
"""
section_3 = """
<|layout|columns= 1 5|
<|
## Custom Parameters
**Starting Date**\n\n<|{start_date}|date|not with_time|on_change=start_date_onchange|>
<br/><br/>
**Ending Date**\n\n<|{end_date}|date|not with_time|on_change=end_date_onchange|>
<br/>
<br/>
<|button|label=GO|on_action=button_action|>
|>
<|
<center>
<h2>Dataset</h2><|{DOWNLOAD_PATH}|file_download|on_action=download|>
<|{dataset}|table|page_size=10|height=500px|width=65%|>
</center>
|>
|>
"""
def image_action(state):
webbrowser.open("https://taipy.io")
def get_data(path: str):
dataset = pd.read_csv(path)
dataset["Date"] = pd.to_datetime(dataset["Date"]).dt.date
return dataset
def start_date_onchange(state, var_name, value):
state.start_date = value.date()
def end_date_onchange(state, var_name, value):
state.end_date = value.date()
def filter_by_date_range(dataset, start_date, end_date):
mask = (dataset['Date'] > start_date) & (dataset['Date'] <= end_date)
return dataset.loc[mask]
def button_action(state):
state.dataset = filter_by_date_range(dataset, state.start_date, state.end_date)
notify(state, "info", "Updated date range from {} to {}.".format(state.start_date.strftime("%m/%d/%Y"), state.end_date.strftime("%m/%d/%Y")))
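# Write the currently displayed (possibly filtered) dataset to disk so the file_download control can serve it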
def download(state):
state.dataset.to_csv(DOWNLOAD_PATH)
logo = "images/taipy_logo.jpg"
dataset = get_data("data/weather.csv")
start_date = datetime.date(2008, 12, 1)
end_date = datetime.date(2017, 6, 25)
gui = Gui(page=section_1+section_2+section_3)
if __name__ == '__main__':
# the options in the gui.run() are optional, try without them
gui.run(title='Taipy Demo GUI 2',
host='0.0.0.0',
port=os.environ.get('PORT', '5050'),
dark_mode=False)
else:
app = gui.run(title='Taipy Demo GUI 2',
dark_mode=False,
run_server=False)
|
# Create app for demo-production-planning main.py
from pages.compare_cycles_md import *
import taipy as tp
import os
import pandas as pd
from taipy.gui import Gui
if __name__ == "__main__":
tp.Core().run()
if len(tp.get_scenarios())==0:
cc_create_scenarios_for_cycle()
from pages.compare_scenario_md import *
from pages.databases_md import *
from pages.data_visualization_md import *
from pages.shared import *
from pages.scenario_manager.scenario_manager_md import *
def create_chart(sm_results: pd.DataFrame, var: str):
"""Functions that create/update the chart table visible in the "Databases" page. This
function is used in the "on_change" function to change the chart when the graph selected is changed.
Args:
sm_results (pd.DataFrame): the results database that comes from the state
var (str): the string that has to be found in the columns that are going to be used to create the chart table
Returns:
pd.DataFrame: the chart with the proper columns
"""
if var == 'Cost':
columns = ['index'] + [col for col in sm_results.columns if var in col]
else:
columns = ['index'] + [col for col in sm_results.columns if var in col and 'Cost' not in col]
chart = sm_results[columns]
return chart
def on_change(state, var_name, var_value):
"""This function is called whener a change in the state variables is done. When a change is seen, operations can be created
depending on the variable changed
Args:
state (State): the state object of Taipy
var_name (str): the changed variable name
var_value (obj): the changed variable value
"""
# if the changed variable is the scenario selected
if var_name == "selected_scenario" and var_value:
if state.selected_scenario.results.is_ready_for_reading:
# I update all the other useful variables
update_variables(state)
if var_name == 'sm_graph_selected' or var_name == "selected_scenario":
# Update the chart table
str_to_select_chart = None
chart_mapping = {
'Costs': 'Cost',
'Purchases': 'Purchase',
'Productions': 'Production',
'Stocks': 'Stock',
'Back Order': 'BO',
'Product FPA': 'FPA',
'Product FPB': 'FPB',
'Product RP1': 'RP1',
'Product RP2': 'RP2'
}
str_to_select_chart = chart_mapping.get(state.sm_graph_selected)
state.chart = create_chart(state.sm_results, str_to_select_chart)
# If we are on the 'Databases' page, we have to create a temp CSV file
if state.page == 'Databases':
state.d_chart_csv_path = PATH_TO_TABLE
state.chart.to_csv(state.d_chart_csv_path, sep=',')
def on_init(state):
state.state_id = str(os.urandom(32))
update_scenario_selector(state)
pages = {"/": Markdown('pages/shared.md'),
"Data-Visualization":da_data_visualisation_md,
"Scenario-Manager":sm_scenario_manager_md,
"Compare-Scenarios":cs_compare_scenario_md,
"Compare-Cycles":cc_compare_cycles_md,
'Databases':da_databases_md
}
if __name__ == "__main__":
gui = Gui(pages=pages)
gui.run(title="Production planning")
|
# Create app for demo-production-planning config.py
from taipy import Config, Frequency
import json
from algos.algos import *
# This code produces scenario_cfg and configures our graph of execution
###############################################################################
# Data nodes
###############################################################################
# we create our first data node; its source is a CSV file
path_to_demand = 'data/time_series_demand.csv'
demand_cfg = Config.configure_data_node(id="demand",
storage_type="csv",
path=path_to_demand,
has_header=True)
with open('data/fixed_variables_default.json') as f:
fixed_variables_default = json.load(f)
# creation of our second data node, whose default data is fixed_variables_default;
# this is the data node we write to when new values are submitted for the fixed variables
fixed_variables_cfg = Config.configure_data_node(id="fixed_variables", default_data = fixed_variables_default)
solver_name_cfg = Config.configure_data_node(id="solver_name", default_data="Default")
# here are the datanodes that keep track of the model : the model_created datanode, the model_solved datanode
model_created_cfg = Config.configure_data_node(id="model_created")
model_solved_cfg = Config.configure_data_node(id="model_solved")
# and this is the datanode that will be used to get our results from the main code
results_cfg = Config.configure_data_node(id="results")
###############################################################################
# Tasks
###############################################################################
# (demand_cfg,fixed_variables_cfg) -> |create_model| -> (model_created_cfg)
create_model_task = Config.configure_task(id="create_model",
input=[demand_cfg,fixed_variables_cfg],
function=create_model,
output=[model_created_cfg])
# (model_created_cfg, solver_name_cfg) -> |solve_model| -> (model_solved_cfg)
solve_model_cfg = Config.configure_task(id="solve_model",
input=[model_created_cfg, solver_name_cfg],
function=solve_model,
output=[model_solved_cfg])
# (model_solved_cfg,fixed_variables_cfg,demand_cfg) -> |create_results| -> (results_cfg)
create_results_cfg = Config.configure_task(id="create_results",
input=[model_solved_cfg,fixed_variables_cfg,demand_cfg],
function=create_results,
output=[results_cfg])
###############################################################################
# Scenario config
###############################################################################
scenario_cfg = Config.configure_scenario(id="scenario",task_configs=[create_model_task,solve_model_cfg,create_results_cfg], frequency=Frequency.MONTHLY)
Config.export("config/config.toml")
|
# Create app for demo-production-planning algos.py
import pandas as pd
import numpy as np
from pulp import *
# This code is used for config.py
###############################################################################
# Functions
###############################################################################
def create_model(demand: pd.DataFrame, fixed_variables: dict):
"""This function creates the model. It will creates all the variables and contraints of the problem.
It will also create the objective function.
Args:
demand (pd.DataFrame): demand dataframe
fixed_variables (dict): fixed variables dictionary
Returns:
dict: model_info (with the model created)
"""
print("Creating the model...")
monthly_demand_FPA = demand["Demand_A"]
monthly_demand_FPB = demand["Demand_B"]
nb_periods = len(monthly_demand_FPA)
# creation of the model
prob = LpProblem("Production_Planning", LpMinimize)
# creation of the variables
# for product A
monthly_production_FPA = [
LpVariable(f"Monthly_Production_FPA_{m}", 0) for m in range(nb_periods)
]
monthly_stock_FPA = [
LpVariable(f"Monthly_Stock_FPA_{m}", 0) for m in range(nb_periods)
]
monthly_back_order_FPA = [
LpVariable(f"Monthly_Back_Order_FPA_{m}", 0) for m in range(nb_periods)
]
# for product B
monthly_production_FPB = [
LpVariable(f"Monthly_Production_FPB_{m}", 0) for m in range(nb_periods)
]
monthly_stock_FPB = [
LpVariable(f"Monthly_Stock_FPB_{m}", 0) for m in range(nb_periods)
]
monthly_back_order_FPB = [
LpVariable(f"Monthly_Back_Order_FPB_{m}", 0) for m in range(nb_periods)
]
# for product 1
monthly_purchase_RPone = [
LpVariable(f"Monthly_Purchase_RPone_{m}", 0) for m in range(nb_periods)
]
monthly_stock_RPone = [
LpVariable(f"Monthly_Stock_RPone_{m}", 0) for m in range(nb_periods)
]
monthly_stock_not_used_RPone = [
LpVariable(f"Monthly_Stock_not_used_RPone{m}", 0) for m in range(nb_periods)
]
monthly_stock_RPone_for_FPA = [
LpVariable(f"Monthly_Stock_RPone_for_FPA{m}", 0) for m in range(nb_periods)
]
monthly_stock_RPone_for_FPB = [
LpVariable(f"Monthly_Stock_RPone_for_FPB{m}", 0) for m in range(nb_periods)
]
# for product 2
monthly_purchase_RPtwo = [
LpVariable(f"Monthly_Purchase_RPtwo{m}", 0) for m in range(nb_periods)
]
monthly_stock_RPtwo = [
LpVariable(f"Monthly_Stock_RP{m}two", 0) for m in range(nb_periods)
]
monthly_stock_not_used_RPtwo = [
LpVariable(f"Monthly_Stock_not_used_RPtwo{m}", 0) for m in range(nb_periods)
]
monthly_stock_RPtwo_for_FPA = [
LpVariable(f"Monthly_Stock_RPtwo_for_FPA{m}", 0) for m in range(nb_periods)
]
monthly_stock_RPtwo_for_FPB = [
LpVariable(f"Monthly_Stock_RPtwo_for_FPB{m}", 0) for m in range(nb_periods)
]
# creation of the constraints
# Kirchhoff's law for product A
for m in range(1, nb_periods):
prob += (
monthly_production_FPA[m]
- monthly_back_order_FPA[m - 1]
+ monthly_stock_FPA[m - 1]
== monthly_demand_FPA[m] + monthly_stock_FPA[m] - monthly_back_order_FPA[m]
)
# Kirchhoff's law for product B
for m in range(1, nb_periods):
prob += (
monthly_production_FPB[m]
- monthly_back_order_FPB[m - 1]
+ monthly_stock_FPB[m - 1]
== monthly_demand_FPB[m] + monthly_stock_FPB[m] - monthly_back_order_FPB[m]
)
# Kirchhoff's law for product 1
for m in range(1, nb_periods):
prob += (
monthly_purchase_RPone[m - 1] + monthly_stock_not_used_RPone[m - 1]
== monthly_stock_RPone[m]
)
# MS Fix for None issue
prob += monthly_purchase_RPone[nb_periods - 1] == 0
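# Kirchhoff's law for product 2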
for m in range(1, nb_periods):
prob += (
monthly_purchase_RPtwo[m - 1] + monthly_stock_not_used_RPtwo[m - 1]
== monthly_stock_RPtwo[m]
)
# MS Fix for None issue
prob += monthly_purchase_RPtwo[nb_periods - 1] == 0
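# constraints on the variables: max and initial values for product A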
for m in range(nb_periods):
prob += monthly_production_FPA[m] <= fixed_variables["Max_Capacity_FPA"]
prob += monthly_production_FPA[0] == fixed_variables["Initial_Production_FPA"]
prob += monthly_back_order_FPA[0] == fixed_variables["Initial_Back_Order_FPA"]
prob += monthly_stock_FPA[0] == fixed_variables["Initial_Stock_FPA"]
# constraints on bill of materials for product A
for m in range(1, nb_periods):
prob += (
monthly_production_FPA[m]
== fixed_variables["number_RPone_to_produce_FPA"]
* monthly_stock_RPone_for_FPA[m - 1]
+ fixed_variables["number_RPtwo_to_produce_FPA"]
* monthly_stock_RPtwo_for_FPA[m - 1]
)
for m in range(nb_periods):
prob += (
fixed_variables["number_RPone_to_produce_FPA"]
* monthly_stock_RPone_for_FPA[m]
== fixed_variables["number_RPtwo_to_produce_FPA"]
* monthly_stock_RPtwo_for_FPA[m]
)
# constraints on the variables: max and initial values for product B
for m in range(nb_periods):
prob += monthly_production_FPB[m] <= fixed_variables["Max_Capacity_FPB"]
prob += monthly_production_FPB[0] == fixed_variables["Initial_Production_FPB"]
prob += monthly_back_order_FPB[0] == fixed_variables["Initial_Back_Order_FPB"]
prob += monthly_stock_FPB[0] == fixed_variables["Initial_Stock_FPB"]
# constraints on bill of materials for product B
for m in range(1, nb_periods):
prob += (
monthly_production_FPB[m]
== fixed_variables["number_RPone_to_produce_FPB"]
* monthly_stock_RPone_for_FPB[m - 1]
+ fixed_variables["number_RPtwo_to_produce_FPB"]
* monthly_stock_RPtwo_for_FPB[m - 1]
)
for m in range(nb_periods):
prob += (
fixed_variables["number_RPone_to_produce_FPB"]
* monthly_stock_RPone_for_FPB[m]
== fixed_variables["number_RPtwo_to_produce_FPB"]
* monthly_stock_RPtwo_for_FPB[m]
)
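# constraints on the variables: max and initial values for product 1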
for m in range(nb_periods):
prob += monthly_stock_RPone[m] <= fixed_variables["Max_Stock_RPone"]
prob += monthly_stock_RPone[0] == fixed_variables["Initial_Stock_RPone"]
prob += monthly_purchase_RPone[0] == fixed_variables["Initial_Purchase_RPone"]
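# constraints that define what a stock is for product 1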
for m in range(nb_periods):
prob += monthly_stock_RPone[m] == (
monthly_stock_not_used_RPone[m]
+ monthly_stock_RPone_for_FPA[m]
+ monthly_stock_RPone_for_FPB[m]
)
# constraints on the variables: max and initial values for product 2
for m in range(nb_periods):
prob += monthly_stock_RPtwo[m] <= fixed_variables["Max_Stock_RPtwo"]
prob += monthly_stock_RPtwo[0] == fixed_variables["Initial_Stock_RPtwo"]
prob += monthly_purchase_RPtwo[0] == fixed_variables["Initial_Purchase_RPtwo"]
# constraints that define what a stock is for product 2
for m in range(nb_periods):
prob += monthly_stock_RPtwo[m] == (
monthly_stock_not_used_RPtwo[m]
+ monthly_stock_RPtwo_for_FPA[m]
+ monthly_stock_RPtwo_for_FPB[m]
)
# constraints on the variables : max value for product A and B (cumulative)
for m in range(nb_periods):
prob += (
monthly_production_FPA[m] + monthly_production_FPB[m]
<= fixed_variables["Max_Capacity_of_FPA_and_FPB"]
)
# setting the objective function
prob += lpSum(
fixed_variables["Weight_of_Back_Order"]
/ 100
* (
fixed_variables["cost_FPA_Back_Order"] * monthly_back_order_FPA[m]
+ fixed_variables["cost_FPB_Back_Order"] * monthly_back_order_FPB[m]
)
+ fixed_variables["Weight_of_Stock"]
/ 100
* (
fixed_variables["cost_FPA_Stock"] * monthly_stock_FPA[m]
+ fixed_variables["cost_FPB_Stock"] * monthly_stock_FPB[m]
+ fixed_variables["cost_RPone_Stock"] * monthly_stock_RPone[m]
+ fixed_variables["cost_RPtwo_Stock"] * monthly_stock_RPtwo[m]
)
for m in range(nb_periods)
)
# putting all the needed information in a dictionary
model_info = {
"model_created": prob,
"model_solved": None,
"Monthly_Production_FPA": monthly_production_FPA,
"Monthly_Stock_FPA": monthly_stock_FPA,
"Monthly_Back_Order_FPA": monthly_back_order_FPA,
"Monthly_Production_FPB": monthly_production_FPB,
"Monthly_Stock_FPB": monthly_stock_FPB,
"Monthly_Back_Order_FPB": monthly_back_order_FPB,
"Monthly_Stock_RPone": monthly_stock_RPone,
"Monthly_Stock_RPtwo": monthly_stock_RPtwo,
"Monthly_Purchase_RPone": monthly_purchase_RPone,
"Monthly_Purchase_RPtwo": monthly_purchase_RPtwo,
}
print("Model created")
return model_info
def solve_model(model_info: dict, solver_name):
"""This function solves the model and returns all the solutions in a dictionary.
Args:
model_info (dict): the model_info passed by the create_model function
solver_name (str): the solver to use ("Default" uses PuLP's default solver)
Returns:
dict: the model solved and the solutions
"""
print("Solving the model...")
prob = model_info["model_created"]
nb_periods = len(model_info["Monthly_Production_FPA"])
if solver_name != "Default":
solver = getSolver(solver_name)
# solving the model
m_solved = prob.solve(solver)
else:
m_solved = prob.solve()
# getting the solution in the right variables
# for product A
prod_sol_FPA = [
value(model_info["Monthly_Production_FPA"][p]) for p in range(nb_periods)
]
stock_sol_FPA = [
value(model_info["Monthly_Stock_FPA"][p]) for p in range(nb_periods)
]
bos_sol_FPA = [
value(model_info["Monthly_Back_Order_FPA"][p]) for p in range(nb_periods)
]
# for product B
prod_sol_FPB = [
value(model_info["Monthly_Production_FPB"][p]) for p in range(nb_periods)
]
stock_sol_FPB = [
value(model_info["Monthly_Stock_FPB"][p]) for p in range(nb_periods)
]
bos_sol_FPB = [
value(model_info["Monthly_Back_Order_FPB"][p]) for p in range(nb_periods)
]
# stocks of raw products 1 and 2
stock_RPone_sol = [
value(model_info["Monthly_Stock_RPone"][p]) for p in range(nb_periods)
]
stock_RPtwo_sol = [
value(model_info["Monthly_Stock_RPtwo"][p]) for p in range(nb_periods)
]
# purchases of raw products 1 and 2
purchase_RPone_sol = [
value(model_info["Monthly_Purchase_RPone"][p]) for p in range(nb_periods)
]
purchase_RPtwo_sol = [
value(model_info["Monthly_Purchase_RPtwo"][p]) for p in range(nb_periods)
]
# put it in a dictionary
model_info = {
"model_created": prob,
"model_solved": m_solved,
"Monthly_Production_FPA": prod_sol_FPA,
"Monthly_Stock_FPA": stock_sol_FPA,
"Monthly_Back_Order_FPA": bos_sol_FPA,
"Monthly_Production_FPB": prod_sol_FPB,
"Monthly_Stock_FPB": stock_sol_FPB,
"Monthly_Back_Order_FPB": bos_sol_FPB,
"Monthly_Stock_RPone": stock_RPone_sol,
"Monthly_Purchase_RPone": purchase_RPone_sol,
"Monthly_Stock_RPtwo": stock_RPtwo_sol,
"Monthly_Purchase_RPtwo": purchase_RPtwo_sol,
}
print("Model solved")
return model_info
def create_results(model_info: dict, fixed_variables: dict, demand: pd.DataFrame):
"""This function creates the results of the model. The results dataframe is a concatenation of all the useful information.
Args:
model_info (dict): the dictionary created by the solve_model function
fixed_variables (dict): the fixed variables of the problem
demand (pd.DataFrame): the demand for A and B
Returns:
pd.DataFrame: dataframe that gathers all the useful information about the solution
"""
print("Creating the results...")
# getting the demand for A and B
demand_series_FPA = demand["Demand_A"]
demand_series_FPB = demand["Demand_B"]
nb_periods = len(demand_series_FPA)
# calculate the different costs
cost_FPBO_FPA = fixed_variables["cost_FPA_Back_Order"] * np.array(
model_info["Monthly_Back_Order_FPA"]
)
cost_stock_FPA = fixed_variables["cost_FPA_Stock"] * np.array(
model_info["Monthly_Stock_FPA"]
)
cost_FPBO_FPB = fixed_variables["cost_FPB_Back_Order"] * np.array(
model_info["Monthly_Back_Order_FPB"]
)
cost_stock_FPB = fixed_variables["cost_FPB_Stock"] * np.array(
model_info["Monthly_Stock_FPB"]
)
cost_stock_RPone = fixed_variables["cost_RPone_Stock"] * np.array(
model_info["Monthly_Stock_RPone"]
)
cost_stock_RPtwo = fixed_variables["cost_RPtwo_Stock"] * np.array(
model_info["Monthly_Stock_RPtwo"]
)
cost_product_RPone = fixed_variables["cost_RPone_Purchase"] * np.array(
model_info["Monthly_Purchase_RPone"]
)
cost_product_RPtwo = fixed_variables["cost_RPtwo_Purchase"] * np.array(
model_info["Monthly_Purchase_RPtwo"]
)
# the total cost (sum of the costs)
total_cost = (
cost_FPBO_FPA
+ cost_stock_FPA
+ cost_FPBO_FPB
+ cost_stock_FPB
+ cost_stock_RPone
+ cost_product_RPone
+ cost_product_RPtwo
+ cost_stock_RPtwo
)
# creation of the dictionary that will be used to create the dataframe
dict_for_dataframe = {
"Monthly Production FPA": model_info["Monthly_Production_FPA"],
"Monthly Stock FPA": model_info["Monthly_Stock_FPA"],
"Monthly BO FPA": model_info["Monthly_Back_Order_FPA"],
"Max Capacity FPA": [fixed_variables["Max_Capacity_FPA"]] * nb_periods,
"Monthly Production FPB": model_info["Monthly_Production_FPB"],
"Monthly Stock FPB": model_info["Monthly_Stock_FPB"],
"Monthly BO FPB": model_info["Monthly_Back_Order_FPB"],
"Max Capacity FPB": [fixed_variables["Max_Capacity_FPB"]] * nb_periods,
"Monthly Stock RP1": model_info["Monthly_Stock_RPone"],
"Monthly Stock RP2": model_info["Monthly_Stock_RPtwo"],
"Monthly Purchase RP1": model_info["Monthly_Purchase_RPone"],
"Monthly Purchase RP2": model_info["Monthly_Purchase_RPtwo"],
"Demand FPA": demand_series_FPA,
"Demand FPB": demand_series_FPB,
"Stock FPA Cost": cost_stock_FPA,
"Stock FPB Cost": cost_stock_FPB,
"Stock RP1 Cost": cost_stock_RPone,
"Stock RP2 Cost": cost_stock_RPtwo,
"Purchase RP1 Cost": cost_product_RPone,
"Purchase RP2 Cost": cost_product_RPtwo,
"BO FPA Cost": cost_FPBO_FPA,
"BO FPB Cost": cost_FPBO_FPB,
"Total Cost": total_cost,
"index": range(nb_periods),
}
results = pd.DataFrame(dict_for_dataframe).round()
print("Results created")
# we erase the last two observations because of how the model is created,
# their values don't have a meaning
return results[:-2]
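# A minimal sketch (illustrative, not part of the demo) chaining the three task functions
# directly, outside of Taipy Core, using the demo's data files as inputs.
if __name__ == "__main__":
    import json
    demand_df = pd.read_csv("data/time_series_demand.csv")
    with open("data/fixed_variables_default.json") as f:
        fixed_vars = json.load(f)
    info = create_model(demand_df, fixed_vars)
    info = solve_model(info, "Default")
    print(create_results(info, fixed_vars, demand_df))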
|
# Create app for demo-production-planning create_data.py
import numpy as np
import pandas as pd
# this code creates the CSV file for the demand; it is the source data for the problem
def create_time_series(nb_months=12,mean_A=840,mean_B=760,std_A=96,std_B=72, amplitude_A=108,amplitude_B=144):
time_series_A = [mean_A]
time_series_B = [mean_B]
for i in range(1,nb_months):
time_series_A.append(np.random.normal(mean_A + amplitude_A*np.sin(2*np.pi*i/12), std_A))
time_series_B.append(np.random.normal(mean_B + amplitude_B*np.sin((2*np.pi*(i+6))/12), std_B))
time_series_A = pd.Series(time_series_A)
time_series_B = pd.Series(time_series_B)
month = [i%12 for i in range(nb_months)]
year = [i//12 + 2020 for i in range(nb_months)]
df_time_series = pd.DataFrame({"Year":year,
"Month":month,
"Demand_A":time_series_A,
"Demand_B":time_series_B})
return df_time_series
def time_series_to_csv(nb_months=12,mean_A=840,mean_B=760,std_A=96,std_B=72, amplitude_A=108,amplitude_B=144):
time_serie_data = create_time_series(nb_months,
mean_A,
mean_B,
std_A,
std_B,
amplitude_A,
amplitude_B)
time_serie_data.to_csv('data/time_series_demand.csv')
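# Minimal usage sketch (illustrative): regenerate two years of synthetic demand data.
if __name__ == "__main__":
    time_series_to_csv(nb_months=24)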
|
# Create app for demo-production-planning databases_md.py
import pathlib
d_chart_csv_path = None
# this path is used to create a temporary file that will allow us to
# download a table in the Datasources page
tempdir = pathlib.Path(".tmp")
tempdir.mkdir(exist_ok=True)
PATH_TO_TABLE = str(tempdir / "table.csv")
da_databases_md = """
<|container|
# Data**sources**{: .color-primary }
<|layout|columns=3 2 1|columns[mobile]=1|class_name=align_columns_bottom|
<layout_scenario|
<|layout|columns=1 1 3|columns[mobile]=1|class_name=align_columns_bottom|
<year|
Year
<|{sm_selected_year}|selector|lov={sm_year_selector}|dropdown|width=100%|on_change=change_sm_month_selector|>
|year>
<month|
Month
<|{sm_selected_month}|selector|lov={sm_month_selector}|dropdown|width=100%|on_change=change_scenario_selector|>
|month>
<scenario|
**Scenario**
<|{selected_scenario}|selector|lov={scenario_selector}|dropdown|adapter=adapt_scenarios|width=18rem|>
|scenario>
|>
|layout_scenario>
<|
Table
<|{sm_graph_selected}|selector|lov={sm_graph_selector}|dropdown|>
|>
<br/>
<|{d_chart_csv_path}|file_download|name=table.csv|label=Download table|>
|>
<|part|render={len(scenario_selector)>0}|class_name=mt2|
<|{chart}|table|width=100%|rebuild|>
|>
|>
"""
|
# Create app for demo-production-planning data_visualization_md.py
import pandas as pd
import json
with open('data/fixed_variables_default.json', "r") as f:
fixed_variables_default = json.load(f)
# no Taipy Core code has run yet, so we read the CSV file directly
da_initial_demand = pd.read_csv('data/time_series_demand.csv')[['Year', 'Month', 'Demand_A', 'Demand_B']]\
.astype(int)
da_initial_demand.columns = [col.replace('_', ' ') for col in da_initial_demand.columns]
da_initial_variables = pd.DataFrame({key: [fixed_variables_default[key]]
for key in fixed_variables_default.keys() if 'Initial' in key})
# The code below is to correctly format the name of the columns
da_initial_variables.columns = [col.replace('_', ' ').replace('one', '1').replace('two', '2').replace('initial ', '').replace('Initial ', '')
for col in da_initial_variables.columns]
da_initial_variables.columns = [col[0].upper() +
col[1:] for col in da_initial_variables.columns]
da_data_visualisation_md = """
<|container|
# Data **Visualization**{: .color-primary }
<|Expand here to see more data|expandable|expanded=False|
<|layout|columns=5 3 3|columns[mobile]=1|
### Initial **stock**{: .color-secondary } \
<|{da_initial_variables[[col for col in da_initial_variables.columns if 'Stock' in col]]}|table|show_all|width=100%|>
### Incoming **purchases**{: .color-secondary } \
<|{da_initial_variables[[col for col in da_initial_variables.columns if 'Purchase' in col]]}|table|show_all|width=100%|>
### Initial **production**{: .color-secondary } \
<|{da_initial_variables[[col for col in da_initial_variables.columns if 'Production' in col]]}|table|show_all|width=100%|>
|>
## **Demand**{: .color-secondary } of the upcoming months
<|{da_initial_demand.round()}|table|width=fit-content|show_all|height=fit-content|>
|>
### **Evolution**{: .color-primary } of the demand
<|{da_initial_demand}|chart|x=Month|y[1]=Demand A|y[2]=Demand B|>
|>
"""
|
# Create app for demo-production-planning shared.md
<|toggle|theme|>
<|menu|label=Menu|lov={menu_lov}|on_action=menu_fct|>
<|Need any help?|button|on_action={lambda s: s.assign('dialog_help', True)}|id=help_button|>
<|{dialog_help}|dialog|title=Walkthrough|on_action=validate_help|labels=Go!|id=dialog_help|width=100%|
<|container|
## Page 1: Data Visualization
Upon registering with a new account (name & password), the first page is displayed.
The primary chart depicts future demand for finished products A (FPA)
and B (FPB) over the next 11 months, with the current month marked as month 0.
Just above the chart, by clicking "Expand here," you can access an expandable
Taipy front-end containing initial production data at time 0 (current month):
stock & production levels, incoming raw material orders, and demand, all presented in a table.
## Page 2: Scenario Manager
Create, configure, and optimize production scenarios.
This is the application's main page, where users can create new scenarios,
adjust scenario parameters (on the 'Scenario Configuration' side of the page),
and re-submit scenarios for re-optimization based on modified parameters.
Initially, no scenario is available, and the Year/Month corresponds to the current month.
### Creating your first scenario
The purpose of the model is to generate a production plan (level of production
for both products) for the next 11 months in order to:
- Meet the demand for the finished product
- Respect the Capacity Constraints
- Minimize 2 cost functions:
- Back ordering costs: the costs of not meeting the demand on time
- Stock costs: costs of storing raw and finished products.
It is worth noting that these two cost functions pull in opposite directions:
with a large stock, the demand is easily met. Conversely,
a low inventory may put the demand in jeopardy.
When creating a first scenario, two key indicators, "Back Order Cost"
and "Stock Cost," appear above an empty main chart (no plan generated yet).
Click on "New Scenario" to launch the optimization algorithm, which
quickly finds the optimal production levels, respecting the capacity
constraints and optimizing costs.
Results can be displayed as time series or pie charts, and different
graphs can be selected by choosing the data to display (costs, productions, etc.).
### Modifying the Parameters
On the right-hand side of this panel, you can modify various parameters categorized into three sections:
- **Capacity Constraints**: Modify capacity values for different products (finished and raw).
- **Objectives Weights**: Emphasize minimizing a specific cost (stock or backordering).
- **Initial Parameters**: Modify other parameters like Initial Stock and Unit Cost.
By "Playing" with these parameters, you can create several scenarios.
## Page 3: Compare Scenarios
To compare two scenarios, select them, then click on the "Compare scenario" button.
You can select different comparison metrics such as costs, purchases, and production levels, etc.
## Page 4: Compare Cycles
This demo also introduces the concept of "Cycles".
In this manufacturing context, the cycle is monthly.
This implies that scenarios are created each month.
Only one of the generated scenarios is chosen as the
"official" scenario; it is referred to as the "Primary" scenario.
This demo already contains many scenarios generated during the
previous months. The "Evolution of costs" bar chart displays
the performance of every "Primary" scenario generated
each month for the past few years. Compare monthly stock and
back order costs from January 2021 to the present month using stacked bar charts.
## Page 5: Datasources
Access and display various tables associated with
a selected scenario. Conveniently download data tables in CSV format.
|>
|>
|
# Create app for demo-production-planning shared.py
from taipy.gui import notify, navigate, Icon
import taipy as tp
import datetime as dt
# User id
state_id = None
# Metrics for scenario manager and comparison
sum_costs = 0
sum_costs_of_stock = 0
sum_costs_of_BO = 0
# Navigation
page = "Data Visualization"
menu_lov = [("Data-Visualization", Icon('images/icons/visualize.svg', 'Data Visualization')),
("Scenario-Manager", Icon('images/icons/scenario.svg', 'Scenario Manager')),
("Compare-Scenarios", Icon('images/icons/compare.svg', 'Compare Scenarios')),
("Compare-Cycles", Icon('images/icons/cycle.svg', 'Compare Cycles')),
('Databases', Icon('images/icons/data_base.svg', 'Databases'))]
def menu_fct(state, var_name: str, var_value):
"""Functions that is called when there is a change in the menu control
Args:
state (_type_): the state object of Taipy
var_name (str): the changed variable name
var_value (_type_): the changed variable value
"""
# change the value of the state.page variable in order to render the
# correct page
state.page = var_value['args'][0]
navigate(state, to=state.page)
# security on the 'All' option of sm_graph_selected that can be selected
# only on the 'Databases' page
if state.page != 'Databases' and state.sm_graph_selected == 'All':
state.sm_graph_selected = 'Costs'
# Functions for scenarios
def adapt_scenarios(scenario):
return 'Primary ' + scenario.name if scenario.is_primary else scenario.name
def create_sm_tree_dict(scenarios, sm_tree_dict: dict = None):
"""This function creates a tree dict from a list of scenarios. The levels of the tree are:
year/month/scenario
Args:
scenarios (list): a list of scenarios
sm_tree_dict (dict, optional): the tree gathering all the scenarios. Defaults to None.
Returns:
tree: the tree created to classify the scenarios
"""
print("Creating tree dict...")
if sm_tree_dict is None:
# Initialize the tree dict if it is not already initialized
sm_tree_dict = {}
# Add all the scenarios that are in the list
for scenario in scenarios:
# Create a name for the cycle
date = scenario.creation_date
year = f"{date.strftime('%Y')}"
period = f"{date.strftime('%b')}"
# Add the cycle if it was not already added
if year not in sm_tree_dict:
sm_tree_dict[year] = {}
if period not in sm_tree_dict[year]:
sm_tree_dict[year][period] = []
sm_tree_dict[year][period] += [scenario]
return sm_tree_dict
def create_time_selectors():
"""This function creates the time selectors that will be displayed on the GUI and it is also creating
the tree dict gathering all the scenarios.
Returns:
dict: the tree dict gathering all the scenarios
list: the list of years
list: the list of months
"""
all_scenarios_ordered = sorted(tp.get_scenarios(), key=lambda x: x.creation_date.timestamp())
sm_tree_dict = create_sm_tree_dict(all_scenarios_ordered)
if sm_current_year not in list(sm_tree_dict.keys()):
sm_tree_dict[sm_current_year] = {}
if sm_current_month not in sm_tree_dict[sm_current_year]:
sm_tree_dict[sm_current_year][sm_current_month] = []
sm_year_selector = list(sm_tree_dict.keys())
sm_month_selector = list(sm_tree_dict[sm_selected_year].keys())
return sm_tree_dict, sm_year_selector, sm_month_selector
def change_sm_month_selector(state):
"""
This function is called when the user changes the year selector. It updates the month selector
shown on the GUI and calls the same update for the scenario selector.
Args:
state (State): all the GUI variables
"""
state.sm_month_selector = list(state.sm_tree_dict[state.sm_selected_year].keys())
if state.sm_selected_month not in state.sm_month_selector:
state.sm_selected_month = state.sm_month_selector[0]
change_scenario_selector(state)
def change_scenario_selector(state):
"""
This function is called when the user changes the month selector. It updates the selector shown on the GUI
for the scenario selector.
Args:
state (State): all the GUI variables
"""
state.scenario_selector = list(state.sm_tree_dict[state.sm_selected_year][state.sm_selected_month])
state.scenario_selector_two = state.scenario_selector.copy()
if len(state.scenario_selector) > 0:
state.selected_scenario = state.scenario_selector[0]
if (state.sm_selected_month != sm_current_month or\
state.sm_selected_year != sm_current_year) and\
state.sm_show_config_scenario:
state.sm_show_config_scenario = False
notify(state, "info", "This scenario is historical, you can't modify it")
else:
state.sm_show_config_scenario = True
def update_scenario_selector(state):
"""
This function updates the scenario selectors. It is used when a new scenario
is created: the newly created scenario is added to the selector list.
Args:
state (State): the state object of Taipy
"""
state.scenario_selector = [s for s in tp.get_scenarios() if 'user' in s.properties and\
state.state_id == s.properties['user']]
state.scenario_selector_two = state.scenario_selector.copy()
sm_tree_dict[state.sm_selected_year][state.sm_selected_month] = state.scenario_selector
scenario_selector = []
selected_scenario = None
scenario_selector_two = []
selected_scenario_two = None
# Initialization of scenario tree
sm_tree_dict = {}
sm_current_month = dt.date.today().strftime('%b')
sm_current_year = dt.date.today().strftime('%Y')
sm_selected_year = sm_current_year
sm_selected_month = sm_current_month
sm_tree_dict, sm_year_selector, sm_month_selector = create_time_selectors()
# Help
dialog_help = False
def restore_state(state):
state.cs_show_comparison = False
update_scenario_selector(state)
notify(state, 'info', 'Restoring your session')
def validate_help(state, action, payload):
state.dialog_help = False
|
# Create app for demo-production-planning compare_cycles_md.py
from data.create_data import time_series_to_csv
from config.config import scenario_cfg
from taipy.core import taipy as tp
import datetime as dt
import pandas as pd
cc_data = pd.DataFrame(
{
'Date': [dt.datetime(2021, 1, 1)],
'Cycle': [dt.date(2021, 1, 1)],
'Cost of Back Order': [0],
'Cost of Stock': [0]
})
cc_show_comparison = False
cc_layout = {'barmode': 'stack'}
def cc_create_scenarios_for_cycle():
"""This function creates scenarios for multiple cycles and submit them.
"""
date = dt.datetime.now() - dt.timedelta(days=365)
month = date.strftime('%b')
year = date.strftime('%Y')
current_month = dt.date.today().strftime('%b')
current_year = dt.date.today().strftime('%Y')
while month != current_month or year != current_year:
date += dt.timedelta(days=15)
month = date.strftime('%b')
year = date.strftime('%Y')
if month != current_month or year != current_year:
time_series_to_csv()
scenario = tp.create_scenario(scenario_cfg, creation_date=date, name=date.strftime('%d-%b-%Y'))
tp.submit(scenario)
def update_cc_data(state):
"""This function creates the evolution of the cost of back order and stock for the primary scenario of all the cycles."""
dates = []
cycles = []
costs_of_back_orders = []
costs_of_stock = []
all_scenarios = tp.get_primary_scenarios()
all_scenarios_ordered = sorted(
all_scenarios,
key=lambda x: x.creation_date.timestamp())
for scenario in all_scenarios_ordered:
results = scenario.results.read()
if results is not None:
date_ = scenario.creation_date
# creation of sum_costs_of_stock metrics
bool_costs_of_stock = [c for c in results.columns if 'Cost' in c and\
'Total' not in c and\
'Stock' in c]
sum_costs_of_stock = int(results[bool_costs_of_stock].sum(axis=1)\
.sum(axis=0))
# creation of sum_costs_of_BO metrics
bool_costs_of_BO = [c for c in results.columns if 'Cost' in c and\
'Total' not in c and\
'BO' in c]
sum_costs_of_BO = int(results[bool_costs_of_BO].sum(axis=1)\
.sum(axis=0))
dates.append(date_)
cycles.append(dt.date(date_.year, date_.month, 1))
costs_of_back_orders.append(sum_costs_of_BO)
costs_of_stock.append(sum_costs_of_stock)
state.cc_data = pd.DataFrame({'Date': dates,
'Cycle': cycles,
'Cost of Back Order': costs_of_back_orders,
'Cost of Stock': costs_of_stock})
cc_compare_cycles_md = """
<|container|
# **Compare**{: .color-primary} cycles
<|Start cycles comparison|button|on_action=update_cc_data|class_name=mb2|>
<|Table|expandable|expanded=False|
<|{cc_data}|table|>
|>
## Evolution of costs
<|{cc_data}|chart|type=bar|x=Cycle|y[1]=Cost of Back Order|y[2]=Cost of Stock|layout={cc_layout}|>
|>
"""
|
# Create app for demo-production-planning compare_scenario_md.py
import taipy as tp
import pandas as pd
cs_compare_scenario_md = """
<|part|class_name=container|
# **Compare**{: .color-primary} scenarios
Choose two scenarios to compare.
<|layout|columns=3 3 auto|columns[mobile]=1|gap=1.5rem|class_name=align_columns_bottom|
<layout_scenario|
**Scenario 1**
<|layout|columns=1 1 3|columns[mobile]=1|
Year <|{sm_selected_year}|selector|lov={sm_year_selector}|dropdown|width=100%|on_change=change_sm_month_selector|>
Month <|{sm_selected_month}|selector|lov={sm_month_selector}|dropdown|width=100%|on_change=change_scenario_selector|>
**Scenario** <|{selected_scenario}|selector|lov={scenario_selector}|dropdown|adapter=adapt_scenarios|width=18rem|>
|>
|layout_scenario>
<layout_scenario|
**Scenario 2**
<|layout|columns=1 1 3|columns[mobile]=1|
Year <|{sm_selected_year}|selector|lov={sm_year_selector}|dropdown|width=100%|on_change=change_sm_month_selector|>
Month <|{sm_selected_month}|selector|lov={sm_month_selector}|dropdown|width=100%|on_change=change_scenario_selector|>
**Scenario** <|{selected_scenario_two}|selector|lov={scenario_selector_two}|dropdown|adapter=adapt_scenarios|>
|>
|layout_scenario>
<br/>
<br/>
<|Compare scenario|button|on_action=compare_scenarios|active={len(scenario_selector)>1}|>
|>
<|part|render={cs_show_comparison and len(scenario_selector)>=2}|class_name=mt2 card p2|
<|layout|columns=1 1 1|columns[mobile]=1|class_name=align_columns_bottom|
**Representation**
<|{cs_compar_graph_selected}|selector|lov={cs_compar_graph_selector}|dropdown|>
**Total cost of scenario 1:** *<|{str(int(sum_costs/1000))+' K'}|>*
{: .text-center}
**Total cost of scenario 2:** *<|{str(int(cs_sum_costs_two/1000))+' K'}|>*
{: .text-center}
|>
<|part|render={cs_compar_graph_selected=='Metrics'}|class_name=mt2|
<|layout|columns=1 1|columns[mobile]=1|
<|{cs_comparaison_metrics_df[cs_comparaison_metrics_df['Metrics']=='BO Cost']}|chart|type=bar|x=Metrics|y[1]=Scenario 1: BO Cost|y[2]=Scenario 2: BO Cost|color[2]=#2b93db|height={cs_height_bar_chart}|>
<|{cs_comparaison_metrics_df[cs_comparaison_metrics_df['Metrics']=='Stock Cost']}|chart|type=bar|x=Metrics|y[1]=Scenario 1: Stock Cost|y[2]=Scenario 2: Stock Cost|color[1]=#ff7f0e|color[2]=#ff9a41|height={cs_height_bar_chart}|>
|>
|>
<|{cs_comparaison_df}|chart|x=index|y[1]=Scenario 1 Cost|y[2]=Scenario 2 Cost|color[2]=#1f77b4|line[2]=dash|render={cs_compar_graph_selected=='Costs'}|>
<|{cs_comparaison_df}|chart|x=index|y[1]=Scenario 1 Purchase|y[2]=Scenario 2 Purchase|color[2]=#1f77b4|line[2]=dash|render={cs_compar_graph_selected=='Purchases'}|>
<|{cs_comparaison_df}|chart|x=index|y[1]=Scenario 1 Production|y[2]=Scenario 2 Production|color[2]=#1f77b4|line[2]=dash|render={cs_compar_graph_selected=='Productions'}|>
<|{cs_comparaison_df}|chart|x=index|y[1]=Scenario 1 Stock|y[2]=Scenario 2 Stock|color[2]=#1f77b4|line[2]=dash|render={cs_compar_graph_selected=='Stocks'}|>
<|{cs_comparaison_df}|chart|x=index|y[1]=Scenario 1 BO|y[2]=Scenario 2 BO|color[2]=#1f77b4|line[2]=dash|render={cs_compar_graph_selected=='Back Order'}|>
|>
|>
"""
def compare_scenarios(state):
"""This function compares two scenarios chosen by the user on different metrics and populate dataframes for the comparison graphs.
Args:
state (State): All the GUI variables
"""
state.cs_show_comparison = True
# get the results of the two scenarios chosen by the user
results_1 = state.selected_scenario.results.read()
results_2 = state.selected_scenario_two.results.read()
state.cs_sum_costs_two = results_2['Total Cost'].sum()
# calculate the partial costs of the two scenarios
bool_costs_of_stock = [c for c in results_2.columns
if 'Cost' in c and 'Total' not in c and 'Stock' in c]
state.cs_sum_costs_of_stock_two = int(results_2[bool_costs_of_stock].sum(axis=1)\
.sum(axis=0))
bool_costs_of_BO = [c for c in results_2.columns
if 'Cost' in c and 'Total' not in c and 'BO' in c]
state.cs_sum_costs_of_BO_two = int(results_2[bool_costs_of_BO].sum(axis=1)\
.sum(axis=0))
# populate the dataframes for the comparison graphs
new_result_1 = pd.DataFrame({"index": results_1.index})
new_result_2 = pd.DataFrame({"index": results_2.index})
columns_to_merge = ['Cost', 'Purchase', 'Production', 'Stock', 'BO']
for col in columns_to_merge:
if col == 'Cost':
bool_col_1 = [c for c in results_1.columns
if col in c and 'Total' not in c]
bool_col_2 = [c for c in results_2.columns
if col in c and 'Total' not in c]
else:
bool_col_1 = [c for c in results_1.columns
if col in c and 'Total' not in c and 'Cost' not in c]
bool_col_2 = [c for c in results_2.columns
if col in c and 'Total' not in c and 'Cost' not in c]
new_result_1[col] = results_1[bool_col_1].sum(axis=1)
new_result_2[col] = results_2[bool_col_2].sum(axis=1)
new_result_1.columns = ['Scenario 1 ' + column if column != 'index' else 'index'
for column in new_result_1.columns]
new_result_2.columns = ['Scenario 2 ' + column if column !='index' else 'index'
for column in new_result_2.columns]
state.cs_comparaison_metrics_df = pd.DataFrame(
{
"Metrics": [ "Stock Cost", "BO Cost"],
"Scenario 1: Stock Cost": [state.sum_costs_of_stock, None],
"Scenario 2: Stock Cost": [state.cs_sum_costs_of_stock_two, None],
"Scenario 1: BO Cost": [None, state.sum_costs_of_BO],
"Scenario 2: BO Cost": [None, state.cs_sum_costs_of_BO_two]
})
state.cs_comparaison_df = pd.merge(new_result_1, new_result_2, on="index", how="inner")
print("Comparaison done")
pass
cs_height_bar_chart = "80%"
cs_show_comparison = False
cs_compar_graph_selector = [
'Metrics',
'Costs',
'Purchases',
'Productions',
'Stocks',
'Back Order']
cs_compar_graph_selected = cs_compar_graph_selector[0]
cs_comparaison_df = pd.DataFrame({'index': [0],
'Scenario 1 Cost': [0],
'Scenario 1 Purchase': [0],
'Scenario 1 Production': [0],
'Scenario 1 Stock': [0],
'Scenario 1 BO': [0],
'Scenario 2 Cost': [0],
'Scenario 2 Purchase': [0],
'Scenario 2 Production': [0],
'Scenario 2 Stock': [0],
'Scenario 2 BO': [0]})
cs_comparaison_metrics_df = pd.DataFrame({"Metrics": ["Stock Cost", "BO Cost"],
"Scenario 1: Stock Cost": [0, 0],
"Scenario 2: Stock Cost": [0, 0],
"Scenario 1: BO Cost": [0, 0],
"Scenario 2: BO Cost": [0, 0]})
cs_sum_costs_of_stock_two = 0
cs_sum_costs_of_BO_two = 0
cs_sum_costs_two = 0
|
# Create app for demo-production-planning scenario_manager.md
<|container|
# **Scenario**{: .color-primary } Manager
<|layout|columns=8 4 auto|
<layout_scenario|
<|layout|columns=1 1 3|
Year <|{sm_selected_year}|selector|lov={sm_year_selector}|dropdown|on_change=change_sm_month_selector|>
Month <|{sm_selected_month}|selector|lov={sm_month_selector}|dropdown|on_change=change_scenario_selector|>
**Scenario** <|{selected_scenario}|selector|lov={scenario_selector}|dropdown|adapter=adapt_scenarios|width=18rem|class_name=success|>
|>
|layout_scenario>
Graph <|{sm_graph_selected}|selector|lov={sm_graph_selector}|dropdown|>
<toggle_chart|
Pie/Line chart
<|{sm_show_pie}|toggle|lov={sm_choice_chart}|value_by_id|active={not 'Product ' in sm_graph_selected}|>
|toggle_chart>
|>
---
<|layout|columns=9 3|gap=1.5rem|
<|part|render={len(scenario_selector)>0}|
<|layout|columns=1 1|gap=1rem|
<|part|class_name=card mb1 p2 text_center|
<|{str(int(sum_costs_of_BO/1000))+' K'}|indicator|value={sum_costs_of_BO}|min=50_000|max=1_000|width=93%|>
**Back Order Cost**
|>
<|part|class_name=card mb1 p2 text_center|
<|{str(int(sum_costs_of_stock/1000))+' K'}|indicator|value={sum_costs_of_stock}|min=100_000|max=25_000|width=93%|>
**Stock Cost**
|>
|>
<|{pie_results.loc[['Stock FPA Cost', 'Stock FPB Cost', 'Stock RP1 Cost', 'Stock RP2 Cost', 'Purchase RP1 Cost', 'Purchase RP2 Cost', 'BO FPA Cost', 'BO FPB Cost']]}|chart|type=pie|values=values|labels=labels|render={sm_show_pie=='pie' and sm_graph_selected=='Costs'}|>
<|{sm_results}|chart|x=index|y[1]=Stock FPA Cost|y[2]=Stock FPB Cost|y[3]=Stock RP1 Cost|y[4]=Stock RP2 Cost|y[5]=Purchase RP1 Cost|y[6]=Purchase RP2 Cost|y[7]=BO FPA Cost|y[8]=BO FPB Cost|y[9]=Total Cost|render={sm_show_pie=='chart' and sm_graph_selected=='Costs'}|>
<|{pie_results.loc[['Monthly Purchase RP1', 'Monthly Purchase RP2']]}|chart|type=pie|values=values|labels=labels|render={sm_show_pie=='pie' and sm_graph_selected=='Purchases'}|>
<|{sm_results}|chart|x=index|y[1]=Monthly Purchase RP1|y[2]=Monthly Purchase RP2|render={sm_show_pie=='chart' and sm_graph_selected=='Purchases'}|>
<|{pie_results.loc[['Monthly Production FPA', 'Max Capacity FPA', 'Monthly Production FPB', 'Max Capacity FPB']]}|chart|type=pie|values=values|labels=labels|render={sm_show_pie=='pie' and sm_graph_selected=='Productions'}|>
<|{sm_results}|chart|x=index|y[1]=Monthly Production FPA|y[2]=Max Capacity FPA|line[2]=dash|y[3]=Monthly Production FPB|y[4]=Max Capacity FPB|line[4]=dash|render={sm_show_pie=='chart' and sm_graph_selected=='Productions'}|>
<|{pie_results.loc[['Monthly Stock FPA', 'Monthly Stock FPB', 'Monthly Stock RP1', 'Monthly Stock RP2']]}|chart|type=pie|values=values|labels=labels|render={sm_show_pie=='pie' and sm_graph_selected=='Stocks'}|>
<|{sm_results}|chart|x=index|y[1]=Monthly Stock FPA|y[2]=Monthly Stock FPB|y[3]=Monthly Stock RP1|y[4]=Monthly Stock RP2|render={sm_show_pie=='chart' and sm_graph_selected=='Stocks'}|>
<|{pie_results.loc[['Monthly BO FPA', 'Monthly BO FPB']]}|chart|type=pie|values=values|labels=labels|render={sm_show_pie=='pie' and sm_graph_selected=='Back Order'}|>
<|{sm_results}|chart|x=index|y[1]=Monthly BO FPA|y[2]=Monthly BO FPB|render={sm_show_pie=='chart' and sm_graph_selected=='Back Order'}|>
<|{sm_results}|chart|x=index|y[1]=Monthly Production FPA|y[2]=Monthly Stock FPA|y[3]=Monthly BO FPA|y[4]=Max Capacity FPA|line[4]=dash|y[5]=Demand FPA|render={sm_graph_selected=='Product FPA'}|>
<|{sm_results}|chart|x=index|y[1]=Monthly Production FPB|y[2]=Monthly Stock FPB|y[3]=Monthly BO FPB|y[4]=Max Capacity FPB|line[4]=dash|y[5]=Demand FPB|render={sm_graph_selected=='Product FPB'}|>
<|{sm_results}|chart|x=index|y[1]=Monthly Stock RP1|y[2]=Monthly Purchase RP1|render={sm_graph_selected=='Product RP1'}|>
<|{sm_results}|chart|x=index|y[1]=Monthly Stock RP2|y[2]=Monthly Purchase RP2|render={sm_graph_selected=='Product RP2'}|>
|>
<no_scenario|part|render={len(scenario_selector)==0}|
#### No scenario created for the current month #### {: .mt0 .color_secondary }
|no_scenario>
<|mt2|
<|{sm_param_selected}|selector|lov={sm_param_selector}|class_name=fullwidth|>
<|part|render={sm_param_selected == 'Capacity Constraints'}|
<|{sm_product_param}|toggle|lov={sm_choice_product_param}|value_by_id|class_name=mb1 text_center|>
<|part|render={sm_product_param == 'product_FPA'}|
Max Capacity FPA : *<|{fixed_variables.Max_Capacity_FPA}|>*
<|{fixed_variables.Max_Capacity_FPA}|slider|min=332|max=1567|active={sm_show_config_scenario}|>
Max Capacity of FPA and FPB : *<|{fixed_variables.Max_Capacity_of_FPA_and_FPB}|>*
<|{fixed_variables.Max_Capacity_of_FPA_and_FPB}|slider|min=598|max=2821|active={sm_show_config_scenario}|>
|>
<|part|render={sm_product_param == 'product_FPB'}|
Max Capacity FPB : *<|{fixed_variables.Max_Capacity_FPB}|>*
<|{fixed_variables.Max_Capacity_FPB}|slider|min=332|max=1567|active={sm_show_config_scenario}|>
Max Capacity of FPA and FPB : *<|{fixed_variables.Max_Capacity_of_FPA_and_FPB}|>*
<|{fixed_variables.Max_Capacity_of_FPA_and_FPB}|slider|min=598|max=2821|active={sm_show_config_scenario}|>
|>
<|part|render={sm_product_param == 'product_RPone'}|
Max Stock RP1 : *<|{fixed_variables.Max_Stock_RPone}|>*
<|{fixed_variables.Max_Stock_RPone}|slider|min=28|max=132|active={sm_show_config_scenario}|>
|>
<|part|render={sm_product_param == 'product_RPtwo'}|
Max Stock RP2 : *<|{fixed_variables.Max_Stock_RPtwo}|>*
<|{fixed_variables.Max_Stock_RPtwo}|slider|min=21|max=99|active={sm_show_config_scenario}|>
|>
|>
<|part|render={sm_param_selected == 'Objective Weights'}|
Weight of Stock : *<|{fixed_variables.Weight_of_Stock}|>*
<|{fixed_variables.Weight_of_Stock}|slider|min=35|max=165|active={sm_show_config_scenario}|>
Weight of Back Order : *<|{fixed_variables.Weight_of_Back_Order}|>*
<|{fixed_variables.Weight_of_Back_Order}|slider|min=35|max=165|active={sm_show_config_scenario}|>
|>
<|part|render={sm_param_selected == 'Initial Parameters'}|
<|{sm_product_param}|toggle|lov={sm_choice_product_param}|value_by_id|class_name=mb1 text_center|>
<|part|render={sm_product_param == 'product_FPA'}|
Unit Cost - FPA Back Order : *<|{fixed_variables.cost_FPA_Back_Order}|>*
<|{fixed_variables.cost_FPA_Back_Order}|slider|min=70|max=330|active={sm_show_config_scenario}|>
Unit Cost - FPA Stock : *<|{fixed_variables.cost_FPA_Stock}|>*
<|{fixed_variables.cost_FPA_Stock}|slider|min=15|max=74|active={sm_show_config_scenario}|>
Initial Back Order FPA : *<|{fixed_variables.Initial_Back_Order_FPA}|>*
<|{fixed_variables.Initial_Back_Order_FPA}|slider|min=0|max=50|active={sm_show_config_scenario}|>
Initial Stock FPA : *<|{fixed_variables.Initial_Stock_FPA}|>*
<|{fixed_variables.Initial_Stock_FPA}|slider|min=10|max=49|active={sm_show_config_scenario}|>
Initial Production FPA : *<|{fixed_variables.Initial_Production_FPA}|>*
<|{fixed_variables.Initial_Production_FPA}|slider|min=297|max=1402|active={sm_show_config_scenario}|>
|>
<|part|render={sm_product_param == 'product_FPB'}|
Unit Cost - FPB Back Order : *<|{fixed_variables.cost_FPB_Back_Order}|>*
<|{fixed_variables.cost_FPB_Back_Order}|slider|min=87|max=412|active={sm_show_config_scenario}|>
Unit Cost - FPB Stock : *<|{fixed_variables.cost_FPB_Stock}|>*
<|{fixed_variables.cost_FPB_Stock}|slider|min=14|max=66|active={sm_show_config_scenario}|>
Initial Back Order FPB : *<|{fixed_variables.Initial_Back_Order_FPB}|>*
<|{fixed_variables.Initial_Back_Order_FPB}|slider|min=8|max=41|active={sm_show_config_scenario}|>
Initial Stock FPB : *<|{fixed_variables.Initial_Stock_FPB}|>*
<|{fixed_variables.Initial_Stock_FPB}|slider|min=0|max=50|active={sm_show_config_scenario}|>
Initial Production FPB : *<|{fixed_variables.Initial_Production_FPB}|>*
<|{fixed_variables.Initial_Production_FPB}|slider|min=280|max=1320|active={sm_show_config_scenario}|>
|>
<|part|render={sm_product_param == 'product_RPone'}|
Initial Stock RP1 : *<|{fixed_variables.Initial_Stock_RPone}|>*
<|{fixed_variables.Initial_Stock_RPone}|slider|min=10|max=49|active={sm_show_config_scenario}|>
Unit Cost - RP1 Stock : *<|{fixed_variables.cost_RPone_Stock}|>*
<|{fixed_variables.cost_RPone_Stock}|slider|min=10|max=49|active={sm_show_config_scenario}|>
Unit Cost - RP1 Purchase : *<|{fixed_variables.cost_RPone_Purchase}|>*
<|{fixed_variables.cost_RPone_Purchase}|slider|min=35|max=165|active={sm_show_config_scenario}|>
Initial Purchase RP1 : *<|{fixed_variables.Initial_Purchase_RPone}|>*
<|{fixed_variables.Initial_Purchase_RPone}|slider|min=12|max=57|active={sm_show_config_scenario}|>
|>
<|part|render={sm_product_param == 'product_RPtwo'}|
Initial Stock RP2 : *<|{fixed_variables.Initial_Stock_RPtwo}|>*
<|{fixed_variables.Initial_Stock_RPtwo}|slider|min=14|max=66|active={sm_show_config_scenario}|>
Unit Cost - RP2 Stock : *<|{fixed_variables.cost_RPtwo_Stock}|>*
<|{fixed_variables.cost_RPtwo_Stock}|slider|min=21|max=99|active={sm_show_config_scenario}|>
Unit Cost - RP2 Purchase : *<|{fixed_variables.cost_RPtwo_Purchase}|>*
<|{fixed_variables.cost_RPtwo_Purchase}|slider|min=52|max=247|active={sm_show_config_scenario}|>
Initial Purchase RP2 : *<|{fixed_variables.Initial_Purchase_RPtwo}|>*
<|{fixed_variables.Initial_Purchase_RPtwo}|slider|min=14|max=66|active={sm_show_config_scenario}|>
|>
|>
<|Delete|button|on_action=delete_scenario_fct|active={len(scenario_selector)>0 and sm_show_config_scenario}|class_name=fullwidth error mb_half|>
<|Make Primary|button|on_action=make_primary|active={len(scenario_selector)>0 and not selected_scenario.is_primary and sm_show_config_scenario}|class_name=fullwidth secondary mb_half|>
<|Re-optimize|button|on_action=submit_scenario|active={len(scenario_selector)>0 and sm_show_config_scenario}|class_name=fullwidth secondary mb_half|>
<|New scenario|button|on_action=create_new_scenario|active={sm_show_config_scenario}|class_name=fullwidth plain mb_half|>
|>
|>
|>
|
# Create app for demo-production-planning scenario_manager_md.py
from pages.shared import update_scenario_selector
from taipy.gui import notify, invoke_long_callback, Markdown
import taipy as tp
from config.config import scenario_cfg
import datetime as dt
import pandas as pd
from config.config import fixed_variables_default
from taipy.gui import Icon
# Toggle for setting charts
sm_choice_chart = [("pie", Icon("images/icons/pie_chart.svg", "pie")),
("chart", Icon("images/icons/bar_chart.svg", "chart"))]
sm_show_pie = sm_choice_chart[1][0]
sm_results = pd.DataFrame({"Monthly Production FPA":[],
"Monthly Stock FPA": [],
"Monthly BO FPA": [],
"Max Capacity FPA": [],
"Monthly Production FPB": [],
"Monthly Stock FPB": [],
"Monthly BO FPB": [],
"Max Capacity FPB": [],
"Monthly Stock RP1":[],
"Monthly Stock RP2":[],
"Monthly Purchase RP1":[],
"Monthly Purchase RP2":[],
"Demand FPA": [],
"Demand FPB": [],
'Stock FPA Cost': [],
'Stock FPB Cost': [],
'Stock RP1 Cost': [],
'Stock RP2 Cost': [],
'Purchase RP1 Cost': [],
'Purchase RP2 Cost': [],
"BO FPA Cost":[],
"BO FPB Cost":[],
"Total Cost": [],
"index": []})
pie_results = pd.DataFrame(
{
"values": [1] * len(list(sm_results.columns)),
"labels": list(sm_results.columns)
}, index=list(sm_results.columns)
)
chart = sm_results[['index',
'Purchase RP1 Cost',
'Stock RP1 Cost',
'Stock RP2 Cost',
'Purchase RP2 Cost',
'Stock FPA Cost',
'Stock FPB Cost',
'BO FPA Cost',
'BO FPB Cost',
'Total Cost']]
sm_param_selector = ['Capacity Constraints','Objective Weights','Initial Parameters']
sm_param_selected = sm_param_selector[0]
# Toggle for choosing the sliders
sm_choice_product_param = [("product_RPone", Icon("images/P1.png", "product_RPone")),
("product_RPtwo", Icon("images/P2.png", "product_RPtwo")),
("product_FPA", Icon("images/PA.png", "product_FPA")),
("product_FPB", Icon("images/PB.png", "product_FPB"))]
sm_product_param = 'Else'
# Button for configuring scenario
sm_show_config_scenario = True
# Choose the graph to display
sm_graph_selector = [
'Costs',
'Purchases',
'Productions',
'Stocks',
'Back Order',
'Product RP1',
'Product RP2',
'Product FPA',
'Product FPB']
sm_graph_selected = sm_graph_selector[0]
fixed_variables = fixed_variables_default
def make_primary(state):
tp.set_primary(state.selected_scenario)
update_scenario_selector(state)
state.selected_scenario = state.selected_scenario
def delete_scenario_fct(state):
if state.selected_scenario.is_primary:
notify(
state,
"warning",
"You can't delete the primary scenario of the month")
else:
tp.delete(state.selected_scenario.id)
update_scenario_selector(state)
if len(state.scenario_selector) != 0:
state.selected_scenario = state.scenario_selector[0]
def create_new_scenario(state):
"""
This function is used when the 'New scenario' (create) button is pressed on the scenario_manager_md page.
See the scenario_manager_md page for more information. It configures another scenario,
creates it, and submits it.
Args:
state (_type_): the state object of Taipy
"""
name = f"{dt.datetime.now().strftime('%d-%b-%Y')} Nb : {len(state.scenario_selector)}"
scenario = tp.create_scenario(scenario_cfg, name=name)
scenario.properties['user'] = state.state_id
# update the scenario selector
update_scenario_selector(state)
state.selected_scenario = scenario
submit_scenario(state)
def catch_error_in_submit(state):
"""
This function is used to catch the errors that can occur when a scenario is submitted. When an
error is caught, a notification appears and the variables are changed to avoid the error.
The errors come from the solution of the CPLEX model, where infeasible or unbounded problems
can happen if the fixed variables are wrongly set.
Args:
state (_type_): the state object of Taipy
"""
# if our initial production is higher than our max production capacity
if state.fixed_variables["Initial_Production_FPA"] > state.fixed_variables["Max_Capacity_FPA"]:
state.fixed_variables["Initial_Production_FPA"] = state.fixed_variables["Max_Capacity_FPA"]
notify(
state,
"warning",
"Value of initial production FPA is greater than max production A")
# if our initial production is higher than our max production capacity
if state.fixed_variables["Initial_Production_FPB"] > state.fixed_variables["Max_Capacity_FPB"]:
state.fixed_variables["Initial_Production_FPB"] = state.fixed_variables["Max_Capacity_FPB"]
notify(
state,
"warning",
"Value of initial production FPB is greater than max production B")
# if our initial stock is higher than our max stock
if state.fixed_variables["Initial_Stock_RPone"] > state.fixed_variables["Max_Stock_RPone"]:
state.fixed_variables["Initial_Stock_RPone"] = state.fixed_variables["Max_Stock_RPone"]
notify(
state,
"warning",
"Value of initial stock RP1 is greater than max stock 1")
# if our initial stock is higher than our max stock
if state.fixed_variables["Initial_Stock_RPtwo"] > state.fixed_variables["Max_Stock_RPtwo"]:
state.fixed_variables["Initial_Stock_RPtwo"] = state.fixed_variables["Max_Stock_RPtwo"]
notify(
state,
"warning",
"Value of initial stock RP2 is greater than max stock 2")
# if our combined initial productions are higher than the max combined production capacity
if state.fixed_variables["Initial_Production_FPA"] + \
state.fixed_variables["Initial_Production_FPB"] > state.fixed_variables["Max_Capacity_of_FPA_and_FPB"]:
state.fixed_variables["Initial_Production_FPA"] = int(state.fixed_variables["Max_Capacity_of_FPA_and_FPB"] / 2)
state.fixed_variables["Initial_Production_FPB"] = int(state.fixed_variables["Max_Capacity_of_FPA_and_FPB"] / 2)
notify(
state,
"warning",
"Value of initial productions is greater than the max capacities")
def submit_heavy(scenario):
tp.submit(scenario)
def submit_status(state, status):
update_variables(state)
def submit_scenario(state):
"""
This function submits the selected scenario. It is used when the 'Re-optimize' button is pressed
or when a new scenario is created. It checks for errors, writes the updated parameters of the
problem, and submits the scenario. At the end, all the relevant variables are updated.
Args:
state (_type_): the state object of Taipy
"""
# see if there are errors in the parameters that will be given to the
# scenario
catch_error_in_submit(state)
# setting the scenario with the right parameters
old_fixed_variables = state.selected_scenario.fixed_variables.read()
if old_fixed_variables != state.fixed_variables._dict:
state.selected_scenario.fixed_variables.write(state.fixed_variables._dict)
# running the scenario in a long callback and update variables
invoke_long_callback(state, submit_heavy, [state.selected_scenario], submit_status)
def update_variables(state):
"""This function is only used in the submit_scenario or when the selected_scenario changes. It will update all the useful variables that we want to update.
Args:
state (_type_): the state object of Taipy
"""
# it will set the sliders to the right values when a scenario is changed
state.fixed_variables = state.selected_scenario.fixed_variables.read()
# read the result
state.sm_results = state.selected_scenario.results.read()
state.pie_results = pd.DataFrame(
{
"values": state.sm_results.sum(axis=0),
"labels": list(state.sm_results.columns)
})
state.sum_costs = state.sm_results['Total Cost'].sum()
bool_costs_of_stock = [c for c in state.sm_results.columns if 'Cost' in c and\
'Total' not in c and\
'Stock' in c]
state.sum_costs_of_stock = int(state.sm_results[bool_costs_of_stock].sum(axis=1)\
.sum(axis=0))
bool_costs_of_BO = [c for c in state.sm_results.columns if 'Cost' in c and\
'Total' not in c and\
'BO' in c]
state.sum_costs_of_BO = int(state.sm_results[bool_costs_of_BO].sum(axis=1)\
.sum(axis=0))
sm_scenario_manager_md = Markdown('pages/scenario_manager/scenario_manager.md')
|
# Create app for demo-movie-recommendation ReadMe.md
# Demo Movie Recommendation
## Usage
- [Usage](#usage)
- [Demo Movie Recommendation](#what-is-demo-movie-recommendation)
- [Directory Structure](#directory-structure)
- [License](#license)
- [Installation](#installation)
- [Contributing](#contributing)
- [Code of conduct](#code-of-conduct)
## What is Demo Movie Recommendation
Taipy is a Python library for creating Business Applications. More information on our
[website](https://www.taipy.io).
[Demo Movie Recommendation](https://github.com/Avaiga/demo-movie-recommendation) is a
full application showing how Taipy Core and Taipy GUI can work together to build a simple
but powerful application. This demo shows the basics of search and recommendation
algorithms. The goal is to be able to search for films and recommend related/similar films.
These recommendations use the user profile built by tracking their session.
Get data [here](https://files.grouplens.org/datasets/movielens/ml-25m.zip).
### Demo Type
- **Level**: Advanced
- **Topic**: Taipy-GUI, Taipy-Core
- **Components/Controls**:
- Taipy GUI: input, selector, chart, expandable, table
- Taipy Core: datanode, pipeline, scenario
## How to run
This demo works with Python 3.8 or newer. Install the dependencies listed in
*requirements.txt* and run *main.py*.
Get data [here](https://files.grouplens.org/datasets/movielens/ml-25m.zip).
## Introduction
A userId is generated for each user when they open the app. Two pages are created: one to search for films and one to recommend them.
### Search page
- Be able to search for films
- List of films appears after the search (movie selector); a minimal sketch of this flow is shown after this list
- Clicking on a movie displays a description of that movie: image, ratings, cast, date, ...
- Possibility to use the IMDb API in real time to provide this information
- Recommendations for the searched film (not based on title syntax but on association with
similar films, e.g. Avengers ~ Batman)
- Recommendations depending on the films the user liked
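The sketch below shows one way this search flow could be wired in `pages/search.py`; the `on_search` callback and the page markup are illustrative assumptions, not the shipped implementation. It reuses the `search` function from `algos/algos.py`, which returns the best-matching movies for a query.
```python
from taipy.gui import Markdown
from algos.algos import search

searched_film = ""
selected_film = ""
film_selector = []

def on_search(state):
    # Run the TF-IDF search and list the matching titles in the selector
    results = search(state.searched_film)
    state.film_selector = list(results["title"])

page_search = Markdown("""
<|{searched_film}|input|label=Search a film|on_action=on_search|>
<|{selected_film}|selector|lov={film_selector}|>
""")
```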
### User page
- Possibility to create Data Nodes for tracking/profiling:
- Selected films
- Viewed films
- Liked films
- Possibility to recommend a list of films based on selected/viewed/liked films (with constraints on genres)
- Example: `np.mean([find_similar_movies(movie_id) for movie_id in liked_movies_id])` (see the sketch after this list)
- Deduce the user profile (favourite genres, favourite period, ...)
- When a film is selected, add it to the set of the user's films (a data node?)
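As a minimal sketch of the example above (the helper `recommend_from_likes` and its signature are hypothetical), one way to aggregate recommendations is to stack the tables returned by `find_similar_movies` for every liked film and average the scores of films recommended more than once:
```python
import pandas as pd
from algos.algos import find_similar_movies

def recommend_from_likes(liked_movies_id, top_n=10):
    # One recommendation table per liked film, stacked together
    recs = pd.concat([find_similar_movies(movie_id) for movie_id in liked_movies_id])
    # Average the score of films recommended for several liked movies
    return (
        recs.groupby(["title", "genres"], as_index=False)["score"]
        .mean()
        .sort_values("score", ascending=False)
        .head(top_n)
    )
```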
## Directory Structure
- `src/`: Contains the demo source code.
- `src/algos`: Contains the functions to be executed as tasks by Taipy.
- `src/config`: Contains the configuration files.
- `src/data`: Contains the application data files.
- `src/pages`: Contains the page definition files.
- `CODE_OF_CONDUCT.md`: Code of conduct for members and contributors of _demo-movie-recommendation_.
- `CONTRIBUTING.md`: Instructions to contribute to _demo-movie-recommendation_.
- `INSTALLATION.md`: Instructions to install _demo-movie-recommendation_.
- `LICENSE`: The Apache 2.0 License.
- `Pipfile`: File used by the Pipenv virtual environment to manage project dependencies.
- `README.md`: Current file.
## License
Copyright 2022 Avaiga Private Limited
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
[http://www.apache.org/licenses/LICENSE-2.0](https://www.apache.org/licenses/LICENSE-2.0.txt)
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
## Installation
Want to install _Demo Movie Recommendation_? Check out our
[`INSTALLATION.md`](INSTALLATION.md) file.
## Contributing
Want to help build _Demo Movie Recommendation_? Check out our
[`CONTRIBUTING.md`](CONTRIBUTING.md) file.
## Code of conduct
Want to be part of the _Demo Movie Recommendation_ community? Check out our
[`CODE_OF_CONDUCT.md`](CODE_OF_CONDUCT.md) file.
|
# Create app for demo-movie-recommendation main.py
from taipy.gui import Gui, Markdown, notify
from pages.search import page_search
from pages.user import page_user
pages = {"/":"<|navbar|>",
"search":page_search,
"user":page_user}
if __name__ == "__main__":
gui = Gui(pages=pages)
gui.run(port=5006)
|
# Create app for demo-movie-recommendation algos.py
import pandas as pd
import re
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
import numpy as np
def clean_title(title):
title = re.sub("[^a-zA-Z0-9 ]", "", title)
return title
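# search: TF-IDF + cosine similarity over cleaned titles; returns the five best-matching movies for the query.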
def search(title):
title = clean_title(title)
query_vec = vectorizer.transform([title])
similarity = cosine_similarity(query_vec, tfidf).flatten()
indices = np.argpartition(similarity, -5)[-5:]
results = movies.iloc[indices].iloc[::-1]
return results
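# find_similar_movies: collaborative-filtering style recommendations. It finds users who rated
# the given movie highly (> 4), collects the other movies those users also rated highly, and
# scores each candidate by how much more popular it is among these similar users than among
# all users, returning the 10 best-scored titles.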
def find_similar_movies(movie_id):
similar_users = ratings[(ratings["movieId"] == movie_id) & (ratings["rating"] > 4)]["userId"].unique()
similar_user_recs = ratings[(ratings["userId"].isin(similar_users)) & (ratings["rating"] > 4)]["movieId"]
similar_user_recs = similar_user_recs.value_counts() / len(similar_users)
similar_user_recs = similar_user_recs[similar_user_recs > .10]
all_users = ratings[(ratings["movieId"].isin(similar_user_recs.index)) & (ratings["rating"] > 4)]
all_user_recs = all_users["movieId"].value_counts() / len(all_users["userId"].unique())
rec_percentages = pd.concat([similar_user_recs, all_user_recs], axis=1)
rec_percentages.columns = ["similar", "all"]
rec_percentages["score"] = rec_percentages["similar"] / rec_percentages["all"]
rec_percentages = rec_percentages.sort_values("score", ascending=False)
return rec_percentages.head(10).merge(movies, left_index=True, right_on="movieId")[["score", "title", "genres"]]
vectorizer = TfidfVectorizer(ngram_range=(1,2))
#movie_id = 89745
ratings = pd.read_csv('data/ratings.csv')
movies = pd.read_csv("data/movies.csv")
movies["clean_title"] = movies["title"].apply(clean_title)
tfidf = vectorizer.fit_transform(movies["clean_title"])
#print(search('Avengers'))
#start=time.time()
#print(find_similar_movies(movie_id))
#print(time.time()-start)
|
# Create app for demo-movie-recommendation user.py
from taipy.gui import Gui, notify, Markdown
page_user = Markdown("""
""")
|
# Create app for demo-movie-recommendation search.py
from taipy.gui import Gui, notify, Markdown
from algos.algos import clean_title, search
import pandas as pd
searched_film = ""
selected_film = ""
film_selector = [('','')]
page_search = Markdown("""
""")
|
# Create app for demo-sales-dashboard main.py
import pandas as pd
from taipy.gui import Gui, notify
# ---- READ EXCEL ----
df = pd.read_excel(
io="data/supermarkt_sales.xlsx",
engine="openpyxl",
sheet_name="Sales",
skiprows=3,
usecols="B:R",
nrows=1000,
)
# Add 'Hour' column to dataframe
df["Hour"] = pd.to_datetime(df["Time"], format="%H:%M:%S").dt.hour
city = cities = list(df["City"].unique())
customer_type = types = list(df["Customer_type"].unique())
gender = genders = list(df["Gender"].unique())
layout = {"margin": {"l": 220}}
page = """
<|toggle|theme|>
<|25 75|layout|gap=30px|
<|sidebar|
## Please **filter**{: .color-primary} here:
<|{city}|selector|lov={cities}|multiple|label=Select the City|dropdown|on_change=on_filter|class_name=fullwidth|>
<|{customer_type}|selector|lov={types}|multiple|label=Select the Customer Type|dropdown|on_change=on_filter|class_name=fullwidth|>
<|{gender}|selector|lov={genders}|multiple|label=Select the Gender|dropdown|on_change=on_filter|class_name=fullwidth|>
|>
<main_page|
# 📊 Sales **Dashboard**{: .color-primary}
<|1 1 1|layout|
<total_sales|
## **Total**{: .color-primary} sales:
US $ <|{int(df_selection["Total"].sum())}|>
|total_sales>
<average_rating|
## Average **Rating**{: .color-primary}:
<|{round(df_selection["Rating"].mean(), 1)}|> <|{"⭐" * int(round(round(df_selection["Rating"].mean(), 1), 0))}|>
|average_rating>
<average_sale|
## Average **Sales**{: .color-primary}:
US $ <|{round(df_selection["Total"].mean(), 2)}|>
|average_sale>
|>
<br/>
<|Sales Table|expandable|not expanded|
<|{df_selection}|table|page_size=5|>
|>
<|card p2|
<|{sales_by_hour}|chart|x=Hour|y=Total|type=bar|title=Sales by Hour|>
<|{sales_by_product_line}|chart|x=Total|y=Product line|type=bar|orientation=h|title=Sales by Product|layout={layout}|>
|>
Get the Taipy Code [here](https://github.com/Avaiga/demo-sales-dashboard) and the original code [here](https://github.com/Sven-Bo/streamlit-sales-dashboard)
|main_page>
|>
"""
def filter(city, customer_type, gender):
df_selection = df[
df["City"].isin(city)
& df["Customer_type"].isin(customer_type)
& df["Gender"].isin(gender)
]
# SALES BY PRODUCT LINE [BAR CHART]
sales_by_product_line = (
df_selection[["Product line", "Total"]]
.groupby(by=["Product line"])
.sum()[["Total"]]
.sort_values(by="Total")
)
sales_by_product_line["Product line"] = sales_by_product_line.index
# SALES BY HOUR [BAR CHART]
sales_by_hour = (
df_selection[["Hour", "Total"]].groupby(by=["Hour"]).sum()[["Total"]]
)
sales_by_hour["Hour"] = sales_by_hour.index
return df_selection, sales_by_product_line, sales_by_hour
def on_filter(state):
if len(state.city) == 0 or len(state.customer_type) == 0 or len(state.gender) == 0:
notify(state, "Error", "No results found. Check the filters.")
return
state.df_selection, state.sales_by_product_line, state.sales_by_hour = filter(
state.city, state.customer_type, state.gender
)
if __name__ == "__main__":
df_selection, sales_by_product_line, sales_by_hour = filter(
city, customer_type, gender
)
Gui(page).run(margin="0em", title="Sales Dashboard")
|
# Create app for demo-sentiment-analysis main.py
""" Creates a sentiment analysis App using Taipy"""
from transformers import AutoTokenizer
from transformers import AutoModelForSequenceClassification
from scipy.special import softmax
import numpy as np
import pandas as pd
from taipy.gui import Gui, notify
text = "Original text"
page = """
# Getting started with **Taipy**{: .color-primary} **GUI**{: .color-primary}
<|layout|columns=1 1|
<|
**My text:** <|{text}|>
**Enter a word:**
<|{text}|input|>
<|Analyze|button|on_action=local_callback|>
|>
<|Table|expandable|
<|{dataframe}|table|width=100%|number_format=%.2f|>
|>
|>
<|layout|columns=1 1 1|
## Positive <|{np.mean(dataframe['Score Pos'])}|text|format=%.2f|raw|>
## Neutral <|{np.mean(dataframe['Score Neu'])}|text|format=%.2f|raw|>
## Negative <|{np.mean(dataframe['Score Neg'])}|text|format=%.2f|raw|>
|>
<|{dataframe}|chart|type=bar|x=Text|y[1]=Score Pos|y[2]=Score Neu|y[3]=Score Neg|y[4]=Overall|color[1]=green|color[2]=grey|color[3]=red|type[4]=line|>
"""
MODEL = "sbcBI/sentiment_analysis_model"
tokenizer = AutoTokenizer.from_pretrained(MODEL)
model = AutoModelForSequenceClassification.from_pretrained(MODEL)
dataframe = pd.DataFrame(
{
"Text": [""],
"Score Pos": [0.33],
"Score Neu": [0.33],
"Score Neg": [0.33],
"Overall": [0],
}
)
dataframe2 = dataframe.copy()
def analyze_text(input_text: str) -> dict:
"""
Runs the sentiment analysis model on the text
Args:
- input_text (str): text to be analyzed
Returns:
- dict: dictionary with the scores
"""
encoded_text = tokenizer(input_text, return_tensors="pt")
output = model(**encoded_text)
scores = output[0][0].detach().numpy()
scores = softmax(scores)
return {
"Text": input_text[:50],
"Score Pos": scores[2],
"Score Neu": scores[1],
"Score Neg": scores[0],
"Overall": scores[2] - scores[0],
}
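# Illustrative usage (hypothetical input): analyze_text("I love Taipy") returns a dict with the
# truncated text, the positive/neutral/negative probabilities and an overall score (pos - neg).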
def local_callback(state) -> None:
"""
Analyze the text and updates the dataframe
Args:
- state: state of the Taipy App
"""
notify(state, "Info", f"The text is: {state.text}", True)
temp = state.dataframe.copy()
scores = analyze_text(state.text)
state.dataframe = pd.concat([temp, pd.DataFrame([scores])], ignore_index=True)  # DataFrame.append was removed in pandas 2.x
state.text = ""
path = ""
treatment = 0
page_file = """
<|{path}|file_selector|extensions=.txt|label=Upload .txt file|on_action=analyze_file|> <|{f'Downloading {treatment}%...'}|>
<br/>
<|Table|expandable|
<|{dataframe2}|table|width=100%|number_format=%.2f|>
|>
<br/>
<|{dataframe2}|chart|type=bar|x=Text|y[1]=Score Pos|y[2]=Score Neu|y[3]=Score Neg|y[4]=Overall|color[1]=green|color[2]=grey|color[3]=red|type[4]=line|height=600px|>
"""
def analyze_file(state) -> None:
"""
Analyse the lines in a text file
Args:
- state: state of the Taipy App
"""
state.dataframe2 = dataframe2
state.treatment = 0
with open(state.path, "r", encoding="utf-8") as f:
data = f.read()
print(data)
file_list = list(data.split("\n"))
for i, input_text in enumerate(file_list):
state.treatment = int((i + 1) * 100 / len(file_list))
temp = state.dataframe2.copy()
scores = analyze_text(input_text)
print(scores)
state.dataframe2 = pd.concat([temp, pd.DataFrame([scores])], ignore_index=True)  # DataFrame.append was removed in pandas 2.x
state.path = None
pages = {
"/": "<|toggle|theme|>\n<center>\n<|navbar|>\n</center>",
"line": page,
"text": page_file,
}
Gui(pages=pages).run(title="Sentiment Analysis")
|
# Create app for demo-template test_main.py
|
# Create app for demo-template test_config.py
# Please insert your unit tests of config code here.
|
# Create app for demo-template test_algo1.py
# Please insert your unit tests of algos code here.
#
# Here is an example:
#
#
# from .algos.algo1 import algo1_first_function
#
# def test_algo1():
# assert algo1_first_function() == 10
|
# Create app for demo-template main.py
# Please insert your main code here.
|
# Create app for demo-template config.py
from taipy import Config
# Please insert your configuration here.
#
# Here is an example:
#
# conf_path = Path('my', 'config', 'path', 'taipy-config.toml')
# Config.load(str(conf_path))
#
# first_data_node_config = Config.configure_data_node(...)
# second_data_node_config = Config.configure_data_node(...)
#
# task_config = Config.configure_task(...)
#
# scenario_config = Config.configure_scenario(...)
|
# Create app for demo-template algo2.py
# Please insert your algo as python functions to be translated into taipy tasks/pipelines here.
#
# Here is an example:
#
# def algo2_(params):
# print("this is my algo 2 function")
# #
|
# Create app for demo-template algo1.py
# Please insert your python functions to be translated into taipy tasks here.
#
# Here is an example:
#
# def algo1_first_function(params):
# print("this is my first function")
# if __sub_function(params):
# return 10
# else:
# return 20
#
#
# def __sub_function(params):
# print("this is an internal sub_function")
# return True
#
#
# def algo1_second_function(other_param: int):
# print("this is my second function")
# return other_param + 1
|
# Create app for demo-template page1.py
# Please insert your code dedicated to pages here.
#
|
# Create app for demo-image-classification demo-image_classifcation-taipy-cloud.py
import tensorflow as tf
from tensorflow.keras import layers, models
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.utils import to_categorical
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
class_names = ['AIRPLANE', 'AUTOMOBILE', 'BIRD', 'CAT', 'DEER', 'DOG', 'FROG', 'HORSE', 'SHIP', 'TRUCK']
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()
x_train = x_train / 255.0
y_train = to_categorical(y_train, len(class_names))
x_test = x_test / 255.0
y_test = to_categorical(y_test, len(class_names))
#########################################################################################################
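# create_model builds a small VGG-style CNN: three blocks of two 3x3 convolutions followed by
# max-pooling, then a dense classifier over the 10 CIFAR-10 classes.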
def create_model():
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu', padding='same', input_shape=(32, 32, 3)))
model.add(layers.Conv2D(32, (3, 3), activation='relu', padding='same'))
model.add(layers.MaxPool2D((2,2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu', padding='same',))
model.add(layers.Conv2D(64, (3, 3), activation='relu', padding='same',))
model.add(layers.MaxPool2D((2,2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu', padding='same',))
model.add(layers.Conv2D(128, (3, 3), activation='relu', padding='same',))
model.add(layers.MaxPool2D((2,2)))
model.add(layers.Flatten())
model.add(layers.Dense(128, activation='relu'))
model.add(layers.Dense(10, activation='softmax'))
model.compile(optimizer='adam',
loss='categorical_crossentropy',
metrics=['accuracy'])
return model
df = pd.read_csv("saved_models/df.csv")
df["N_Epochs"] = range(1,len(df)+1)
#STATE VARIABLES
model = None
# Parameters for models & training
epochs = 1
input_model_name = "model"
# Parameters for trained model
trained_model_path = ""
# Parameters for CIFAR dataset
cifar_image_index = 10
cifar_image_path = "images/sample/taipy.jpg"
cifar_predicted_label = 'NA'
cifar_true_label = 'NA'
# Parameters for online image
online_image_url = "URL"
online_image_path = "images/sample/airplane.jpg"
online_image_count = 0
online_image_predicted_label = 'NA' # predicted label for the online image
#P1
from taipy import Gui
from taipy.gui import invoke_long_callback, notify
import urllib
p1 = """
<center><h1>Image Classification CNN</h1></center>
<|layout|columns=1 3|
<|
## PARAMETERS
Enter the chosen optimal number of epochs:
<|{epochs}|input|>
Register model name:
<|{input_model_name}|input|>
Train the model with the Training + Validation sets:
<|START TRAINING|button|on_action=train_button|>
### Upload Trained Model
<|{trained_model_path}|file_selector|label=Upload trained model|on_action=load_trained_model|extensions=.h5|>
|>
<|
<center><h2> Training and Validation Accuracy </h2></center>
<|{df}|chart|x=N_Epochs|y[1]=accuracy|y[2]=val_accuracy|>
|>
|>
___
"""
def merged_train(model,number_of_epochs,name):
# merge the training and validation sets
#x_all = np.concatenate((x_train, x_test))
#y_all = np.concatenate((y_train, y_test))
# train with the merged dataset
#history = model.fit(
# datagen.flow(x_all, y_all, batch_size=64),
# epochs=number_of_epochs)
#model.save("saved_models/{}.h5".format(name),save_format='h5')
print("TRAINING & SAVING COMPLETED!")
def train_button(state):
notify(state, "info", "Started training model with {} epochs".format(state.epochs), True, 1000)
#model = create_model()
invoke_long_callback(state,merged_train,[model, int(state.epochs), state.input_model_name])
def load_trained_model(state):
loaded_model = tf.keras.models.load_model(state.trained_model_path)
state.model = loaded_model
#Second half of the applications
p2 = """
<|layout|columns=1 3|
<|
### CIFAR10 Images Prediction
Enter CIFAR10 image index:
<|{cifar_image_index}|input|>
<|PREDICT CIFAR IMAGE|button|on_action=predict_cifar_image|>
<|{cifar_image_path}|image|height=100px|width=100px|>
## Predicted label: <|{cifar_predicted_label}|>
## True label: <|{cifar_true_label}|>
|>
<|
### Paste an online image link here for prediction:
<|{online_image_url}|input|on_action=load_online_image|>
<center> <|{online_image_path}|image|height=300px|width=300px|> </center>
<|PREDICT ONLINE IMAGE|button|on_action=predict_online_image|>
## Predicted label: <|{online_image_predicted_label }|>
|>
|>
"""
def predict_cifar_image(state):
#Retrieve the cifar image at the specified index and save as PIL Image obj
cifar_img_idx = int(state.cifar_image_index )
cifar_img_data = x_test[cifar_img_idx]
cifar_img = Image.fromarray(np.uint8(cifar_img_data*255))
cifar_img.save("images/cifar10_saved/{}.jpg".format(cifar_img_idx))
#Predict the label of the CIFAR image
img_for_pred = np.expand_dims(x_test[cifar_img_idx], axis=0)
cifar_img_pred_label = np.argmax(state.model.predict(img_for_pred))
cifar_img_true_label = y_test[cifar_img_idx].argmax()
#Update the GUI
state.cifar_image_path = "images/cifar10_saved/{}.jpg".format(cifar_img_idx)
state.cifar_predicted_label = str(class_names[cifar_img_pred_label])
state.cifar_true_label = str(class_names[cifar_img_true_label])
def load_online_image(state):
urllib.request.urlretrieve(state.online_image_url, "images/online_image.jpg")
state.online_image_path = "images/online_image.jpg"
def predict_online_image(state):
#Retrieve & save online image in order to show on the image box
urllib.request.urlretrieve(state.online_image_url , "images/saved_images/{}.jpg".format(state.online_image_count))
state.online_image_path = "images/saved_images/{}.jpg".format(state.online_image_count)
#Predict the label of the online image
img_array = tf.keras.utils.load_img(state.online_image_path, target_size=(32, 32))
image = tf.keras.utils.img_to_array(img_array) # (height, width, channels)
image = np.expand_dims(image, axis=0) / 255. # (1, height, width, channels) + normalize
#Update the GUI
state.online_image_predicted_label = class_names[np.argmax(state.model.predict(image))]
state.online_image_count += 1
Gui(page=p1+p2).run(dark_mode=False)
|
# Create app for demo-image-classification main.py
import tensorflow as tf
from tensorflow.keras import layers, models
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.utils import to_categorical
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
class_names = ['AIRPLANE', 'AUTOMOBILE', 'BIRD', 'CAT', 'DEER', 'DOG', 'FROG', 'HORSE', 'SHIP', 'TRUCK']
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()
x_train = x_train / 255.0
y_train = to_categorical(y_train, len(class_names))
x_test = x_test / 255.0
y_test = to_categorical(y_test, len(class_names))
#########################################################################################################
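# create_model builds a small VGG-style CNN: three blocks of two 3x3 convolutions followed by
# max-pooling, then a dense classifier over the 10 CIFAR-10 classes.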
def create_model():
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu', padding='same', input_shape=(32, 32, 3)))
model.add(layers.Conv2D(32, (3, 3), activation='relu', padding='same'))
model.add(layers.MaxPool2D((2,2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu', padding='same',))
model.add(layers.Conv2D(64, (3, 3), activation='relu', padding='same',))
model.add(layers.MaxPool2D((2,2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu', padding='same',))
model.add(layers.Conv2D(128, (3, 3), activation='relu', padding='same',))
model.add(layers.MaxPool2D((2,2)))
model.add(layers.Flatten())
model.add(layers.Dense(128, activation='relu'))
model.add(layers.Dense(10, activation='softmax'))
model.compile(optimizer='adam',
loss='categorical_crossentropy',
metrics=['accuracy'])
return model
df = pd.read_csv("saved_models/df.csv")
df["N_Epochs"] = range(1,len(df)+1)
#STATE VARIABLES
model = None
# Parameters for models & training
epochs = 1
input_model_name = "model"
# Parameters for trained model
trained_model_path = ""
# Parameters for CIFAR dataset
cifar_image_index = 10
cifar_image_path = "images/sample/taipy.jpg"
cifar_predicted_label = 'NA'
cifar_true_label = 'NA'
# Parameters for online image
online_image_url = "URL"
online_image_path = "images/sample/airplane.jpg"
online_image_count = 0
online_image_predicted_label = 'NA' # predicted label for the online image
#P1
from taipy import Gui
from taipy.gui import invoke_long_callback, notify
import urllib
p1 = """
<center><h1>Image Classification CNN</h1></center>
<|layout|columns=1 3|
<|
## PARAMETERS
Enter the chosen optimal number of epochs:
<|{epochs}|input|>
Register model name:
<|{input_model_name}|input|>
Train the model with the Training + Validation sets:
<|START TRAINING|button|on_action=train_button|>
### Upload Trained Model
<|{trained_model_path}|file_selector|label=Upload trained model|on_action=load_trained_model|extensions=.h5|>
|>
<|
<center><h2> Training and Validation Accuracy </h2></center>
<|{df}|chart|x=N_Epochs|y[1]=accuracy|y[2]=val_accuracy|>
|>
|>
___
"""
def merged_train(model,number_of_epochs,name):
# merge the training and validation sets
#x_all = np.concatenate((x_train, x_test))
#y_all = np.concatenate((y_train, y_test))
# train with the merged dataset
#history = model.fit(
# datagen.flow(x_all, y_all, batch_size=64),
# epochs=number_of_epochs)
#model.save("saved_models/{}.h5".format(name),save_format='h5')
print("TRAINING & SAVING COMPLETED!")
def train_button(state):
notify(state, "info", "Started training model with {} epochs".format(state.epochs), True, 1000)
#model = create_model()
invoke_long_callback(state,merged_train,[model, int(state.epochs), state.input_model_name])
def load_trained_model(state):
loaded_model = tf.keras.models.load_model(state.trained_model_path)
state.model = loaded_model
#Second half of the applications
p2 = """
<|layout|columns=1 3|
<|
### CIFAR10 Images Prediction
Enter CIFAR10 image index:
<|{cifar_image_index}|input|>
<|PREDICT CIFAR IMAGE|button|on_action=predict_cifar_image|>
<|{cifar_image_path}|image|height=100px|width=100px|>
## Predicted label: <|{cifar_predicted_label}|>
## True label: <|{cifar_true_label}|>
|>
<|
### Paste an online image link here for prediction:
<|{online_image_url}|input|on_action=load_online_image|>
<center> <|{online_image_path}|image|height=300px|width=300px|> </center>
<|PREDICT ONLINE IMAGE|button|on_action=predict_online_image|>
## Predicted label: <|{online_image_predicted_label }|>
|>
|>
"""
def predict_cifar_image(state):
#Retrieve the cifar image at the specified index and save as PIL Image obj
cifar_img_idx = int(state.cifar_image_index )
cifar_img_data = x_test[cifar_img_idx]
cifar_img = Image.fromarray(np.uint8(cifar_img_data*255))
cifar_img.save("images/cifar10_saved/{}.jpg".format(cifar_img_idx))
#Predict the label of the CIFAR image
img_for_pred = np.expand_dims(x_test[cifar_img_idx], axis=0)
cifar_img_pred_label = np.argmax(state.model.predict(img_for_pred))
cifar_img_true_label = y_test[cifar_img_idx].argmax()
#Update the GUI
state.cifar_image_path = "images/cifar10_saved/{}.jpg".format(cifar_img_idx)
state.cifar_predicted_label = str(class_names[cifar_img_pred_label])
state.cifar_true_label = str(class_names[cifar_img_true_label])
def load_online_image(state):
urllib.request.urlretrieve(state.online_image_url, "images/online_image.jpg")
state.online_image_path = "images/online_image.jpg"
def predict_online_image(state):
#Retrieve & save online image in order to show on the image box
urllib.request.urlretrieve(state.online_image_url , "images/saved_images/{}.jpg".format(state.online_image_count))
state.online_image_path = "images/saved_images/{}.jpg".format(state.online_image_count)
#Predict the label of the online image
img_array = tf.keras.utils.load_img(state.online_image_path, target_size=(32, 32))
image = tf.keras.utils.img_to_array(img_array) # (height, width, channels)
image = np.expand_dims(image, axis=0) / 255. # (1, height, width, channels) + normalize
#Update the GUI
state.online_image_predicted_label = class_names[np.argmax(state.model.predict(image))]
state.online_image_count += 1
Gui(page=p1+p2).run(dark_mode=False)
|
# Create app for demo-drift-detection main.py
import taipy as tp
from taipy.gui import Gui
import pandas as pd
from configuration.config import scenario_cfg
from pages import *
from pages.Drift.Drift import merge_data
if __name__ == "__main__":
ref_data = pd.read_csv("data/data_ref.csv")
tp.Core().run()
scenario = tp.create_scenario(scenario_cfg)
ref_selected = "data_ref"
compare_selected = "data_noisy"
ref_data = pd.read_csv("data/" + ref_selected + ".csv")
scenario.reference_data.write(ref_data)
compare_data = pd.read_csv("data/" + compare_selected + ".csv")
scenario.compare_data.write(compare_data)
bp_data, sex_data = merge_data(ref_data, compare_data)
gui = Gui(page=Drift)
gui.run(title="Drift Detection")
|
# Create app for demo-drift-detection config.py
"""
Contain the application's configuration including the scenario configurations.
The configuration is run by the Core service.
"""
from algorithms.algorithms import *
from taipy import Config
reference_data_cfg = Config.configure_data_node("reference_data", "csv")
compare_data_cfg = Config.configure_data_node("compare_data", "csv")
num_cols_cfg = Config.configure_data_node("num_cols")
cat_cols_cfg = Config.configure_data_node("cat_cols")
num_results_cfg = Config.configure_data_node("num_results")
cat_results_cfg = Config.configure_data_node("cat_results")
drift_results_cfg = Config.configure_data_node("drift_results")
detect_numerical_cfg = Config.configure_task(
id="detect_numerical",
function=detect_numerical,
input=[reference_data_cfg],
output=num_cols_cfg,
)
detect_categorical_cfg = Config.configure_task(
id="detect_categorical",
function=detect_categorical,
input=[reference_data_cfg],
output=cat_cols_cfg,
)
kolmogorov_cfg = Config.configure_task(
id="kolmogorov",
function=kolmogorov,
input=[compare_data_cfg, reference_data_cfg, num_cols_cfg],
output=num_results_cfg,
)
chi_squared_cfg = Config.configure_task(
id="chi_squared",
function=chi_squared,
input=[compare_data_cfg, reference_data_cfg, cat_cols_cfg],
output=cat_results_cfg,
)
collect_results_cfg = Config.configure_task(
id="collect_results",
function=collect_results,
input=[num_results_cfg, cat_results_cfg],
output=drift_results_cfg,
)
scenario_cfg = Config.configure_scenario(
id="drift_detection",
task_configs=[
detect_numerical_cfg,
detect_categorical_cfg,
kolmogorov_cfg,
chi_squared_cfg,
collect_results_cfg,
],
)
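# A scenario built from this configuration is created and populated in main.py; a typical
# (illustrative) run would be:
#   scenario = tp.create_scenario(scenario_cfg)
#   scenario.reference_data.write(ref_data)
#   scenario.compare_data.write(compare_data)
#   tp.submit(scenario)                      # executes the five tasks above
#   results = scenario.drift_results.read()  # one row per column with its test and p-value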
|
# Create app for demo-drift-detection __init__.py
from .config import *
|
# Create app for demo-drift-detection algorithms.py
"""
This file is designed to contain the various Python functions used to configure tasks.
The functions will be imported by the __init__.py file in this folder.
"""
import pandas as pd
import scipy.stats as stats
def detect_categorical(dataset: pd.DataFrame) -> list:
"""
Detect the names of categorical columns in a dataframe.
Args:
dataset: The dataframe to detect categorical columns from.
Returns:
A list of categorical column names.
"""
categorical = []
for col in dataset.columns:
if dataset[col].dtype == "object":
categorical.append(col)
return categorical
def detect_numerical(dataset: pd.DataFrame) -> list:
"""
Detect the names of numerical columns in a dataframe.
Args:
dataset: The dataframe to detect numerical columns from.
Returns:
A list of numerical column names.
"""
numerical = []
for col in dataset.columns:
if dataset[col].dtype != "object":
numerical.append(col)
return numerical
def ks_2samp(series_1: pd.Series, series_2: pd.Series) -> float:
"""
Runs the two-sample Kolmogorov-Smirnov test on two series.
Args:
series_1: The first series.
series_2: The second series.
Returns:
The p-value of the test.
"""
analysis = stats.ks_2samp(series_1, series_2)
return int(analysis[1] * 100) / 100
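# Illustrative usage: ks_2samp(ref_data["blood_pressure"], compare_data["blood_pressure"])
# returns the test's p-value truncated to two decimals.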
def kolmogorov(
dataset: pd.DataFrame, ref_dataset: pd.DataFrame, num_cols: list
) -> dict:
"""
Runs the two-sample Kolmogorov-Smirnov test on all numerical columns in a dataframe.
Args:
dataset: The dataframe to run the test on.
ref_dataset: The reference dataframe to compare against.
num_cols: The list of numerical column names.
Returns:
A dictionary of test statistics.
"""
ks_dict = {}
for col in num_cols:
ks_dict[col] = ks_2samp(dataset[col], ref_dataset[col])
return ks_dict
def chi_squared_2samp(series_1: pd.Series, series_2: pd.Series) -> float:
"""
Runs the two-sample chi-squared test on two series.
Args:
series_1: The first series.
series_2: The second series.
Returns:
The p-value of the test.
"""
# Get the unique values
series_1_unique = series_1.unique()
series_2_unique = series_2.unique()
# Get the frequencies
series_1_freq = series_1.value_counts()
series_2_freq = series_2.value_counts()
# Get the expected frequencies
series_1_exp_freq = []
series_2_exp_freq = []
for i, _ in enumerate(series_1_unique):
series_1_exp_freq.append(
series_1_freq[series_1_unique[i]] * len(series_2) / len(series_1)
)
for i, _ in enumerate(series_2_unique):
series_2_exp_freq.append(
series_2_freq[series_2_unique[i]] * len(series_1) / len(series_2)
)
analysis = stats.chisquare(series_1_exp_freq, series_2_exp_freq)
return int(analysis[1] * 100) / 100
def chi_squared(
dataset: pd.DataFrame, ref_dataset: pd.DataFrame, cat_cols: list
) -> dict:
"""
Runs the chi-squared test on all categorical columns in a dataframe.
Args:
dataset: The dataframe to run the test on.
ref_dataset: The reference dataframe to compare against.
cat_cols: The list of categorical column names.
Returns:
A dictionary of test statistics.
"""
chi_dict = {}
for col in cat_cols:
chi_dict[col] = chi_squared_2samp(dataset[col], ref_dataset[col])
return chi_dict
def collect_results(num_results: dict, cat_results: dict) -> pd.DataFrame:
"""
Collects the results of the two tests into a single dictionary.
Args:
num_results: The dictionary of numerical test results.
cat_results: The dictionary of categorical test results.
Returns:
A dataframe of the results.
"""
columns = []
tests = []
values = []
detected = []
for col in cat_results:
columns.append(col)
tests.append("Chi-Squared")
values.append(cat_results[col])
if cat_results[col] < 0.05:
detected.append(True)
else:
detected.append(False)
for col in num_results:
columns.append(col)
tests.append("Kolmogorov-Smirnov")
values.append(num_results[col])
if num_results[col] < 0.05:
detected.append(True)
else:
detected.append(False)
results = pd.DataFrame(
{"Column": columns, "Test": tests, "p-value": values, "Drift": detected}
)
return results
def merge_data(ref_data: pd.DataFrame, compare_data: pd.DataFrame):
"""
Merges the reference and comparison data into a single dataframe.
The Dataframe is prepared for plotting.
Args:
ref_data: The reference data.
compare_data: The comparison data.
Returns:
bp_data: The blood pressure distributions of both datasets.
sex_data: The dataset for the sex distribution.
"""
bp_data = [
{"Blood Pressure": list(ref_data["blood_pressure"])},
{"Blood Pressure": list(compare_data["blood_pressure"])},
]
# Count the Male and Female rows in ref and compare
male_ref = ref_data[ref_data["sex"] == "Male"].shape[0]
male_compare = compare_data[compare_data["sex"] == "Male"].shape[0]
female_ref = ref_data[ref_data["sex"] == "Female"].shape[0]
female_compare = compare_data[compare_data["sex"] == "Female"].shape[0]
sex_data = pd.DataFrame(
{
"Dataset": ["Ref", "Compare"],
"Male": [male_ref, male_compare],
"Female": [female_ref, female_compare],
}
)
return bp_data, sex_data
|
# Create app for demo-drift-detection __init__.py
from algorithms import *
|
# Create app for demo-drift-detection root.md
<center>
<|navbar|>
</center>
|
# Create app for demo-drift-detection __init__.py
from .root import root_page
from .Drift.Drift import Drift
|