Dataset Viewer

text (string, lengths 44 to 15.3k)
---|
# Create app to read and display data from Excel file
import pandas as pd
from taipy import Gui

# ---- READ EXCEL ----
df = pd.read_excel(
    io="data/supermarkt_sales.xlsx",
    engine="openpyxl",
    sheet_name="Sales",
    skiprows=3,
    usecols="B:R",
    nrows=1000,
)

# Add 'hour' column to dataframe
df["hour"] = pd.to_datetime(df["Time"], format="%H:%M:%S").dt.hour

# initialization of variables
cities = list(df["City"].unique())
types = list(df["Customer_type"].unique())
genders = list(df["Gender"].unique())

city = cities
customer_type = types
gender = genders

layout = {"margin": {"l": 220}}

# Markdown for the entire page
## NOTE: {: .orange} references a color from main.css used to style my text
## <text|
## |text>
## "text" here is just a name given to my part/my section
## it has no meaning in the code
page = """<|toggle|theme|>

<|layout|columns=20 80|gap=30px|

<sidebar|
## Please **filter**{: .orange} here:

<|{city}|selector|lov={cities}|multiple|label=Select the City|dropdown|on_change=on_filter|width=100%|>

<|{customer_type}|selector|lov={types}|multiple|label=Select the Customer Type|dropdown|on_change=on_filter|width=100%|>

<|{gender}|selector|lov={genders}|multiple|label=Select the Gender|dropdown|on_change=on_filter|width=100%|>
|sidebar>

<main_page|
# 📊 **Sales**{: .orange} Dashboard

<|layout|columns=1 1 1|
<total_sales|
## **Total**{: .orange} sales:
### US $ <|{int(df_selection["Total"].sum())}|>
|total_sales>

<average_rating|
## **Average**{: .orange} Rating:
### <|{round(df_selection["Rating"].mean(), 1)}|>
<|{"⭐" * int(round(round(df_selection["Rating"].mean(), 1), 0))}|>
|average_rating>

<average_sale|
## Average Sales Per **Transaction**{: .orange}:
### US $ <|{round(df_selection["Total"].mean(), 2)}|>
|average_sale>
|>

<br/>

Display df_selection in an expandable
<|Sales Table|expandable|expanded=False|
<|{df_selection}|table|width=100%|page_size=5|rebuild|class_name=table|>
|>

<charts|
<|{sales_by_hour}|chart|x=Hour|y=Total|type=bar|title=Sales by Hour|color=#ff462b|>

<|{sales_by_product_line}|chart|x=Total|y=Product|type=bar|orientation=h|title=Sales by Product|layout={layout}|color=#ff462b|>
|charts>
|main_page>
|>

Code from [Coding is Fun](https://github.com/Sven-Bo)

Get the Taipy Code [here](https://github.com/Avaiga/demo-sales-dashboard) and the original code [here](https://github.com/Sven-Bo/streamlit-sales-dashboard)
"""


def filter(city, customer_type, gender):
    df_selection = df[
        df["City"].isin(city)
        & df["Customer_type"].isin(customer_type)
        & df["Gender"].isin(gender)
    ]

    # SALES BY PRODUCT LINE [BAR CHART]
    sales_by_product_line = (
        df_selection[["Product line", "Total"]]
        .groupby(by=["Product line"])
        .sum()[["Total"]]
        .sort_values(by="Total")
    )
    sales_by_product_line["Product"] = sales_by_product_line.index

    # SALES BY HOUR [BAR CHART]
    sales_by_hour = (
        df_selection[["hour", "Total"]].groupby(by=["hour"]).sum()[["Total"]]
    )
    sales_by_hour["Hour"] = sales_by_hour.index

    return df_selection, sales_by_product_line, sales_by_hour


def on_filter(state):
    state.df_selection, state.sales_by_product_line, state.sales_by_hour = filter(
        state.city, state.customer_type, state.gender
    )


if __name__ == "__main__":
    # initialize dataframes
    df_selection, sales_by_product_line, sales_by_hour = filter(
        city, customer_type, gender
    )

    # run the app
    Gui(page).run()
|
# Create an app with slider and chart
from taipy.gui import Gui
from math import cos, exp

value = 10

page = """
# Taipy *Demo*

Value: <|{value}|text|>

<|{value}|slider|on_change=on_slider|>

<|{data}|chart|>
"""


def compute_data(decay: int) -> list:
    return [cos(i / 6) * exp(-i * decay / 600) for i in range(100)]


def on_slider(state):
    state.data = compute_data(state.value)


data = compute_data(value)

Gui(page).run(use_reloader=True, port=5002)
|
# Create app to predict covid in the world
from taipy.gui import Gui
import taipy as tp

from pages.country.country import country_md
from pages.world.world import world_md
from pages.map.map import map_md
from pages.predictions.predictions import predictions_md, selected_scenario
from pages.root import root, selected_country, selector_country

from config.config import Config

pages = {
    '/': root,
    "Country": country_md,
    "World": world_md,
    "Map": map_md,
    "Predictions": predictions_md
}

gui_multi_pages = Gui(pages=pages)

if __name__ == '__main__':
    tp.Core().run()
    gui_multi_pages.run(title="Covid Dashboard")
|
# Create app for finance data analysis
import yfinance as yf
from taipy.gui import Gui
from taipy.gui.data.decimator import MinMaxDecimator, RDP, LTTB

df_AAPL = yf.Ticker("AAPL").history(interval="1d", period="100Y")
df_AAPL["DATE"] = df_AAPL.index.astype("int64").astype(float)

n_out = 500
decimator_instance = MinMaxDecimator(n_out=n_out)
decimate_data_count = len(df_AAPL)

page = """
# Decimator

From a data length of <|{len(df_AAPL)}|> to <|{n_out}|>

## Without decimator
<|{df_AAPL}|chart|x=DATE|y=Open|>

## With decimator
<|{df_AAPL}|chart|x=DATE|y=Open|decimator=decimator_instance|>
"""

gui = Gui(page)
gui.run(port=5026)
|
# Create an app to upload a csv and display it in a table
from taipy.gui import Gui
import pandas as pd

data = []
data_path = ""


def data_upload(state):
    state.data = pd.read_csv(state.data_path)


page = """
<|{data_path}|file_selector|on_action=data_upload|>

<|{data}|table|>
"""

Gui(page).run()
|
# Create an app to visualize sin and amp with slider and chart
from taipy.gui import Gui
from math import sin

amp = 1
data = []


def update(state):
    x = [i / 10 for i in range(100)]
    y = [sin(i) * state.amp for i in x]
    state.data = y


page = """
Amplitude: <|{amp}|slider|on_change=update|>

<|{data}|chart|>
"""

Gui(page).run()
|
# Create an app to visualize sin, cos with slider and chart
from taipy.gui import Gui
from math import sin, cos, exp, pi

frequency = 1
decay = 0.01
data = {"Sine": [], "Cosine": []}

page = """
# Sine and Cosine Functions

Frequency: <|{frequency}|slider|min=0|max=10|step=0.1|on_change=update|>

Decay: <|{decay}|slider|min=0|max=1|step=0.01|on_change=update|>

<|{data}|chart|>
"""


def update(state):
    x = [i / 10 for i in range(100)]
    y1 = [sin(i * state.frequency * 2 * pi) * exp(-i * state.decay) for i in x]
    y2 = [cos(i * state.frequency * 2 * pi) * exp(-i * state.decay) for i in x]
    state.data = {"Sine": y1, "Cosine": y2}


Gui(page).run(use_reloader=True)
|
# Create app to visualize country population
import numpy as np
import pandas as pd
from taipy.gui import Gui, Markdown

from data.data import data

selected_country = 'France'
selector_country = list(data["Country/Region"].unique())  # countries available in the selector
data_country_date = None
representation_selector = ['Cumulative', 'Density']
selected_representation = representation_selector[0]

layout = {'barmode': 'stack', "hovermode": "x"}
options = {"unselected": {"marker": {"opacity": 0.5}}}


def initialize_case_evolution(data, selected_country='France'):
    # Aggregation of the dataframe to erase the regions that will not be used here
    data_country_date = data.groupby(["Country/Region", 'Date'])\
                            .sum()\
                            .reset_index()
    # a country is selected, here France by default
    data_country_date = data_country_date.loc[data_country_date['Country/Region'] == selected_country]
    return data_country_date


data_country_date = initialize_case_evolution(data)

pie_chart = pd.DataFrame({
    "labels": ["Deaths", "Recovered", "Confirmed"],
    "values": [data_country_date.iloc[-1, 6], data_country_date.iloc[-1, 5], data_country_date.iloc[-1, 4]],
})


def convert_density(state):
    if state.selected_representation == 'Density':
        df_temp = state.data_country_date.copy()
        df_temp['Deaths'] = df_temp['Deaths'].diff().fillna(0)
        df_temp['Recovered'] = df_temp['Recovered'].diff().fillna(0)
        df_temp['Confirmed'] = df_temp['Confirmed'].diff().fillna(0)
        state.data_country_date = df_temp
    else:
        state.data_country_date = initialize_case_evolution(data, state.selected_country)


def on_change_country(state):
    # state contains all the Gui variables and it is through this state variable that we can update the Gui
    # state.selected_country, state.data_country_date, ...
    # update data_country_date with the right country (use initialize_case_evolution)
    print("Chosen country: ", state.selected_country)
    state.data_country_date = initialize_case_evolution(data, state.selected_country)
    state.pie_chart = pd.DataFrame({
        "labels": ["Deaths", "Recovered", "Confirmed"],
        "values": [state.data_country_date.iloc[-1, 6], state.data_country_date.iloc[-1, 5], state.data_country_date.iloc[-1, 4]],
    })
    convert_density(state)


page = """
# **Country**{: .color-primary} Statistics

<|layout|columns=1 1 1|
<|{selected_country}|selector|lov={selector_country}|on_change=on_change_country|dropdown|label=Country|>

<|{selected_representation}|toggle|lov={representation_selector}|on_change=convert_density|>
|>

<br/>

<|layout|columns=1 1 1 1|gap=50px|
<|card|
**Deaths**{: .color-primary}
<|{'{:,}'.format(int(data_country_date.iloc[-1]['Deaths'])).replace(',', ' ')}|text|class_name=h2|>
|>

<|card|
**Recovered**{: .color-primary}
<|{'{:,}'.format(int(data_country_date.iloc[-1]['Recovered'])).replace(',', ' ')}|text|class_name=h2|>
|>

<|card|
**Confirmed**{: .color-primary}
<|{'{:,}'.format(int(data_country_date.iloc[-1]['Confirmed'])).replace(',', ' ')}|text|class_name=h2|>
|>
|>

<br/>

<|layout|columns=2 1|
<|{data_country_date}|chart|type=bar|x=Date|y[3]=Deaths|y[2]=Recovered|y[1]=Confirmed|layout={layout}|options={options}|title=Covid Evolution|>

<|{pie_chart}|chart|type=pie|values=values|labels=labels|title=Distribution between cases|>
|>
"""

Gui(page).run(use_reloader=True)
|
# Create Taipy app to generate mandelbrot fractals
from taipy import Gui
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt

WINDOW_SIZE = 500
cm = plt.cm.get_cmap("viridis")


def generate_mandelbrot(
    center: int = WINDOW_SIZE / 2,
    dx_range: int = 1000,
    dx_start: float = -0.12,
    dy_range: float = 1000,
    dy_start: float = -0.82,
    iterations: int = 50,
    max_value: int = 200,
    i: int = 0,
) -> str:
    mat = np.zeros((WINDOW_SIZE, WINDOW_SIZE))
    for y in range(WINDOW_SIZE):
        for x in range(WINDOW_SIZE):
            dx = (x - center) / dx_range + dx_start
            dy = (y - center) / dy_range + dy_start
            a = dx
            b = dy
            for t in range(iterations):
                d = (a * a) - (b * b) + dx
                b = 2 * (a * b) + dy
                a = d
                h = d > max_value
                if h is True:
                    mat[x, y] = t
    colored_mat = cm(mat / mat.max())
    im = Image.fromarray((colored_mat * 255).astype(np.uint8))
    path = f"mandelbrot_{i}.png"
    im.save(path)
    return path


def generate(state):
    state.i = state.i + 1
    state.path = generate_mandelbrot(
        dx_start=-state.dx_start / 100,
        dy_start=(state.dy_start - 100) / 100,
        iterations=state.iterations,
        i=state.i,
    )


i = 0
dx_start = 11
dy_start = 17
iterations = 50
path = generate_mandelbrot(
    dx_start=-dx_start / 100,
    dy_start=(dy_start - 100) / 100,
)

page = """
# Mandelbrot Generator

<|layout|columns=35 65|
Display image from path
<|{path}|image|width=500px|height=500px|class_name=img|>

Iterations:<br />
Create a slider to select iterations
<|{iterations}|slider|min=10|max=50|continuous=False|on_change=generate|><br />
X Position:<br />
<|{dy_start}|slider|min=0|max=100|continuous=False|on_change=generate|><br />
Y Position:<br />
Slider dx_start
<|{dx_start}|slider|min=0|max=100|continuous=False|on_change=generate|><br />
|>
"""

Gui(page).run(title="Mandelbrot Generator")
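# A vectorized sketch of the same escape-time loop (an assumption, not part of the
# original demo): NumPy array operations replace the per-pixel Python loops, which
# is much faster on a 500x500 grid. It returns the escape-iteration matrix that
# generate_mandelbrot() above colors with the "viridis" colormap.
def mandelbrot_matrix_vectorized(
    center: float = WINDOW_SIZE / 2,
    dx_range: float = 1000,
    dx_start: float = -0.12,
    dy_range: float = 1000,
    dy_start: float = -0.82,
    iterations: int = 50,
    max_value: float = 200,
) -> np.ndarray:
    xs, ys = np.meshgrid(np.arange(WINDOW_SIZE), np.arange(WINDOW_SIZE), indexing="ij")
    # c holds the complex-plane coordinate of every pixel
    c = ((xs - center) / dx_range + dx_start) + 1j * ((ys - center) / dy_range + dy_start)
    z = np.zeros_like(c)
    escape = np.zeros(c.shape)
    for t in range(iterations):
        active = np.abs(z) <= max_value          # only iterate points that have not escaped
        z[active] = z[active] * z[active] + c[active]
        escape[np.abs(z) > max_value] = t        # record the latest escape iteration
    return escape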
|
# Create app to auto generate Twitter status
import logging
import random
import re

# Import from 3rd party libraries
from taipy.gui import Gui, notify, State
import taipy

# Import modules
import oai

# Configure logger
logging.basicConfig(format="\n%(asctime)s\n%(message)s", level=logging.INFO, force=True)


def error_prompt_flagged(state, prompt):
    """Notify user that a prompt has been flagged."""
    notify(state, "error", "Prompt flagged as inappropriate.")
    logging.info(f"Prompt flagged as inappropriate: {prompt}")


def error_too_many_requests(state):
    """Notify user that too many requests have been made."""
    notify(
        state,
        "error",
        "Too many requests. Please wait a few seconds before generating another text or image.",
    )
    logging.info(f"Session request limit reached: {state.n_requests}")
    state.n_requests = 1


# Define functions
def generate_text(state):
    """Generate Tweet text."""
    state.tweet = ""
    state.image = None

    # Check the number of requests done by the user
    if state.n_requests >= 5:
        error_too_many_requests(state)
        return

    # Check if the user has put a topic
    if state.topic == "":
        notify(state, "error", "Please enter a topic")
        return

    # Create the prompt and add a style or not
    if state.style != "":
        state.prompt = (
            f"Write a {state.mood} Tweet about {state.topic} in less than 120 characters "
            f"and with the style of {state.style}:\n\n\n\n"
        )
    else:
        state.prompt = f"Write a {state.mood} Tweet about {state.topic} in less than 120 characters:\n\n"

    # openai configured and check if text is flagged
    openai = oai.Openai()
    flagged = openai.moderate(state.prompt)

    if flagged:
        error_prompt_flagged(state, f"Prompt: {state.prompt}\n")
        return
    else:
        # Generate the tweet
        state.n_requests += 1
        state.tweet = openai.complete(state.prompt).strip().replace('"', "")

        # Notify the user in console and in the GUI
        logging.info(
            f"Topic: {state.prompt}{state.mood}{state.style}\n"
            f"Tweet: {state.tweet}"
        )
        notify(state, "success", "Tweet created!")


def generate_image(state):
    """Generate Tweet image."""
    notify(state, "info", "Generating image...")

    # Check the number of requests done by the user
    if state.n_requests >= 5:
        error_too_many_requests(state)
        return

    state.image = None

    # Creates the prompt
    prompt_wo_hashtags = re.sub("#[A-Za-z0-9_]+", "", state.prompt)
    processing_prompt = (
        "Create a detailed but brief description of an image that captures "
        f"the essence of the following text:\n{prompt_wo_hashtags}\n\n"
    )

    # Openai configured and check if text is flagged
    openai = oai.Openai()
    flagged = openai.moderate(processing_prompt)

    if flagged:
        error_prompt_flagged(state, processing_prompt)
        return
    else:
        state.n_requests += 1

        # Generate the prompt that will create the image
        processed_prompt = (
            openai.complete(prompt=processing_prompt, temperature=0.5, max_tokens=40)
            .strip()
            .replace('"', "")
            .split(".")[0]
            + "."
        )

        # Generate the image
        state.image = openai.image(processed_prompt)

        # Notify the user in console and in the GUI
        logging.info(f"Tweet: {state.prompt}\nImage prompt: {processed_prompt}")
        notify(state, "success", "Image created!")


def feeling_lucky(state):
    """Generate a feeling-lucky tweet."""
    with open("moods.txt") as f:
        sample_moods = f.read().splitlines()
    state.topic = "an interesting topic"
    state.mood = random.choice(sample_moods)
    state.style = ""
    generate_text(state)


# Variables
tweet = ""
prompt = ""
n_requests = 0
topic = "AI"
mood = "inspirational"
style = "elonmusk"
image = None


# Called whenever there is a problem
def on_exception(state, function_name: str, ex: Exception):
    logging.error(f"Problem {ex} \nin {function_name}")
    notify(state, "error", f"Problem {ex} \nin {function_name}")


def update_documents(state: State, docs: list[dict]) -> None:
    """
    Updates a partial with a list of documents

    Args:
        state: The state of the GUI
        docs: A list of documents
    """
    updated_part = ""
    for doc in docs:
        title = doc["title"]
        summary = doc["summary"]
        link = doc["link"]
        updated_part += f"""
<a href="{link}" target="_blank">
<h3>{title}</h3>
</a>
<p>{summary}</p>
<br/>
"""
    state.p.update_content(state, updated_part)


# Markdown for the entire page
## <text|
## |text>
## "text" here is just a name given to my part/my section
## it has no meaning in the code
page = """
<|container|
# **Generate**{: .color-primary} Tweets

This mini-app generates Tweets using OpenAI's GPT-3 based [Davinci model](https://beta.openai.com/docs/models/overview) for texts and [DALL·E](https://beta.openai.com/docs/guides/images) for images. You can find the code on [GitHub](https://github.com/Avaiga/demo-tweet-generation) and the original author on [Twitter](https://twitter.com/kinosal).

<br/>

<a href="{azaz}" target="_blank">
<h3>{sqdqs}</h3>
</a>
<p>{qfqffqs}</p>
<br/>

<|layout|columns=1 1 1|gap=30px|class_name=card|
<topic|
## **Topic**{: .color-primary} (or hashtag)
<|{topic}|input|label=Topic (or hashtag)|>
|topic>

<mood|
## **Mood**{: .color-primary}
<|{mood}|input|label=Mood (e.g. inspirational, funny, serious) (optional)|>
|mood>

<style|
## Twitter **account**{: .color-primary}
<|{style}|input|label=Twitter account handle to style-copy recent Tweets (optional)|>
|style>

Create a Generate text button
<|Generate text|button|on_action=generate_text|label=Generate text|>
<|Feeling lucky|button|on_action=feeling_lucky|label=Feeling Lucky|>
|>

<br/>

---

<br/>

### Generated **Tweet**{: .color-primary}

Create a text input for the tweet
<|{tweet}|input|multiline|label=Resulting tweet|class_name=fullwidth|>

<center><|Generate image|button|on_action=generate_image|label=Generate image|active={prompt!="" and tweet!=""}|></center>

<image|part|render={prompt != "" and tweet != "" and image is not None}|class_name=card|
### **Image**{: .color-primary} from Dall-e
Display image
<center><|{image}|image|height=400px|></center>
|image>

Break line
<br/>

**Code from [@kinosal](https://twitter.com/kinosal)**

Original code can be found [here](https://github.com/kinosal/tweet)
|>
"""

if __name__ == "__main__":
    Gui(page).run(dark_mode=False, port=5089)
|
# Create app for py2jsonl3.py
import os
import json
EXCLUDED_FILES = ["CODE_OF_CONDUCT.md", "CONTRIBUTING.md", "INSTALLATION.md", "README.md"]
def find_files(directory, extensions):
for root, dirs, files in os.walk(directory):
for file in files:
if file.endswith(extensions) and file not in EXCLUDED_FILES:
yield os.path.join(root, file)
def extract_content(file_path):
with open(file_path, 'r', encoding='utf-8') as file:
return file.read()
def write_to_jsonl(output_file, data):
with open(output_file, 'a', encoding='utf-8') as file:
json_record = json.dumps(data)
file.write(json_record + '\n')
def main(directory, output_file):
for file_path in find_files(directory, ('.py', '.md')):
file_content = extract_content(file_path)
file_comment = f"# Create app for {os.path.basename(file_path)}"
data = {"text": file_comment + '\n' + file_content}
write_to_jsonl(output_file, data)
directory = 'taipy_repos3' # Replace with the path to your directory
output_file = 'output.jsonl' # Name of the output JSONL file
main(directory, output_file)
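# A quick sanity check (an assumption, not part of the script): every line of the
# output file is a JSON object with a single "text" field whose first line is the
# generated "# Create app for <filename>" comment, followed by the file's content.
with open(output_file, encoding="utf-8") as f:
    first_record = json.loads(f.readline())
print(first_record["text"].splitlines()[0])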
|
# Create app for demo-remove-background main.py
from taipy.gui import Gui, notify
from rembg import remove
from PIL import Image
from io import BytesIO
path_upload = ""
path_download = "fixed_img.png"
original_image = None
fixed_image = None
fixed = False
page = """<|toggle|theme|>
<page|layout|columns=300px 1fr|
<|sidebar|
### Removing **Background**{: .color-primary} from your image
<br/>
Upload and download
<|{path_upload}|file_selector|on_action=fix_image|extensions=.png,.jpg|label=Upload original image|>
<br/>
Download it here
<|{path_download}|file_download|label=Download fixed image|active={fixed}|>
|>
<|container|
# Image Background **Eliminator**{: .color-primary}
🐶 Give it a try by uploading an image to witness the seamless removal of the background. You can download images in full quality from the sidebar.
This code is open source and accessible on [GitHub](https://github.com/Avaiga/demo-remove-background).
<br/>
<images|layout|columns=1 1|
<col1|card text-center|part|render={fixed}|
### Original Image 📷
<|{original_image}|image|>
|col1>
<col2|card text-center|part|render={fixed}|
### Fixed Image 🔧
<|{fixed_image}|image|>
|col2>
|images>
|>
|page>
"""
def convert_image(img):
buf = BytesIO()
img.save(buf, format="PNG")
byte_im = buf.getvalue()
return byte_im
def fix_image(state):
notify(state, 'info', 'Uploading original image...')
image = Image.open(state.path_upload)
notify(state, 'info', 'Removing the background...')
fixed_image = remove(image)
fixed_image.save("fixed_img.png")
notify(state, 'success', 'Background removed successfully!')
state.original_image = convert_image(image)
state.fixed_image = convert_image(fixed_image)
state.fixed = True
if __name__ == "__main__":
Gui(page=page).run(margin="0px", title='Background Remover')
|
# Create app for demo-tweet-generation oai.py
"""OpenAI API connector."""
# Import from standard library
import os
import logging
# Import from 3rd party libraries
import openai
# Assign credentials from environment variable or streamlit secrets dict
openai.api_key = "Enter your token here"
# Suppress openai request/response logging
# Handle by manually changing the respective APIRequestor methods in the openai package
# Does not work hosted on Streamlit since all packages are re-installed by Poetry
# Alternatively (affects all messages from this logger):
logging.getLogger("openai").setLevel(logging.WARNING)
class Openai:
"""OpenAI Connector."""
@staticmethod
def moderate(prompt: str) -> bool:
"""Call OpenAI GPT Moderation with text prompt.
Args:
prompt: text prompt
Return: boolean if flagged
"""
try:
response = openai.Moderation.create(prompt)
return response["results"][0]["flagged"]
except Exception as e:
logging.error(f"OpenAI API error: {e}")
@staticmethod
def complete(prompt: str, temperature: float = 0.9, max_tokens: int = 50) -> str:
"""Call OpenAI GPT Completion with text prompt.
Args:
prompt: text prompt
Return: predicted response text
"""
kwargs = {
"engine": "text-davinci-003",
"prompt": prompt,
"temperature": temperature,
"max_tokens": max_tokens,
"top_p": 1, # default
"frequency_penalty": 0, # default,
"presence_penalty": 0, # default
}
try:
response = openai.Completion.create(**kwargs)
return response["choices"][0]["text"]
except Exception as e:
logging.error(f"OpenAI API error: {e}")
@staticmethod
def image(prompt: str) -> str:
"""Call OpenAI Image Create with text prompt.
Args:
prompt: text prompt
Return: image url
"""
try:
response = openai.Image.create(
prompt=prompt,
n=1,
size="512x512",
response_format="url",
)
return response["data"][0]["url"]
except Exception as e:
logging.error(f"OpenAI API error: {e}")
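# A minimal usage sketch (an assumption, not part of the connector): the class above
# can be exercised directly once a real API key is configured; the prompt strings
# below are illustrative only.
if __name__ == "__main__":
    client = Openai()
    demo_prompt = "Write a short tweet about open-source dashboards:"
    if not client.moderate(demo_prompt):
        print(client.complete(demo_prompt, temperature=0.7, max_tokens=60))
        print(client.image("A minimalist dashboard on a laptop screen"))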
|
# Create app for demo-tweet-generation main.py
# Import from standard library
import logging
import random
import re
# Import from 3rd party libraries
from taipy.gui import Gui, notify
# Import modules
import oai
# Configure logger
logging.basicConfig(format="\n%(asctime)s\n%(message)s", level=logging.INFO, force=True)
def error_prompt_flagged(state, prompt):
"""Notify user that a prompt has been flagged."""
notify(state, "error", "Prompt flagged as inappropriate.")
logging.info(f"Prompt flagged as inappropriate: {prompt}")
def error_too_many_requests(state):
"""Notify user that too many requests have been made."""
notify(state, "error", "Too many requests. Please wait a few seconds before generating another text or image.")
logging.info(f"Session request limit reached: {state.n_requests}")
state.n_requests = 1
# Define functions
def generate_text(state):
"""Generate Tweet text."""
state.tweet = ""
state.image = None
# Check the number of requests done by the user
if state.n_requests >= 5:
error_too_many_requests(state)
return
# Check if the user has put a topic
if state.topic == "":
notify(state, "error", "Please enter a topic")
return
# Create the prompt and add a style or not
    if state.style != "":
        state.prompt = (
            f"Write a {state.mood} Tweet about {state.topic} in less than 120 characters "
            f"and with the style of {state.style}:\n\n\n\n"
        )
    else:
        state.prompt = f"Write a {state.mood} Tweet about {state.topic} in less than 120 characters:\n\n"
# openai configured and check if text is flagged
openai = oai.Openai()
flagged = openai.moderate(state.prompt)
if flagged:
error_prompt_flagged(state, f"Prompt: {state.prompt}\n")
return
else:
# Generate the tweet
state.n_requests += 1
state.tweet = (
openai.complete(state.prompt).strip().replace('"', "")
)
# Notify the user in console and in the GUI
logging.info(
f"Topic: {state.prompt}{state.mood}{state.style}\n"
f"Tweet: {state.tweet}"
)
notify(state, "success", "Tweet created!")
def generate_image(state):
"""Generate Tweet image."""
notify(state, "info", "Generating image...")
# Check the number of requests done by the user
if state.n_requests >= 5:
error_too_many_requests(state)
return
state.image = None
# Creates the prompt
prompt_wo_hashtags = re.sub("#[A-Za-z0-9_]+", "", state.prompt)
processing_prompt = (
"Create a detailed but brief description of an image that captures "
f"the essence of the following text:\n{prompt_wo_hashtags}\n\n"
)
# Openai configured and check if text is flagged
openai = oai.Openai()
flagged = openai.moderate(processing_prompt)
if flagged:
error_prompt_flagged(state, processing_prompt)
return
else:
state.n_requests += 1
# Generate the prompt that will create the image
processed_prompt = (
openai.complete(
prompt=processing_prompt, temperature=0.5, max_tokens=40
)
.strip()
.replace('"', "")
.split(".")[0]
+ "."
)
# Generate the image
state.image = openai.image(processed_prompt)
# Notify the user in console and in the GUI
logging.info(f"Tweet: {state.prompt}\nImage prompt: {processed_prompt}")
notify(state, "success", f"Image created!")
# Variables
tweet = ""
prompt = ""
n_requests = 0
topic = "AI"
mood = "inspirational"
style = "elonmusk"
image = None
# Called whenever there is a problem
def on_exception(state, function_name: str, ex: Exception):
logging.error(f"Problem {ex} \nin {function_name}")
notify(state, 'error', f"Problem {ex} \nin {function_name}")
# Markdown for the entire page
## <text|
## |text>
## "text" here is just a name given to my part/my section
## it has no meaning in the code
page = """
<|container|
# **Generate**{: .color-primary} Tweets
This mini-app generates Tweets using OpenAI's GPT-3 based [Davinci model](https://beta.openai.com/docs/models/overview) for texts and [DALL·E](https://beta.openai.com/docs/guides/images) for images. You can find the code on [GitHub](https://github.com/Avaiga/demo-tweet-generation) and the original author on [Twitter](https://twitter.com/kinosal).
<br/>
<|layout|columns=1 1 1|gap=30px|class_name=card|
<topic|
## **Topic**{: .color-primary} (or hashtag)
<|{topic}|input|label=Topic (or hashtag)|>
|topic>
<mood|
## **Mood**{: .color-primary}
<|{mood}|input|label=Mood (e.g. inspirational, funny, serious) (optional)|>
|mood>
<style|
## Twitter **account**{: .color-primary}
<|{style}|input|label=Twitter account handle to style-copy recent Tweets (optional)|>
|style>
<|Generate text|button|on_action=generate_text|label=Generate text|>
|>
<br/>
---
<br/>
### Generated **Tweet**{: .color-primary}
<|{tweet}|input|multiline|label=Resulting tweet|class_name=fullwidth|>
<center><|Generate image|button|on_action=generate_image|label=Generate image|active={prompt!="" and tweet!=""}|></center>
<image|part|render={prompt != "" and tweet != "" and image is not None}|class_name=card|
### **Image**{: .color-primary} from Dall-e
<center><|{image}|image|height=400px|></center>
|image>
<br/>
**Code from [@kinosal](https://twitter.com/kinosal)**
Original code can be found [here](https://github.com/kinosal/tweet)
|>
"""
if __name__ == "__main__":
Gui(page).run(title='Tweet Generation')
|
# Create app for demo-realtime-pollution sender.py
# echo-client.py
import math
import time
import socket
import pickle
import numpy as np
HOST = "127.0.0.1"
PORT = 65432
init_lat = 49.247
init_long = 1.377
factory_lat = 49.246
factory_long = 1.369
diff_lat = abs(init_lat - factory_lat) * 15
diff_long = abs(init_long - factory_long) * 15
lats_unique = np.arange(init_lat - diff_lat, init_lat + diff_lat, 0.001)
longs_unique = np.arange(init_long - diff_long, init_long + diff_long, 0.001)
countdown = 20
def pollution(lat: float, long: float):
"""
Return pollution level in percentage
Pollution should be centered around the factory
Pollution should decrease with distance to factory
Pollution should have an added random component
Args:
- lat: latitude
- long: longitude
Returns:
- pollution level
"""
global countdown
return 80 * (0.5 + 0.5 * math.sin(countdown / 20)) * math.exp(
-(0.8 * (lat - factory_lat) ** 2 + 0.2 * (long - factory_long) ** 2) / 0.00005
) + np.random.randint(0, 50)
lats = []
longs = []
pollutions = []
for lat in lats_unique:
for long in longs_unique:
lats.append(lat)
longs.append(long)
pollutions.append(pollution(lat, long))
def update():
"""
Update the pollution levels
"""
for i, _ in enumerate(lats):
pollutions[i] = pollution(lats[i], longs[i])
return pollutions
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.connect((HOST, PORT))
while True:
data = pickle.dumps(pollutions)
s.sendall(data)
print(f"Sent Data: {pollutions[:5]}")
pollutions = update()
countdown += 5
time.sleep(5)
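# Optional hardening sketch (an assumption, not part of the demo): the exchange above
# pickles the whole list and relies on each message arriving in a single recv() on the
# receiver side. A length prefix makes the framing explicit; pickle is already imported
# above, struct is added here.
import struct

def send_msg(sock, obj):
    payload = pickle.dumps(obj)
    sock.sendall(struct.pack(">I", len(payload)) + payload)

def recv_msg(sock):
    header = sock.recv(4)
    if not header:
        return None
    (length,) = struct.unpack(">I", header)
    chunks = b""
    while len(chunks) < length:
        chunks += sock.recv(length - len(chunks))
    return pickle.loads(chunks)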
|
# Create app for demo-realtime-pollution receiver.py
import socket
import pickle
import math
from threading import Thread
from taipy.gui import Gui, State, invoke_callback, get_state_id
import numpy as np
import pandas as pd
init_lat = 49.247
init_long = 1.377
factory_lat = 49.246
factory_long = 1.369
diff_lat = abs(init_lat - factory_lat) * 15
diff_long = abs(init_long - factory_long) * 15
lats_unique = np.arange(init_lat - diff_lat, init_lat + diff_lat, 0.001)
longs_unique = np.arange(init_long - diff_long, init_long + diff_long, 0.001)
countdown = 20
periods = 0
line_data = pd.DataFrame({"Time": [], "Max AQI": []})
drone_data = pd.DataFrame(
{
"Drone ID": [43, 234, 32, 23, 5, 323, 12, 238, 21, 84],
"Battery Level": [
"86%",
"56%",
"45%",
"12%",
"85%",
"67%",
"34%",
"78%",
"90%",
"100%",
],
"AQI": [40, 34, 24, 22, 33, 45, 23, 34, 23, 34],
"Status": [
"Moving",
"Measuring",
"Measuring",
"Stopped",
"Measuring",
"Moving",
"Moving",
"Measuring",
"Measuring",
"Measuring",
],
}
)
HOST = "127.0.0.1"
PORT = 65432
layout_map = {
"mapbox": {
"style": "open-street-map",
"center": {"lat": init_lat, "lon": init_long},
"zoom": 13,
},
"dragmode": "false",
"margin": {"l": 0, "r": 0, "b": 0, "t": 0},
}
layout_line = {
"title": "Max Measured AQI over Time",
"yaxis": {"range": [0, 150]},
}
options = {
"opacity": 0.8,
"colorscale": "Bluered",
"zmin": 0,
"zmax": 140,
"colorbar": {"title": "AQI"},
"hoverinfo": "none",
}
config = {"scrollZoom": False, "displayModeBar": False}
def pollution(lat: float, long: float):
"""
Return pollution level in percentage
Pollution should be centered around the factory
Pollution should decrease with distance to factory
Pollution should have an added random component
Args:
- lat: latitude
- long: longitude
Returns:
- pollution level
"""
global countdown
return 80 * (0.5 + 0.5 * math.sin(countdown / 20)) * math.exp(
-(0.8 * (lat - factory_lat) ** 2 + 0.2 * (long - factory_long) ** 2) / 0.00005
) + np.random.randint(0, 50)
lats = []
longs = []
pollutions = []
times = []
max_pollutions = []
for lat in lats_unique:
for long in longs_unique:
lats.append(lat)
longs.append(long)
pollutions.append(pollution(lat, long))
data_province_displayed = pd.DataFrame(
{
"Latitude": lats,
"Longitude": longs,
"Pollution": pollutions,
}
)
max_pollution = data_province_displayed["Pollution"].max()
# Socket handler
def client_handler(gui: Gui, state_id_list: list):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((HOST, PORT))
s.listen()
conn, _ = s.accept()
while True:
if data := conn.recv(1024 * 1024):
pollutions = pickle.loads(data)
print(f"Data received: {pollutions[:5]}")
if hasattr(gui, "_server") and state_id_list:
invoke_callback(
gui,
state_id_list[0],
update_pollutions,
[pollutions],
)
else:
print("Connection closed")
break
# Gui declaration
state_id_list = []
Gui.add_shared_variable("pollutions")
def on_init(state: State):
    state_id = get_state_id(state)
    if state_id is not None and state_id != "":
state_id_list.append(state_id)
update_pollutions(state, pollutions)
def update_pollutions(state: State, val):
state.pollutions = val
state.data_province_displayed = pd.DataFrame(
{
"Latitude": lats,
"Longitude": longs,
"Pollution": state.pollutions,
}
)
# Add an hour to the time
state.periods = state.periods + 1
state.max_pollutions = state.max_pollutions + [max(state.pollutions)]
state.times = pd.date_range(
"2020-11-04", periods=len(state.max_pollutions), freq="H"
)
state.line_data = pd.DataFrame(
{
"Time": state.times,
"Max AQI": state.max_pollutions,
}
)
page = """
<|{data_province_displayed}|chart|type=densitymapbox|plot_config={config}|options={options}|lat=Latitude|lon=Longitude|layout={layout_map}|z=Pollution|mode=markers|class_name=map|height=40vh|>
<|layout|columns=1 2 2|
<|part|class_name=card|
**Max Measured AQI:**<br/><br/><br/>
<|{int(data_province_displayed["Pollution"].max())}|indicator|value={int(data_province_displayed["Pollution"].max())}|min=140|max=0|>
<br/><br/>
**Average Measured AQI:**<br/><br/><br/>
<|{int(data_province_displayed["Pollution"].mean())}|indicator|value={int(data_province_displayed["Pollution"].mean())}|min=140|max=0|>
|>
<|part|class_name=card|
<|{drone_data}|table|show_all=True|>
|>
<|part|class_name=card|
<|{line_data[-30:]}|chart|type=lines|x=Time|y=Max AQI|layout={layout_line}|height=40vh|>
|>
|>
"""
gui = Gui(page=page)
t = Thread(
target=client_handler,
args=(
gui,
state_id_list,
),
)
t.start()
gui.run(run_browser=False)
|
# Create app for demo-pyspark-penguin-app config.py
### app/config.py
import datetime as dt
import os
import subprocess
import sys
from pathlib import Path
import pandas as pd
import taipy as tp
from taipy import Config
SCRIPT_DIR = Path(__file__).parent
SPARK_APP_PATH = SCRIPT_DIR / "penguin_spark_app.py"
input_csv_path = str(SCRIPT_DIR / "penguins.csv")
# -------------------- Data Nodes --------------------
input_csv_path_cfg = Config.configure_data_node(id="input_csv_path", default_data=input_csv_path)
# Path to save the csv output of the spark app
output_csv_path_cfg = Config.configure_data_node(id="output_csv_path")
processed_penguin_df_cfg = Config.configure_parquet_data_node(
id="processed_penguin_df", validity_period=dt.timedelta(days=1)
)
species_cfg = Config.configure_data_node(id="species") # "Adelie", "Chinstrap", "Gentoo"
island_cfg = Config.configure_data_node(id="island") # "Biscoe", "Dream", "Torgersen"
sex_cfg = Config.configure_data_node(id="sex") # "male", "female"
output_cfg = Config.configure_json_data_node(
id="output",
)
# -------------------- Tasks --------------------
def spark_process(input_csv_path: str, output_csv_path: str) -> pd.DataFrame:
proc = subprocess.Popen(
[
str(Path(sys.executable).with_name("spark-submit")),
str(SPARK_APP_PATH),
"--input-csv-path",
input_csv_path,
"--output-csv-path",
output_csv_path,
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
try:
outs, errs = proc.communicate(timeout=15)
except subprocess.TimeoutExpired:
proc.kill()
outs, errs = proc.communicate()
if proc.returncode != os.EX_OK:
raise Exception("Spark training failed")
df = pd.read_csv(output_csv_path)
return df
def filter(penguin_df: pd.DataFrame, species: str, island: str, sex: str) -> dict:
df = penguin_df[(penguin_df.species == species) & (penguin_df.island == island) & (penguin_df.sex == sex)]
output = df[["bill_length_mm", "bill_depth_mm", "flipper_length_mm", "body_mass_g"]].to_dict(orient="records")
return output[0] if output else dict()
spark_process_task_cfg = Config.configure_task(
id="spark_process",
function=spark_process,
skippable=True,
input=[input_csv_path_cfg, output_csv_path_cfg],
output=processed_penguin_df_cfg,
)
filter_task_cfg = Config.configure_task(
id="filter",
function=filter,
skippable=True,
input=[processed_penguin_df_cfg, species_cfg, island_cfg, sex_cfg],
output=output_cfg,
)
scenario_cfg = Config.configure_scenario(
id="scenario", task_configs=[spark_process_task_cfg, filter_task_cfg]
)
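# A minimal headless sketch (an assumption, not shipped with the demo) of how the
# scenario configured above can be created and submitted without the GUI;
# "app/data/output.csv" is an illustrative path.
if __name__ == "__main__":
    tp.Core().run()
    scenario = tp.create_scenario(scenario_cfg)
    scenario.species.write("Adelie")
    scenario.island.write("Biscoe")
    scenario.sex.write("female")
    scenario.output_csv_path.write("app/data/output.csv")
    tp.submit(scenario)
    print(scenario.output.read())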
|
# Create app for demo-pyspark-penguin-app main.py
### app/main.py
from pathlib import Path
from typing import Optional
import taipy as tp
from config import scenario_cfg
from taipy.gui import Gui, notify
valid_features: dict[str, list[str]] = {
"species": ["Adelie", "Chinstrap", "Gentoo"],
"island": ["Torgersen", "Biscoe", "Dream"],
"sex": ["Male", "Female"],
}
selected_species = valid_features["species"][0]
selected_island = valid_features["island"][0]
selected_sex = valid_features["sex"][0]
selected_scenario: Optional[tp.Scenario] = None
data_dir = Path(__file__).with_name("data")
data_dir.mkdir(exist_ok=True)
def scenario_on_creation(state, id, payload):
_ = payload["config"]
date = payload["date"]
label = payload["label"]
properties = payload["properties"]
# Create scenario with selected configuration
scenario = tp.create_scenario(scenario_cfg, creation_date=date, name=label)
scenario.properties.update(properties)
# Write the selected GUI values to the scenario
scenario.species.write(state.selected_species)
scenario.island.write(state.selected_island)
scenario.sex.write(state.selected_sex.lower())
output_csv_file = data_dir / f"{scenario.id}.csv"
scenario.output_csv_path.write(str(output_csv_file))
notify(state, "S", f"Created {scenario.id}")
return scenario
def scenario_on_submission_change(state, submittable, details):
"""When the selected_scenario's submission status changes, reassign selected_scenario to force a GUI refresh."""
state.selected_scenario = submittable
selected_data_node = None
main_md = """
<|layout|columns=1 4|gap=1.5rem|
<lhs|part|
# Spark with **Taipy**{: .color-primary}
## Scenario
<|{selected_scenario}|scenario_selector|on_creation=scenario_on_creation|>
----------
## Scenario info
<|{selected_scenario}|scenario|on_submission_change=scenario_on_submission_change|>
|lhs>
<rhs|part|render={selected_scenario}|
## Selections
<selections|layout|columns=1 1 1 2|gap=1.5rem|
<|{selected_species}|selector|lov={valid_features["species"]}|dropdown|label=Species|>
<|{selected_island}|selector|lov={valid_features["island"]}|dropdown|label=Island|>
<|{selected_sex}|selector|lov={valid_features["sex"]}|dropdown|label=Sex|>
|selections>
----------
## Output
**<|{str(selected_scenario.output.read()) if selected_scenario and selected_scenario.output.is_ready_for_reading else 'Submit the scenario using the left panel.'}|text|raw|class_name=color-primary|>**
## Data node inspector
<|{selected_data_node}|data_node_selector|display_cycles=False|>
**Data node value:**
<|{str(selected_data_node.read()) if selected_data_node and selected_data_node.is_ready_for_reading else None}|>
<br/>
----------
## DAG
<|Scenario DAG|expandable|
<|{selected_scenario}|scenario_dag|>
|>
|rhs>
|>
"""
def on_change(state, var_name: str, var_value):
if var_name == "selected_species":
state.selected_scenario.species.write(var_value)
elif var_name == "selected_island":
state.selected_scenario.island.write(var_value)
elif var_name == "selected_sex":
state.selected_scenario.sex.write(var_value.lower())
if __name__ == "__main__":
tp.Core().run()
gui = Gui(main_md)
gui.run(title="Spark with Taipy")
|
# Create app for demo-pyspark-penguin-app penguin_spark_app.py
### app/penguin_spark_app.py
import argparse
import os
import sys
parser = argparse.ArgumentParser()
parser.add_argument("--input-csv-path", required=True, help="Path to the input penguin CSV file.")
parser.add_argument("--output-csv-path", required=True, help="Path to save the output CSV file.")
args = parser.parse_args()
import pyspark.pandas as ps
from pyspark.sql import SparkSession
def read_penguin_df(csv_path: str):
penguin_df = ps.read_csv(csv_path)
return penguin_df
def clean(df: ps.DataFrame) -> ps.DataFrame:
return df[df.sex.isin(["male", "female"])].dropna()
def process(df: ps.DataFrame) -> ps.DataFrame:
"""The mean of measured penguin values, grouped by island and sex."""
mean_df = df.groupby(by=["species", "island", "sex"]).agg("mean").drop(columns="year").reset_index()
return mean_df
if __name__ == "__main__":
spark = SparkSession.builder.appName("Mean Penguin").getOrCreate()
penguin_df = read_penguin_df(args.input_csv_path)
cleaned_penguin_df = clean(penguin_df)
processed_penguin_df = process(cleaned_penguin_df)
processed_penguin_df.to_pandas().to_csv(args.output_csv_path, index=False)
sys.exit(os.EX_OK)
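# For reference (paths are illustrative assumptions), app/config.py launches this
# script with a command equivalent to:
#
#   spark-submit app/penguin_spark_app.py \
#       --input-csv-path app/penguins.csv \
#       --output-csv-path app/data/output.csv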
|
# Create app for demo-dask-customer-analysis config.py
from taipy import Config
from algos.algo import (
preprocess_and_score,
featurization_and_segmentation,
segment_analysis,
high_value_cust_summary_statistics,
)
# -------------------- Data Nodes --------------------
path_to_data_cfg = Config.configure_data_node(id="path_to_data", default_data="data/customers_data.csv")
scored_df_cfg = Config.configure_data_node(id="scored_df")
payment_threshold_cfg = Config.configure_data_node(id="payment_threshold", default_data=1000)
score_threshold_cfg = Config.configure_data_node(id="score_threshold", default_data=1.5)
segmented_customer_df_cfg = Config.configure_data_node(id="segmented_customer_df")
metric_cfg = Config.configure_data_node(id="metric", default_data="mean")
segment_result_cfg = Config.configure_data_node(id="segment_result")
summary_statistic_type_cfg = Config.configure_data_node(id="summary_statistic_type", default_data="median")
high_value_summary_df_cfg = Config.configure_data_node(id="high_value_summary_df")
# -------------------- Tasks --------------------
preprocess_and_score_task_cfg = Config.configure_task(
id="preprocess_and_score",
function=preprocess_and_score,
skippable=True,
input=[path_to_data_cfg],
output=[scored_df_cfg],
)
featurization_and_segmentation_task_cfg = Config.configure_task(
id="featurization_and_segmentation",
function=featurization_and_segmentation,
skippable=True,
input=[scored_df_cfg, payment_threshold_cfg, score_threshold_cfg],
output=[segmented_customer_df_cfg],
)
segment_analysis_task_cfg = Config.configure_task(
id="segment_analysis",
function=segment_analysis,
skippable=True,
input=[segmented_customer_df_cfg, metric_cfg],
output=[segment_result_cfg],
)
high_value_cust_summary_statistics_task_cfg = Config.configure_task(
id="high_value_cust_summary_statistics",
function=high_value_cust_summary_statistics,
skippable=True,
    input=[segmented_customer_df_cfg, segment_result_cfg, summary_statistic_type_cfg],
output=[high_value_summary_df_cfg],
)
scenario_cfg = Config.configure_scenario(
id="scenario_1",
task_configs=[
preprocess_and_score_task_cfg,
featurization_and_segmentation_task_cfg,
segment_analysis_task_cfg,
high_value_cust_summary_statistics_task_cfg,
],
)
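# A minimal headless sketch (an assumption, not part of the demo) of submitting the
# scenario configured above with Taipy Core and reading the final data node.
import taipy as tp

if __name__ == "__main__":
    tp.Core().run()
    scenario = tp.create_scenario(scenario_cfg)
    tp.submit(scenario)
    print(scenario.high_value_summary_df.read())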
|
# Create app for demo-dask-customer-analysis algo.py
import time
import dask.dataframe as dd
import pandas as pd
def preprocess_and_score(path_to_original_data: str):
print("__________________________________________________________")
print("1. TASK 1: DATA PREPROCESSING AND CUSTOMER SCORING ...")
start_time = time.perf_counter() # Start the timer
# Step 1: Read data using Dask
df = dd.read_csv(path_to_original_data)
# Step 2: Simplify the customer scoring formula
df["CUSTOMER_SCORE"] = (
0.5 * df["TotalPurchaseAmount"] / 1000 + 0.3 * df["NumberOfPurchases"] / 10 + 0.2 * df["AverageReviewScore"]
)
# Save all customers to a new CSV file
scored_df = df[["CUSTOMER_SCORE", "TotalPurchaseAmount", "NumberOfPurchases", "TotalPurchaseTime"]]
pd_df = scored_df.compute()
end_time = time.perf_counter() # Stop the timer
execution_time = (end_time - start_time) * 1000 # Calculate the time in milliseconds
print(f"Time of Execution: {execution_time:.4f} ms")
return pd_df
def featurization_and_segmentation(scored_df, payment_threshold, score_threshold):
print("__________________________________________________________")
print("2. TASK 2: FEATURE ENGINEERING AND SEGMENTATION ...")
# payment_threshold, score_threshold = float(payment_threshold), float(score_threshold)
start_time = time.perf_counter() # Start the timer
df = scored_df
# Feature: Indicator if customer's total purchase is above the payment threshold
df["HighSpender"] = (df["TotalPurchaseAmount"] > payment_threshold).astype(int)
# Feature: Average time between purchases
df["AverageTimeBetweenPurchases"] = df["TotalPurchaseTime"] / df["NumberOfPurchases"]
# Additional computationally intensive features
df["Interaction1"] = df["TotalPurchaseAmount"] * df["NumberOfPurchases"]
df["Interaction2"] = df["TotalPurchaseTime"] * df["CUSTOMER_SCORE"]
df["PolynomialFeature"] = df["TotalPurchaseAmount"] ** 2
# Segment customers based on the score_threshold
df["ValueSegment"] = ["High Value" if score > score_threshold else "Low Value" for score in df["CUSTOMER_SCORE"]]
end_time = time.perf_counter() # Stop the timer
execution_time = (end_time - start_time) * 1000 # Calculate the time in milliseconds
print(f"Time of Execution: {execution_time:.4f} ms")
return df
def segment_analysis(df: pd.DataFrame, metric):
print("__________________________________________________________")
print("3. TASK 3: SEGMENT ANALYSIS ...")
start_time = time.perf_counter() # Start the timer
# Detailed analysis for each segment: mean/median of various metrics
segment_analysis = (
df.groupby("ValueSegment")
.agg(
{
"CUSTOMER_SCORE": metric,
"TotalPurchaseAmount": metric,
"NumberOfPurchases": metric,
"TotalPurchaseTime": metric,
"HighSpender": "sum", # Total number of high spenders in each segment
"AverageTimeBetweenPurchases": metric,
}
)
.reset_index()
)
end_time = time.perf_counter() # Stop the timer
execution_time = (end_time - start_time) * 1000 # Calculate the time in milliseconds
print(f"Time of Execution: {execution_time:.4f} ms")
return segment_analysis
def high_value_cust_summary_statistics(df: pd.DataFrame, segment_analysis: pd.DataFrame, summary_statistic_type: str):
print("__________________________________________________________")
print("4. TASK 4: ADDITIONAL ANALYSIS BASED ON SEGMENT ANALYSIS ...")
start_time = time.perf_counter() # Start the timer
# Filter out the High Value customers
high_value_customers = df[df["ValueSegment"] == "High Value"]
# Use summary_statistic_type to calculate different types of summary statistics
if summary_statistic_type == "mean":
average_purchase_high_value = high_value_customers["TotalPurchaseAmount"].mean()
elif summary_statistic_type == "median":
average_purchase_high_value = high_value_customers["TotalPurchaseAmount"].median()
elif summary_statistic_type == "max":
average_purchase_high_value = high_value_customers["TotalPurchaseAmount"].max()
elif summary_statistic_type == "min":
average_purchase_high_value = high_value_customers["TotalPurchaseAmount"].min()
median_score_high_value = high_value_customers["CUSTOMER_SCORE"].median()
# Fetch the summary statistic for 'TotalPurchaseAmount' for High Value customers from segment_analysis
segment_statistic_high_value = segment_analysis.loc[
segment_analysis["ValueSegment"] == "High Value", "TotalPurchaseAmount"
].values[0]
# Create a DataFrame to hold the results
result_df = pd.DataFrame(
{
"SummaryStatisticType": [summary_statistic_type],
"AveragePurchaseHighValue": [average_purchase_high_value],
"MedianScoreHighValue": [median_score_high_value],
"SegmentAnalysisHighValue": [segment_statistic_high_value],
}
)
end_time = time.perf_counter() # Stop the timer
execution_time = (end_time - start_time) * 1000 # Calculate the time in milliseconds
print(f"Time of Execution: {execution_time:.4f} ms")
return result_df
if __name__ == "__main__":
t1 = preprocess_and_score("data/customers_data.csv")
t2 = featurization_and_segmentation(t1, 1500, 1.5)
t3 = segment_analysis(t2, "mean")
t4 = high_value_cust_summary_statistics(t2, t3, "mean")
print(t4)
|
# Create app for demo-taipy-gui-starter-1 main.py
from taipy.gui import Gui
from math import cos, exp
page = """
# This is *Taipy* GUI
A value: <|{decay}|>.
A slider: <br/>
<|{decay}|slider|>
My chart:
<|{data}|chart|>
"""
def compute_data(decay):
return [cos(i/16) * exp(-i*decay/6000) for i in range(720)]
def on_change(state, var_name, var_value):
if var_name == 'decay':
state.data = compute_data(var_value)
decay = 10
data = compute_data(decay)
Gui(page=page).run(title='Taipy Demo GUI 1',
dark_mode=False)
|
# Create app for demo-churn-classification main.py
import pandas as pd
import taipy as tp
from taipy.gui import Gui, Icon, navigate
from config.config import scenario_cfg
from taipy.config import Config
from pages.main_dialog import *
import warnings
with warnings.catch_warnings():
warnings.simplefilter(action='ignore', category=FutureWarning)
# Load configuration
Config.load('config/config.toml')
scenario_cfg = Config.scenarios['churn_classification']
# Execute the scenario
tp.Core().run()
def create_first_scenario(scenario_cfg):
"""Create and submit the first scenario."""
scenario = tp.create_scenario(scenario_cfg)
tp.submit(scenario)
return scenario
scenario = create_first_scenario(scenario_cfg)
# Read datasets
train_dataset = scenario.train_dataset.read()
test_dataset = scenario.test_dataset.read()
roc_dataset = scenario.roc_data_ml.read()
# Process test dataset columns
test_dataset.columns = [str(column).upper() for column in test_dataset.columns]
# Prepare data for visualization
select_x = test_dataset.drop('EXITED',axis=1).columns.tolist()
x_selected = select_x[0]
select_y = select_x
y_selected = select_y[1]
# Read results and create charts
values = scenario.results_ml.read()
forecast_series = values['Forecast']
scatter_dataset_pred = creation_scatter_dataset_pred(test_dataset, forecast_series)
histo_full_pred = creation_histo_full_pred(test_dataset, forecast_series)
histo_full = creation_histo_full(test_dataset)
scatter_dataset = creation_scatter_dataset(test_dataset)
features_table = scenario.feature_importance_ml.read()
accuracy_graph, f1_score_graph, score_auc_graph = compare_models_baseline(scenario, ['ml', 'baseline'])
def create_charts(model_type):
"""Create pie charts and metrics for the given model type."""
metrics = c_update_metrics(scenario, model_type)
(number_of_predictions, accuracy, f1_score, score_auc,
number_of_good_predictions, number_of_false_predictions,
fp_, tp_, fn_, tn_) = metrics
pie_plotly = pd.DataFrame({
"values": [number_of_good_predictions, number_of_false_predictions],
"labels": ["Correct predictions", "False predictions"]
})
distrib_class = pd.DataFrame({
"values": [len(values[values["Historical"]==0]), len(values[values["Historical"]==1])],
"labels": ["Stayed", "Exited"]
})
score_table = pd.DataFrame({
"Score": ["Predicted stayed", "Predicted exited"],
"Stayed": [tn_, fp_],
"Exited": [fn_, tp_]
})
pie_confusion_matrix = pd.DataFrame({
"values": [tp_, tn_, fp_, fn_],
"labels": ["True Positive", "True Negative", "False Positive", "False Negative"]
})
return (number_of_predictions, number_of_false_predictions, number_of_good_predictions,
accuracy, f1_score, score_auc, pie_plotly, distrib_class, score_table, pie_confusion_matrix)
# Initialize charts
chart_metrics = create_charts('ml')
(number_of_predictions, number_of_false_predictions, number_of_good_predictions,
accuracy, f1_score, score_auc, pie_plotly, distrib_class, score_table, pie_confusion_matrix) = chart_metrics
def on_change(state, var_name, var_value):
"""Handle variable changes in the GUI."""
if var_name in ['x_selected', 'y_selected']:
update_histogram_and_scatter(state)
elif var_name == 'mm_algorithm_selected':
update_variables(state, var_value.lower())
elif var_name in ['mm_algorithm_selected', 'db_table_selected']:
handle_temp_csv_path(state)
# GUI initialization
menu_lov = [
("Data Visualization", Icon('images/histogram_menu.svg', 'Data Visualization')),
("Model Manager", Icon('images/model.svg', 'Model Manager')),
("Compare Models", Icon('images/compare.svg', 'Compare Models')),
('Databases', Icon('images/Datanode.svg', 'Databases'))
]
root_md = """
<|toggle|theme|>
<|menu|label=Menu|lov={menu_lov}|on_action=menu_fct|>
"""
page = "Data Visualization"
def menu_fct(state, var_name, var_value):
"""Function that is called when there is a change in the menu control."""
state.page = var_value['args'][0]
navigate(state, state.page.replace(" ", "-"))
def update_variables(state, model_type):
"""Update the different variables and dataframes used in the application."""
global scenario
state.values = scenario.data_nodes[f'results_{model_type}'].read()
state.forecast_series = state.values['Forecast']
metrics = c_update_metrics(scenario, model_type)
(state.number_of_predictions, state.accuracy, state.f1_score, state.score_auc,
number_of_good_predictions, number_of_false_predictions, fp_, tp_, fn_, tn_) = metrics
update_charts(state, model_type, number_of_good_predictions, number_of_false_predictions, fp_, tp_, fn_, tn_)
def update_charts(state, model_type, number_of_good_predictions, number_of_false_predictions, fp_, tp_, fn_, tn_):
"""This function updates all the charts of the GUI.
Args:
state: object containing all the variables used in the GUI
model_type (str): the name of the model_type shown
number_of_good_predictions (int): number of good predictions
number_of_false_predictions (int): number of false predictions
fp_ (float): false positive rate
tp_ (float): true positive rate
fn_ (float): false negative rate
tn_ (float): true negative rate
"""
state.roc_dataset = scenario.data_nodes[f'roc_data_{model_type}'].read()
state.features_table = scenario.data_nodes[f'feature_importance_{model_type}'].read()
state.score_table = pd.DataFrame({"Score":["Predicted stayed", "Predicted exited"],
"Stayed": [tn_, fp_],
"Exited" : [fn_, tp_]})
state.pie_confusion_matrix = pd.DataFrame({"values": [tp_, tn_, fp_, fn_],
"labels" : ["True Positive", "True Negative", "False Positive", "False Negative"]})
state.scatter_dataset_pred = creation_scatter_dataset_pred(test_dataset, state.forecast_series)
state.histo_full_pred = creation_histo_full_pred(test_dataset, state.forecast_series)
# pie charts
state.pie_plotly = pd.DataFrame({"values": [number_of_good_predictions, number_of_false_predictions],
"labels": ["Correct predictions", "False predictions"]})
state.distrib_class = pd.DataFrame({"values": [len(state.values[state.values["Historical"]==0]),
len(state.values[state.values["Historical"]==1])],
"labels" : ["Stayed", "Exited"]})
def on_init(state):
update_histogram_and_scatter(state)
# Define pages
pages = {
"/": root_md + dialog_md,
"Data-Visualization": dv_data_visualization_md,
"Model-Manager": mm_model_manager_md,
"Compare-Models": cm_compare_models_md,
"Databases": db_databases_md,
}
# Run the GUI
if __name__ == '__main__':
gui = Gui(pages=pages)
gui.run(title="Churn classification", dark_mode=False, port=8494)
|
# Create app for demo-churn-classification config.py
from algos.algos import *
from taipy import Config, Scope
##############################################################################################################################
# Creation of the datanodes
##############################################################################################################################
# How to connect to the database
path_to_csv = 'data/churn.csv'
# path for csv and file_path for pickle
initial_dataset_cfg = Config.configure_data_node(id="initial_dataset",
path=path_to_csv,
storage_type="csv",
has_header=True)
date_cfg = Config.configure_data_node(id="date", default_data="None")
preprocessed_dataset_cfg = Config.configure_data_node(id="preprocessed_dataset")
# the final datanode that contains the processed data
train_dataset_cfg = Config.configure_data_node(id="train_dataset")
# the final datanode that contains the processed data
trained_model_ml_cfg = Config.configure_data_node(id="trained_model_ml")
trained_model_baseline_cfg= Config.configure_data_node(id="trained_model_baseline")
# the final datanode that contains the processed data
test_dataset_cfg = Config.configure_data_node(id="test_dataset")
forecast_dataset_ml_cfg = Config.configure_data_node(id="forecast_dataset_ml")
forecast_dataset_baseline_cfg = Config.configure_data_node(id="forecast_dataset_baseline")
roc_data_ml_cfg = Config.configure_data_node(id="roc_data_ml")
roc_data_baseline_cfg = Config.configure_data_node(id="roc_data_baseline")
score_auc_ml_cfg = Config.configure_data_node(id="score_auc_ml")
score_auc_baseline_cfg = Config.configure_data_node(id="score_auc_baseline")
metrics_ml_cfg = Config.configure_data_node(id="metrics_ml")
metrics_baseline_cfg = Config.configure_data_node(id="metrics_baseline")
feature_importance_ml_cfg = Config.configure_data_node(id="feature_importance_ml")
feature_importance_baseline_cfg = Config.configure_data_node(id="feature_importance_baseline")
results_ml_cfg = Config.configure_data_node(id="results_ml")
results_baseline_cfg = Config.configure_data_node(id="results_baseline")
##############################################################################################################################
# Creation of the tasks
##############################################################################################################################
# the task will make the link between the input data node
# and the output data node while executing the function
# initial_dataset --> preprocess dataset --> preprocessed_dataset
task_preprocess_dataset_cfg = Config.configure_task(id="preprocess_dataset",
input=[initial_dataset_cfg,date_cfg],
function=preprocess_dataset,
output=preprocessed_dataset_cfg)
# preprocessed_dataset --> create train data --> train_dataset, test_dataset
task_create_train_test_cfg = Config.configure_task(id="create_train_and_test_data",
input=preprocessed_dataset_cfg,
function=create_train_test_data,
output=[train_dataset_cfg, test_dataset_cfg])
# train_dataset --> create train_model data --> trained_model
task_train_model_baseline_cfg = Config.configure_task(id="train_model_baseline",
input=train_dataset_cfg,
function=train_model_baseline,
output=[trained_model_baseline_cfg,feature_importance_baseline_cfg])
# train_dataset --> create train_model data --> trained_model
task_train_model_ml_cfg = Config.configure_task(id="train_model_ml",
input=train_dataset_cfg,
function=train_model_ml,
output=[trained_model_ml_cfg,feature_importance_ml_cfg])
# test_dataset --> forecast --> forecast_dataset
task_forecast_baseline_cfg = Config.configure_task(id="predict_the_test_data_baseline",
input=[test_dataset_cfg, trained_model_baseline_cfg],
function=forecast,
output=forecast_dataset_baseline_cfg)
# test_dataset --> forecast --> forecast_dataset
task_forecast_ml_cfg = Config.configure_task(id="predict_the_test_data_ml",
input=[test_dataset_cfg, trained_model_ml_cfg],
function=forecast,
output=forecast_dataset_ml_cfg)
task_roc_ml_cfg = Config.configure_task(id="task_roc_ml",
input=[forecast_dataset_ml_cfg, test_dataset_cfg],
function=roc_from_scratch,
output=[roc_data_ml_cfg,score_auc_ml_cfg])
task_roc_baseline_cfg = Config.configure_task(id="task_roc_baseline",
input=[forecast_dataset_baseline_cfg, test_dataset_cfg],
function=roc_from_scratch,
output=[roc_data_baseline_cfg,score_auc_baseline_cfg])
task_create_metrics_baseline_cfg = Config.configure_task(id="task_create_metrics_baseline",
input=[forecast_dataset_baseline_cfg,test_dataset_cfg],
function=create_metrics,
output=metrics_baseline_cfg)
task_create_metrics_ml_cfg = Config.configure_task(id="task_create_metrics",
input=[forecast_dataset_ml_cfg,test_dataset_cfg],
function=create_metrics,
output=metrics_ml_cfg)
task_create_results_baseline_cfg = Config.configure_task(id="task_create_results_baseline",
input=[forecast_dataset_baseline_cfg,test_dataset_cfg],
function=create_results,
output=results_baseline_cfg)
task_create_results_ml_cfg = Config.configure_task(id="task_create_results_ml",
input=[forecast_dataset_ml_cfg,test_dataset_cfg],
function=create_results,
output=results_ml_cfg)
##############################################################################################################################
# Creation of the scenario
##############################################################################################################################
scenario_cfg = Config.configure_scenario(id="churn_classification",
task_configs=[task_create_metrics_baseline_cfg,
task_create_metrics_ml_cfg,
task_create_results_baseline_cfg,
task_create_results_ml_cfg,
task_forecast_baseline_cfg,
task_forecast_ml_cfg,
task_roc_ml_cfg,
task_roc_baseline_cfg,
task_train_model_baseline_cfg,
task_train_model_ml_cfg,
task_preprocess_dataset_cfg,
task_create_train_test_cfg])
Config.export('config/config.toml')
|
# Create app for demo-churn-classification algos.py
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_auc_score
import datetime as dt
import pandas as pd
import numpy as np
##############################################################################################################################
# Function used in the tasks
##############################################################################################################################
def preprocess_dataset(initial_dataset: pd.DataFrame, date: dt.datetime="None"):
"""This function preprocess the dataset to be used in the model
Args:
initial_dataset (pd.DataFrame): the raw format when we first read the data
Returns:
pd.DataFrame: the preprocessed dataset for classification
"""
print("\n Preprocessing the dataset...")
#We filter the dataframe on the date
if date != "None":
initial_dataset['Date'] = pd.to_datetime(initial_dataset['Date'])
processed_dataset = initial_dataset[initial_dataset['Date'] <= date]
print(len(processed_dataset))
else:
processed_dataset = initial_dataset
processed_dataset = processed_dataset[['CreditScore','Geography','Gender','Age','Tenure','Balance','NumOfProducts','HasCrCard','IsActiveMember','EstimatedSalary','Exited']]
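    # one-hot encode the categorical columns (Geography and Gender)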
processed_dataset = pd.get_dummies(processed_dataset)
if 'Gender_Female' in processed_dataset.columns:
processed_dataset.drop('Gender_Female',axis=1,inplace=True)
processed_dataset = processed_dataset.apply(pd.to_numeric)
columns_to_select = ['CreditScore', 'Age', 'Tenure', 'Balance', 'NumOfProducts', 'HasCrCard',
'IsActiveMember', 'EstimatedSalary', 'Geography_France', 'Geography_Germany',
'Geography_Spain', 'Gender_Male','Exited']
processed_dataset = processed_dataset[[col for col in columns_to_select if col in processed_dataset.columns]]
print(" Preprocessing done!\n")
return processed_dataset
def create_train_test_data(preprocessed_dataset: pd.DataFrame):
"""This function will create the train data by segmenting the dataset
Args:
preprocessed_dataset (pd.DataFrame): the preprocessed dataset
Returns:
pd.DataFrame: the training dataset
"""
print("\n Creating the training and testing dataset...")
X_train, X_test, y_train, y_test = train_test_split(preprocessed_dataset.iloc[:,:-1],preprocessed_dataset.iloc[:,-1],test_size=0.2,random_state=42)
train_data = pd.concat([X_train,y_train],axis=1)
test_data = pd.concat([X_test,y_test],axis=1)
print(" Creating done!")
return train_data, test_data
def train_model_baseline(train_dataset: pd.DataFrame):
"""Function to train the Logistic Regression model
Args:
train_dataset (pd.DataFrame): the training dataset
Returns:
model (LogisticRegression): the fitted model
"""
print(" Training the model...\n")
X,y = train_dataset.iloc[:,:-1],train_dataset.iloc[:,-1]
model_fitted = LogisticRegression().fit(X,y)
print("\n ",model_fitted," is trained!")
importance_dict = {'Features' : X.columns, 'Importance':model_fitted.coef_[0]}
importance = pd.DataFrame(importance_dict).sort_values(by='Importance',ascending=True)
return model_fitted, importance
def train_model_ml(train_dataset: pd.DataFrame):
"""Function to train the Logistic Regression model
Args:
train_dataset (pd.DataFrame): the training dataset
Returns:
model (RandomForest): the fitted model
"""
print(" Training the model...\n")
X,y = train_dataset.iloc[:,:-1],train_dataset.iloc[:,-1]
model_fitted = RandomForestClassifier().fit(X,y)
print("\n ",model_fitted," is trained!")
importance_dict = {'Features' : X.columns, 'Importance':model_fitted.feature_importances_}
importance = pd.DataFrame(importance_dict).sort_values(by='Importance',ascending=True)
return model_fitted, importance
def forecast(test_dataset: pd.DataFrame, trained_model: RandomForestClassifier):
"""Function to forecast the test dataset
Args:
test_dataset (pd.DataFrame): the test dataset
        trained_model: the fitted model (baseline LogisticRegression or RandomForestClassifier)
Returns:
        predictions (np.array): the predicted churn probabilities for the test rows
"""
print(" Forecasting the test dataset...")
X,y = test_dataset.iloc[:,:-1],test_dataset.iloc[:,-1]
#predictions = trained_model.predict(X)
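    # keep only the probability of the positive class (Exited == 1) for each row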
predictions = trained_model.predict_proba(X)[:, 1]
print(" Forecasting done!")
return predictions
def roc_from_scratch(probabilities, test_dataset, partitions=100):
print(" Calculation of the ROC curve...")
y_test = test_dataset.iloc[:,-1]
roc = np.array([])
for i in range(partitions + 1):
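        # sweep thresholds from 0 to 1; a row is classified positive when its probability >= i / partitions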
threshold_vector = np.greater_equal(probabilities, i / partitions).astype(int)
tpr, fpr = true_false_positive(threshold_vector, y_test)
roc = np.append(roc, [fpr, tpr])
roc_np = roc.reshape(-1, 2)
roc_data = pd.DataFrame({"False positive rate": roc_np[:, 0], "True positive rate": roc_np[:, 1]})
print(" Calculation done")
print(" Scoring...")
score_auc = roc_auc_score(y_test, probabilities)
print(" Scoring done\n")
return roc_data, score_auc
def true_false_positive(threshold_vector:np.array, y_test:np.array):
"""Function to calculate the true positive rate and the false positive rate
    Args:
        threshold_vector (np.array): binary predictions at the current threshold
        y_test (np.array): the true labels
    Returns:
        tpr (float): the true positive rate
        fpr (float): the false positive rate
"""
true_positive = np.equal(threshold_vector, 1) & np.equal(y_test, 1)
true_negative = np.equal(threshold_vector, 0) & np.equal(y_test, 0)
false_positive = np.equal(threshold_vector, 1) & np.equal(y_test, 0)
false_negative = np.equal(threshold_vector, 0) & np.equal(y_test, 1)
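    # TPR = TP / (TP + FN); FPR = FP / (FP + TN)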
tpr = true_positive.sum() / (true_positive.sum() + false_negative.sum())
fpr = false_positive.sum() / (false_positive.sum() + true_negative.sum())
return tpr, fpr
def create_metrics(predictions: np.array, test_dataset: pd.DataFrame):
print(" Creating the metrics...")
threshold = 0.5
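    # binarize the predicted probabilities at the fixed 0.5 decision threshold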
threshold_vector = np.greater_equal(predictions, threshold).astype(int)
y_test = test_dataset.iloc[:,-1]
true_positive = (np.equal(threshold_vector, 1) & np.equal(y_test, 1)).sum()
true_negative = (np.equal(threshold_vector, 0) & np.equal(y_test, 0)).sum()
false_positive = (np.equal(threshold_vector, 1) & np.equal(y_test, 0)).sum()
false_negative = (np.equal(threshold_vector, 0) & np.equal(y_test, 1)).sum()
f1_score = np.around(2*true_positive/(2*true_positive+false_positive+false_negative), decimals=2)
accuracy = np.around((true_positive+true_negative)/(true_positive+true_negative+false_positive+false_negative), decimals=2)
dict_ftpn = {"tp": true_positive, "tn": true_negative, "fp": false_positive, "fn": false_negative}
number_of_good_predictions = true_positive + true_negative
number_of_false_predictions = false_positive + false_negative
metrics = {"f1_score": f1_score,
"accuracy": accuracy,
"dict_ftpn": dict_ftpn,
'number_of_predictions': len(predictions),
'number_of_good_predictions':number_of_good_predictions,
'number_of_false_predictions':number_of_false_predictions}
return metrics
def create_results(forecast_values,test_dataset):
forecast_series_proba = pd.Series(np.around(forecast_values,decimals=2), index=test_dataset.index, name='Probability')
forecast_series = pd.Series((forecast_values>0.5).astype(int), index=test_dataset.index, name='Forecast')
true_series = pd.Series(test_dataset.iloc[:,-1], name="Historical",index=test_dataset.index)
index_series = pd.Series(range(len(true_series)), index=test_dataset.index, name="Id")
results = pd.concat([index_series, forecast_series_proba, forecast_series, true_series], axis=1)
return results
|
# Create app for demo-churn-classification main_dialog.py
from pages.compare_models_md import *
from pages.data_visualization_md import *
from pages.databases_md import *
from pages.model_manager_md import *
dr_show_roc = False
dialog_md = """
<|dialog|open={dr_show_roc}|title=ROC Curve|on_action={lambda s: s.assign("dr_show_roc", False)}|labels=Close|width=1000px|
<|{roc_dataset}|chart|x=False positive rate|y[1]=True positive rate|label[1]=True positive rate|height=500px|width=900px|type=scatter|>
|>
"""
|
# Create app for demo-churn-classification databases_md.py
import pathlib
# This path points to a temporary CSV file used to download the selected table
tempdir = pathlib.Path(".tmp")
tempdir.mkdir(exist_ok=True)
PATH_TO_TABLE = str(tempdir / "table.csv")
# Selector to select the table to show
db_table_selector = ['Training Dataset', 'Test Dataset', 'Forecast Dataset', 'Confusion Matrix']
db_table_selected = db_table_selector[0]
def handle_temp_csv_path(state):
"""This function checks if the temporary csv file exists. If it does, it is deleted. Then, the temporary csv file
is created for the right table
Args:
state: object containing all the variables used in the GUI
"""
if state.db_table_selected == 'Test Dataset':
state.test_dataset.to_csv(PATH_TO_TABLE, sep=';')
if state.db_table_selected == 'Confusion Matrix':
state.score_table.to_csv(PATH_TO_TABLE, sep=';')
if state.db_table_selected == "Training Dataset":
state.train_dataset.to_csv(PATH_TO_TABLE, sep=';')
if state.db_table_selected == "Forecast Dataset":
state.values.to_csv(PATH_TO_TABLE, sep=';')
# Aggregation of the strings to create the complete page
db_databases_md = """
# Data**bases**{: .color-primary}
<|layout|columns=2 2 1|
<|{mm_algorithm_selected}|selector|lov={mm_algorithm_selector}|dropdown|label=Algorithm|active=False|>
<|{db_table_selected}|selector|lov={db_table_selector}|dropdown|label=Table|>
<|{PATH_TO_TABLE}|file_download|name=table.csv|label=Download table|>
|>
<Confusion|part|render={db_table_selected=='Confusion Matrix'}|
<|{score_table}|table|width=fit-content|show_all|class_name=ml-auto mr-auto|>
|Confusion>
<Training|part|render={db_table_selected=='Training Dataset'}|
<|{train_dataset}|table|>
|Training>
<Forecast|part|render={db_table_selected=='Forecast Dataset'}|
<|{values}|table|width=fit-content|style={lambda s,i,r: 'red_color' if r['Historical']!=r['Forecast'] else 'green_color'}|class_name=ml-auto mr-auto|>
|Forecast>
<test_dataset|part|render={db_table_selected=='Test Dataset'}|
<|{test_dataset}|table|>
|test_dataset>
"""
|
# Create app for demo-churn-classification data_visualization_md.py
import pandas as pd
import numpy as np
dv_graph_selector = ['Histogram','Scatter']
dv_graph_selected = dv_graph_selector[0]
# Histograms dialog
properties_histo_full = {}
properties_scatter_dataset = {}
def creation_scatter_dataset(test_dataset:pd.DataFrame):
"""This function creates the dataset for the scatter plot. For every column (except Exited), scatter_dataset will have a positive and negative version.
The positive column will have NaN when the Exited is zero and the negative column will have NaN when the Exited is one.
Args:
test_dataset (pd.DataFrame): the test dataset
Returns:
        pd.DataFrame: the dataframe used to display the scatter plot
"""
scatter_dataset = test_dataset.copy()
for column in scatter_dataset.columns:
if column != 'EXITED' :
column_neg = str(column)+'_neg'
column_pos = str(column)+'_pos'
scatter_dataset[column_neg] = scatter_dataset[column]
scatter_dataset[column_pos] = scatter_dataset[column]
scatter_dataset.loc[(scatter_dataset['EXITED'] == 1),column_neg] = np.NaN
scatter_dataset.loc[(scatter_dataset['EXITED'] == 0),column_pos] = np.NaN
return scatter_dataset
def creation_histo_full(test_dataset:pd.DataFrame):
"""This function creates the dataset for the histogram plot. For every column (except Exited), histo_full will have a positive and negative version.
The positive column will have NaN when the Exited is zero and the negative column will have NaN when the Exited is one.
Args:
test_dataset (pd.DataFrame): the test dataset
Returns:
pd.DataFrame: the Dataframe used to display the Histogram
"""
histo_full = test_dataset.copy()
for column in histo_full.columns:
column_neg = str(column)+'_neg'
histo_full[column_neg] = histo_full[column]
histo_full.loc[(histo_full['EXITED'] == 1),column_neg] = np.NaN
histo_full.loc[(histo_full['EXITED'] == 0),column] = np.NaN
return histo_full
def update_histogram_and_scatter(state):
global x_selected, y_selected
x_selected = state.x_selected
y_selected = state.y_selected
state.properties_scatter_dataset = {"x":x_selected,
"y[1]":y_selected+'_pos',
"y[2]":y_selected+'_neg'}
state.scatter_dataset = state.scatter_dataset
state.scatter_dataset_pred = state.scatter_dataset_pred
state.properties_histo_full = {"x[1]":x_selected,
"x[2]":x_selected+'_neg'}
state.histo_full = state.histo_full
state.histo_full_pred = state.histo_full_pred
dv_data_visualization_md = """
# Data **Visualization**{: .color-primary}
<|{dv_graph_selected}|toggle|lov={dv_graph_selector}|>
--------------------------------------------------------------------
<|part|render={dv_graph_selected == 'Histogram'}|
### Histogram
<|{x_selected}|selector|lov={select_x}|dropdown=True|label=Select x|>
<|{histo_full}|chart|type=histogram|properties={properties_histo_full}|rebuild|y=EXITED|label=EXITED|color[1]=red|color[2]=green|name[1]=Exited|name[2]=Stayed|height=600px|>
|>
<|part|render={dv_graph_selected == 'Scatter'}|
### Scatter
<|layout|columns= 1 2|
<|{x_selected}|selector|lov={select_x}|dropdown|label=Select x|>
<|{y_selected}|selector|lov={select_y}|dropdown|label=Select y|>
|>
<|{scatter_dataset}|chart|properties={properties_scatter_dataset}|rebuild|color[1]=red|color[2]=green|name[1]=Exited|name[2]=Stayed|mode=markers|type=scatter|height=600px|>
|>
"""
|
# Create app for demo-churn-classification compare_models_md.py
import numpy as np
from sklearn.metrics import f1_score
import pandas as pd
import numpy as np
cm_height_histo = "100%"
cm_dict_barmode = {"barmode": "stack","margin":{"t":30}}
cm_options_md = "height={cm_height_histo}|width={cm_height_histo}|layout={cm_dict_barmode}"
cm_compare_models_md = """
# Model comparison
----
<br/>
<br/>
<br/>
<|layout|columns= 1 1 1|columns[mobile]=1|
<|{accuracy_graph}|chart|type=bar|x=Model Type|y[1]=Accuracy Model|y[2]=Accuracy Baseline|title=Accuracy|""" + cm_options_md + """|>
<|{f1_score_graph}|chart|type=bar|x=Model Type|y[1]=F1 Score Model|y[2]=F1 Score Baseline|title=F1 Score|""" + cm_options_md + """|>
<|{score_auc_graph}|chart|type=bar|x=Model Type|y[1]=AUC Score Model|y[2]=AUC Score Baseline|title=AUC Score|""" + cm_options_md + """|>
|>
"""
def compare_charts(accuracies, f1_scores, scores_auc, names):
"""This funcion creates the pandas Dataframes (charts) used in the model comparison page
Args:
accuracies (list): list of accuracies
f1_scores (list): list of f1 scores
scores_auc (list): list of auc scores
names (list): list of scenario names
Returns:
        pd.DataFrame: the three resulting pd.DataFrames
"""
accuracy_graph = pd.DataFrame(create_metric_dict(accuracies, "Accuracy", names))
f1_score_graph = pd.DataFrame(create_metric_dict(f1_scores, "F1 Score", names))
score_auc_graph = pd.DataFrame(create_metric_dict(scores_auc, "AUC Score", names))
return accuracy_graph, f1_score_graph, score_auc_graph
def compare_models_baseline(scenario,model_types):
"""This function creates the objects for the model comparison
Args:
scenario (scenario): the selected scenario
model_types (str): the name of the selected model type
Returns:
        pd.DataFrame: the three resulting pd.DataFrames
"""
accuracies = []
f1_scores = []
scores_auc = []
names = []
for model_type in model_types:
(_,accuracy,f1_score,score_auc,_,_,_,_,_,_) = c_update_metrics(scenario, model_type)
accuracies.append(accuracy)
f1_scores.append(f1_score)
scores_auc.append(score_auc)
names.append('Model' if model_type != "baseline" else "Baseline")
accuracy_graph,f1_score_graph, score_auc_graph = compare_charts(accuracies, f1_scores, scores_auc, names)
return accuracy_graph, f1_score_graph, score_auc_graph
def create_metric_dict(metric, metric_name, names):
"""This function creates a dictionary of metrics for multiple models that will be used in a Dataframe shown on the Gui
Args:
metric (list): the value of the metric
metric_name (str): the name of the metric
names (list): list of scenario names
Returns:
        dict: dictionary used for a pandas DataFrame
"""
metric_dict = {}
initial_list = [0]*len(names)
metric_dict["Model Type"] = names
for i in range(len(names)):
current_list = initial_list.copy()
current_list[i] = metric[i]
metric_dict[metric_name +" "+ names[i].capitalize()] = current_list
return metric_dict
def c_update_metrics(scenario, model_type):
"""This function updates the metrics of a scenario using a model
Args:
scenario (scenario): the selected scenario
model_type (str): the name of the selected model_type
Returns:
        tuple: the metric values (prediction counts, scores, and confusion-matrix entries)
"""
metrics = scenario.data_nodes[f'metrics_{model_type}'].read()
number_of_predictions = metrics['number_of_predictions']
number_of_good_predictions = metrics['number_of_good_predictions']
number_of_false_predictions = metrics['number_of_false_predictions']
accuracy = np.around(metrics['accuracy'], decimals=2)
f1_score = np.around(metrics['f1_score'], decimals=2)
score_auc = np.around(scenario.data_nodes[f'score_auc_{model_type}'].read(), decimals=2)
dict_ftpn = metrics['dict_ftpn']
fp_ = dict_ftpn['fp']
tp_ = dict_ftpn['tp']
fn_ = dict_ftpn['fn']
tn_ = dict_ftpn['tn']
return number_of_predictions, accuracy, f1_score, score_auc, number_of_good_predictions, number_of_false_predictions, fp_, tp_, fn_, tn_
|
# Create app for demo-churn-classification model_manager_md.py
import pandas as pd
import numpy as np
mm_graph_selector_scenario = ['Metrics', 'Features', 'Histogram','Scatter']
mm_graph_selected_scenario = mm_graph_selector_scenario[0]
mm_algorithm_selector = ['Baseline', 'ML']
mm_algorithm_selected = 'ML'
mm_pie_color_dict_2 = {"piecolorway":["#00D08A","#FE913C"]}
mm_pie_color_dict_4 = {"piecolorway":["#00D08A","#81F1A0","#F3C178","#FE913C"]}
mm_margin_features = {'margin': {'l': 150}}
def creation_scatter_dataset_pred(test_dataset:pd.DataFrame, forecast_series:pd.Series):
"""This function creates the dataset for the scatter plot for the predictions. For every column (except EXITED) will have a positive and negative version.
EXITED is here a binary indicating if the prediction is good or bad.
The positive column will have NaN when the Exited is zero and the negative column will have NaN when the Exited is one.
Args:
test_dataset (pd.DataFrame): the test dataset
forecast_series (pd.DataFrame): the forecast dataset
Returns:
pd.DataFrame: the Dataframe used to display the Histogram
"""
scatter_dataset = test_dataset.copy()
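    # EXITED is redefined as a misprediction flag: 1 when the forecast differs from the actual label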
scatter_dataset['EXITED'] = (scatter_dataset['EXITED']!=forecast_series.to_numpy()).astype(int)
for column in scatter_dataset.columns:
if column != 'EXITED' :
column_neg = str(column)+'_neg'
column_pos = str(column)+'_pos'
scatter_dataset[column_neg] = scatter_dataset[column]
scatter_dataset[column_pos] = scatter_dataset[column]
scatter_dataset.loc[(scatter_dataset['EXITED'] == 1),column_neg] = np.NaN
scatter_dataset.loc[(scatter_dataset['EXITED'] == 0),column_pos] = np.NaN
return scatter_dataset
def creation_histo_full_pred(test_dataset:pd.DataFrame,forecast_series:pd.Series):
"""This function creates the dataset for the histogram plot for the predictions. For every column (except PREDICTION) will have a positive and negative version.
PREDICTION is a binary indicating if the prediction is good or bad.
The positive column will have NaN when the PREDICTION is zero and the negative column will have NaN when the PREDICTION is one.
Args:
test_dataset (pd.DataFrame): the test dataset
forecast_series (pd.DataFrame): the forecast dataset
Returns:
pd.DataFrame: the Dataframe used to display the Histogram
"""
histo_full = test_dataset.copy()
histo_full['EXITED'] = (histo_full['EXITED']!=forecast_series.to_numpy()).astype(int)
histo_full.columns = histo_full.columns.str.replace('EXITED', 'PREDICTION')
for column in histo_full.columns:
column_neg = str(column)+'_neg'
histo_full[column_neg] = histo_full[column]
histo_full.loc[(histo_full['PREDICTION'] == 1),column_neg] = np.NaN
histo_full.loc[(histo_full['PREDICTION'] == 0),column] = np.NaN
return histo_full
mm_model_manager_md = """
# **Model**{: .color-primary} Manager
<|layout|columns=3 2 2 2|
<|{mm_graph_selected_scenario}|toggle|lov={mm_graph_selector_scenario}|>
<|{mm_algorithm_selected}|selector|lov={mm_algorithm_selector}|dropdown|label=Algorithm|>
<|show roc|button|on_action={lambda s: s.assign("dr_show_roc", True)}|>
<br/> **Number of predictions:** <|{number_of_predictions}|>
|>
-----------------------------------------------------------------
<Metrics|part|render={mm_graph_selected_scenario == 'Metrics'}|
### Metrics
<|layout|columns=1 1 1|columns[mobile]=1|
<accuracy|
<|{accuracy}|indicator|value={accuracy}|min=0|max=1|>
**Model accuracy**
{: .text-center}
<|{pie_plotly}|chart|title=Accuracy of predictions model|values=values|labels=labels|type=pie|layout={mm_pie_color_dict_2}|>
|accuracy>
<score_auc|
<|{score_auc}|indicator|value={score_auc}|min=0|max=1|>
**Model AUC**
{: .text-center}
<|{pie_confusion_matrix}|chart|title=Confusion Matrix|values=values|labels=labels|type=pie|layout={mm_pie_color_dict_4}|>
|score_auc>
<f1_score|
<|{f1_score}|indicator|value={f1_score}|min=0|max=1|>
**Model F1-score**
{: .text-center}
<|{distrib_class}|chart|title=Distribution between Exited and Stayed|values=values|labels=labels|type=pie|layout={mm_pie_color_dict_2}|>
|f1_score>
|>
|Metrics>
<Features|part|render={mm_graph_selected_scenario == 'Features'}|
### Features
<|{features_table}|chart|type=bar|y=Features|x=Importance|orientation=h|layout={mm_margin_features}|>
|Features>
<Histogram|part|render={mm_graph_selected_scenario == 'Histogram'}|
### Histogram
<|{x_selected}|selector|lov={select_x}|dropdown|label=Select x|>
<|{histo_full_pred}|chart|type=histogram|properties={properties_histo_full}|rebuild|y=PREDICTION|label=PREDICTION|color[1]=red|color[2]=green|name[1]=Good Predictions|name[2]=Bad Predictions|height=600px|>
|Histogram>
<Scatter|part|render={mm_graph_selected_scenario == 'Scatter'}|
### Scatter
<|layout|columns=1 2|
<|{x_selected}|selector|lov={select_x}|dropdown|label=Select x|>
<|{y_selected}|selector|lov={select_y}|dropdown=True|label=Select y|>
|>
<|{scatter_dataset_pred}|chart|properties={properties_scatter_dataset}|rebuild|color[1]=red|color[2]=green|name[1]=Bad prediction|name[2]=Good prediction|mode=markers|type=scatter|height=600px|>
|Scatter>
"""
|
# Create app for demo-stock-visualization main.py
from taipy.gui import Gui, notify
from datetime import date
import yfinance as yf
from prophet import Prophet
import pandas as pd
# Parameters for retrieving the stock data
start_date = "2015-01-01"
end_date = date.today().strftime("%Y-%m-%d")
selected_stock = 'AAPL'
n_years = 1
def get_stock_data(ticker, start, end):
ticker_data = yf.download(ticker, start, end) # downloading the stock data from START to TODAY
ticker_data.reset_index(inplace=True) # put date in the first column
ticker_data['Date'] = pd.to_datetime(ticker_data['Date']).dt.tz_localize(None)
return ticker_data
def get_data_from_range(state):
print("GENERATING HIST DATA")
start_date = state.start_date if type(state.start_date)==str else state.start_date.strftime("%Y-%m-%d")
end_date = state.end_date if type(state.end_date)==str else state.end_date.strftime("%Y-%m-%d")
state.data = get_stock_data(state.selected_stock, start_date, end_date)
if len(state.data) == 0:
notify(state, "error", f"Not able to download data {state.selected_stock} from {start_date} to {end_date}")
return
notify(state, 's', 'Historical data has been updated!')
notify(state, 'w', 'Deleting previous predictions...')
state.forecast = pd.DataFrame(columns=['Date', 'Lower', 'Upper'])
def generate_forecast_data(data, n_years):
# FORECASTING
df_train = data[['Date', 'Close']]
df_train = df_train.rename(columns={"Date": "ds", "Close": "y"}) # This is the format that Prophet accepts
m = Prophet()
m.fit(df_train)
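    # extend the time index by n_years * 365 daily periods for Prophet to predict over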
future = m.make_future_dataframe(periods=n_years * 365)
fc = m.predict(future)[['ds', 'yhat_lower', 'yhat_upper']].rename(columns={"ds": "Date", "yhat_lower": "Lower", "yhat_upper": "Upper"})
print("Process Completed!")
return fc
def forecast_display(state):
notify(state, 'i', 'Predicting...')
state.forecast = generate_forecast_data(state.data, state.n_years)
notify(state, 's', 'Prediction done! Forecast data has been updated!')
#### Get the data, make an initial forecast, and build a front-end web app with Taipy GUI
data = get_stock_data(selected_stock, start_date, end_date)
forecast = generate_forecast_data(data, n_years)
show_dialog = False
partial_md = "<|{forecast}|table|>"
dialog_md = "<|{show_dialog}|dialog|partial={partial}|title=Forecast Data|on_action={lambda state: state.assign('show_dialog', False)}|>"
page = dialog_md + """<|toggle|theme|>
<|container|
# Stock Price **Analysis**{: .color-primary} Dashboard
<|layout|columns=1 2 1|gap=40px|class_name=card p2|
<dates|
#### Selected **Period**{: .color-primary}
From:
<|{start_date}|date|on_change=get_data_from_range|>
To:
<|{end_date}|date|on_change=get_data_from_range|>
|dates>
<ticker|
#### Selected **Ticker**{: .color-primary}
Please enter a valid ticker:
<|{selected_stock}|input|label=Stock|on_action=get_data_from_range|>
or choose a popular one
<|{selected_stock}|toggle|lov=MSFT;GOOG;AAPL;AMZN;META;COIN;AMC;PYPL|on_change=get_data_from_range|>
|ticker>
<years|
#### Prediction **years**{: .color-primary}
Select number of prediction years: <|{n_years}|>
<|{n_years}|slider|min=1|max=5|>
<|PREDICT|button|on_action=forecast_display|class_name={'plain' if len(forecast)==0 else ''}|>
|years>
|>
<|Historical Data|expandable|expanded=False|
<|layout|columns=1 1|
<|
### Historical **closing**{: .color-primary} price
<|{data}|chart|mode=line|x=Date|y[1]=Open|y[2]=Close|>
|>
<|
### Historical **daily**{: .color-primary} trading volume
<|{data}|chart|mode=line|x=Date|y=Volume|>
|>
|>
### **Whole**{: .color-primary} historical data: <|{selected_stock}|text|raw|>
<|{data}|table|>
<br/>
|>
### **Forecast**{: .color-primary} Data
<|{forecast}|chart|mode=line|x=Date|y[1]=Lower|y[2]=Upper|>
<br/>
<|More info|button|on_action={lambda s: s.assign("show_dialog", True)}|>
{: .text-center}
|>
<br/>
"""
# Run Taipy GUI
gui = Gui(page)
partial = gui.add_partial(partial_md)
gui.run(dark_mode=False, title="Stock Visualization")
|
# Create app for demo-movie-genre main.py
import taipy as tp
import pandas as pd
from taipy import Config, Scope, Gui
# Create a Taipy App that will output the 7 best movies for a genre
# Taipy Core - backend definition
# Filter function for Task
def filtering_genre(initial_dataset: pd.DataFrame, selected_genre):
filtered_dataset = initial_dataset[initial_dataset['genres'].str.contains(selected_genre)]
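    # keep the 7 most popular movies of the selected genre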
filtered_data = filtered_dataset.nlargest(7, 'Popularity %')
return filtered_data
# Input Data Nodes configuration
initial_dataset_cfg = Config.configure_data_node(id="initial_dataset",
storage_type="csv",
path="data.csv",
scope=Scope.GLOBAL)
selected_genre_cfg = Config.configure_data_node(id="selected_genre_node",
default_data="ACTION",
scope=Scope.GLOBAL)
# Output Data Node configuration
filtered_data_cfg = Config.configure_data_node(id="filtered_data",
scope=Scope.GLOBAL)
# Task configuration
filter_task_cfg = Config.configure_task(id="filter_genre",
function=filtering_genre,
input=[initial_dataset_cfg, selected_genre_cfg],
output=filtered_data_cfg,
skippable=True)
# Pipeline configuration
pipeline_cfg = Config.configure_pipeline(id="pipeline",
task_configs=[filter_task_cfg])
# Scenario configuration
scenario_cfg = Config.configure_scenario(id="scenario", pipeline_configs=[pipeline_cfg])
# Run of the Taipy Core service
tp.Core().run()
# Creation of my scenario
scenario = tp.create_scenario(scenario_cfg)
# Taipy GUI- front end definition
# Callback definition
def modify_df(state):
scenario.selected_genre_node.write(state.selected_genre)
tp.submit(scenario)
state.df = scenario.filtered_data.read()
# Get list of genres
list_genres = ['Action', 'Adventure', 'Animation', 'Children', 'Comedy', 'Fantasy', 'IMAX', 'Romance',
               'Sci-Fi', 'Western', 'Crime', 'Mystery', 'Drama', 'Horror', 'Thriller', 'Film-Noir',
'War', 'Musical', 'Documentary']
# Initialization of variables
df = pd.DataFrame(columns=['Title', 'Popularity %'])
selected_genre = None
# movie_genre_app
movie_genre_app = """
# Film recommendation
## Choose your favorite genre
<|{selected_genre}|selector|lov={list_genres}|on_change=modify_df|dropdown|>
## Here are the top 7 picks
<|{df}|chart|x=Title|y=Popularity %|type=bar|title=Film Popularity|>
"""
# run the app
Gui(page=movie_genre_app).run()
|
# Create app for demo-job-monitoring __init__.py
|
# Create app for demo-job-monitoring runtime.py
from taipy import run
class App:
"""A singleton class that provides the Taipy runtime objects."""
def __new__(cls):
if not hasattr(cls, "instance"):
cls.instance = super(App, cls).__new__(cls)
return cls.instance
@property
def gui(self):
return self.__gui
@property
def core(self):
return self.__core
@gui.setter
def gui(self, gui):
self.__gui = gui
@core.setter
def core(self, core):
self.__core = core
def start(self, **kwargs):
# Starts the app by calling `taipy.run` on the core and gui objects:
run(self.__gui, self.__core, **kwargs)
|
# Create app for demo-job-monitoring main.py
from runtime import App
from pages import root, monitoring
import taipy
from taipy.config.config import Config
from taipy.gui import Gui
import os
# Variables for bindings
all_jobs = [['','','','']]
show_dialog_run_pipeline = False
selected_pipeline = None
show_details_pane = False
selected_job = None
if __name__ == "__main__":
# Initialize Taipy objects
Config.configure_job_executions(mode="standalone", nb_of_workers=4)
Config.load("app.config.toml")
App().core = taipy.Core()
App().gui = Gui(pages={"/": root.page, "monitoring": monitoring.page})
# Start the app
App().start(
title="Job Monitoring Demo",
port=os.environ.get("PORT", "8080"),
dark_mode=False,
css_file="app",
)
|
# Create app for demo-job-monitoring __init__.py
|
# Create app for demo-job-monitoring ml.py
from sklearn.linear_model import LogisticRegression
import pandas as pd
import numpy as np
# Test prediction with a Female, 19 years old, earning 20000
fixed_value = [1, 19, 20000]
def preprocess(df: pd.DataFrame) -> pd.DataFrame:
def _gender_to_int(gender):
if gender == "Female":
return 1
return 0
df["GenderNum"] = df["Gender"].apply(_gender_to_int)
return df
def train(dataset):
# X (features) are "GenderNum", "Age", "EstimatedSalary"
X = dataset[["GenderNum", "Age", "EstimatedSalary"]]
# Y is "Purchased"
Y = dataset[["Purchased"]]
# Let's split the dataset: the first 50 will be used for training,
# the rest will be for testing
split = 50
X_train, Y_train = X[:split], Y[:split]
X_test, Y_test = X[split:], Y[split:]
# Using scikit-learn default
regression = LogisticRegression(random_state=0, max_iter=10000).fit(
X_train.values, Y_train.values.ravel()
)
# Accuracy of our model:
print(f"intercept: {regression.intercept_} coefficients: {regression.coef_}")
print(f"train accuracy: {regression.score(X_train, Y_train)}")
print(f"test accuracy: {regression.score(X_test, Y_test)}") # We aim for > 0.8...
return regression
def predict(x, regression: LogisticRegression):
variables = np.array(x).reshape(1, -1)
result = regression.predict(variables)
print(f"for: {variables}, the prediction is {result}")
return result
if __name__ == "__main__":
# Testing
df = pd.read_csv("data/data.csv")
df = preprocess(df)
model = train(df)
print(predict(fixed_value, model))
|
# Create app for demo-job-monitoring debug.py
import time
def long_running(anything):
print("Waiting 20 seconds...")
time.sleep(20)
print("Done!")
return anything
def raise_exception(anything):
print("Waiting 5 seconds before raising an exception...")
time.sleep(5)
raise Exception("A very expected error occured!")
|
# Create app for demo-job-monitoring monitoring.py
import taipy as tp
from taipy.gui import get_state_id, invoke_callback, Markdown
from taipy.config.config import Config
from taipy.core.job.job import Job
from runtime import App
def get_all_jobs():
"""Returns all the known jobs (as a array of fields)."""
def _job_to_fields(job: Job) -> list[str]:
return [
job.submit_id,
job.id,
job.creation_date.strftime("%b %d %Y %H:%M:%S"),
str(job.status),
]
return [_job_to_fields(job) for job in tp.get_jobs()]
def get_all_pipelines():
"""Returns all pipelines (as an array of ids)"""
return [
pipeline.id
for pipeline in Config.pipelines.values()
        if pipeline.id != "default"  # we explicitly get rid of the "default" pipeline
]
def get_job_by_id(id):
"""Return a job from its id"""
found = [job for job in tp.get_jobs() if job.id == id]
if found:
return found[0]
return None
def get_job_by_index(index):
"""Return a job from its index"""
all_jobs = tp.get_jobs()
if len(all_jobs) > index:
return all_jobs[index]
return None
def get_status(job: Job):
"""Get the status of the given job as string."""
if not job:
return None
return job.status.name.lower()
# -----------------------------------------------------------------------------
# Callbacks / UI function
def on_style(state, index, row):
status_index = 3
if 'RUNNING' in row[status_index]:
return 'blue'
if 'COMPLETED' in row[status_index]:
return 'green'
if 'BLOCKED' in row[status_index]:
return 'orange'
if 'FAILED' in row[status_index]:
return 'red'
def refresh_job_list(state):
"""Refresh the job list"""
state.all_jobs = get_all_jobs()
def job_updated(state_id, pipeline, job):
"""Callback called when a job has been updated."""
    # invoke_callback allows running a function with a GUI _state_.
invoke_callback(App().gui, state_id, refresh_job_list, args=[])
def open_run_pipeline_dialog(state):
"""Opens the 'Run pipeline...' dialog."""
state.show_dialog_run_pipeline = True
def close_run_pipeline_dialog(state):
"""Closes the 'Run pipeline...' dialog."""
state.show_dialog_run_pipeline = False
def run_pipeline(state):
"""Runs a pipeline action."""
# We need to pass the state ID so that it can be restored in the job_updated listener:
state_id = get_state_id(state)
# Get selected pipeline config:
selected = state.selected_pipeline
pipeline_config = Config.pipelines[selected]
if not pipeline_config:
raise Exception(f"unknown pipeline config: {selected}")
# Close the dialog
close_run_pipeline_dialog(state)
pipeline = tp.create_pipeline(pipeline_config)
tp.subscribe_pipeline(pipeline=pipeline, callback=job_updated, params=[state_id])
tp.submit(pipeline)
def on_table_click(state, table, action, payload):
job_index = payload["index"]
selected_job = get_job_by_index(job_index)
state.selected_job = selected_job
state.show_details_pane = True
def cancel_selected_job(state):
job_id = state.selected_job.id
tp.cancel_job(state.selected_job)
state.show_details_pane = False
refresh_job_list(state)
state.selected_job = get_job_by_id(job_id)
def delete_selected_job(state):
tp.delete_job(state.selected_job, force=True)
state.show_details_pane = False
refresh_job_list(state)
# -----------------------------------------------------------------------------
# UI Configuration
columns = {
"0": {"title": "Submit ID"},
"1": {"title": "Job ID"},
"2": {"title": "Creation Date"},
"3": {"title": "Status"},
}
# -----------------------------------------------------------------------------
# Page
page = Markdown("job_monitoring/pages/monitoring.md")
|
# Create app for demo-job-monitoring __init__.py
|
# Create app for demo-job-monitoring root.py
from taipy.gui import Markdown
content = """
# Job Monitoring Demo
"""
page = Markdown(content)
|
# Create app for demo-job-monitoring monitoring.md
<|{all_jobs}|table|columns={columns}|width='100%'|on_action={on_table_click}|style=on_style|>
<|Refresh List|button|on_action={refresh_job_list}|>
<|Run Pipeline...|button|on_action={open_run_pipeline_dialog}|>
<|{show_dialog_run_pipeline}|dialog|title=Run pipeline...|
<|{selected_pipeline}|selector|lov={get_all_pipelines()}|>
<|Run|button|on_action={run_pipeline}|>
<|Cancel|button|on_action={close_run_pipeline_dialog}|>
|>
<|{show_details_pane}|pane|
# Job Details <|Delete|button|on_action=delete_selected_job|> <|Cancel|button|on_action=cancel_selected_job|>
<|layout|columns=1 1|
<|part|class_name=card|
## Task
<|{selected_job.task.config_id}|>
|>
<|part|class_name=card|
## Status
<|{get_status(selected_job)}|>
|>
|>
<|part|class_name=card|
## ID
<|{selected_job.id}|>
|>
<|part|class_name=card|
## Submission ID
<|{selected_job.submit_id}|>
|>
<|part|class_name=card|
## Creation Date
<|{selected_job.creation_date.strftime("%b %d %y %H:%M:%S")}|>
|>
<|part|class_name=card|
## Stacktrace
<|{"\n".join(selected_job.stacktrace)}|class_name=code|>
|>
----
|>
|
# Create app for demo-fraud-detection charts.py
""" Prepare data for charts """
import pandas as pd
def gen_amt_data(transactions: pd.DataFrame) -> list:
"""
Create a list of amt values for fraudulent and non-fraudulent transactions
Args:
- transactions: the transactions dataframe
Returns:
- a list of two dictionaries containing the data for the two histograms
"""
amt_fraud = transactions[transactions["fraud"]]["amt"]
amt_no_fraud = transactions[~transactions["fraud"]]["amt"]
amt_data = [
{"Amount ($)": list(amt_no_fraud)},
{"Amount ($)": list(amt_fraud)},
]
return amt_data
def gen_gender_data(transactions: pd.DataFrame) -> pd.DataFrame:
"""
Create a dataframe containing the percentage of fraudulent transactions
per gender
Args:
- transactions: the transactions dataframe
Returns:
- the resulting dataframe
"""
male_fraud_percentage = len(
transactions[transactions["fraud"]].loc[transactions["gender"] == "M"]
) / len(transactions[transactions["fraud"]])
female_fraud_percentage = 1 - male_fraud_percentage
male_not_fraud_percentage = len(
transactions[~transactions["fraud"]].loc[transactions["gender"] == "M"]
) / len(transactions[~transactions["fraud"]])
female_not_fraud_percentage = 1 - male_not_fraud_percentage
gender_data = pd.DataFrame(
{
"Fraudulence": ["Not Fraud", "Fraud"],
"Male": [male_not_fraud_percentage, male_fraud_percentage],
"Female": [female_not_fraud_percentage, female_fraud_percentage],
}
)
return gender_data
def gen_cat_data(transactions: pd.DataFrame) -> pd.DataFrame:
"""
Generates a dataframe with the percentage difference
between fraudulent and non-fraudulent transactions per category
Args:
- transactions: the transactions dataframe
Returns:
- the resulting dataframe
"""
categories = transactions["category"].unique()
fraud_categories = [
len(
transactions[transactions["fraud"]].loc[
transactions["category"] == category
]
)
for category in categories
]
fraud_categories_norm = [
category / len(transactions[transactions["fraud"]])
for category in fraud_categories
]
not_fraud_categories = [
len(
transactions[~transactions["fraud"]].loc[
transactions["category"] == category
]
)
for category in categories
]
not_fraud_categories_norm = [
category / len(transactions[~transactions["fraud"]])
for category in not_fraud_categories
]
diff_categories = [
fraud_categories_norm[i] - not_fraud_categories_norm[i]
for i in range(len(categories))
]
cat_data = pd.DataFrame(
{
"Category": categories,
"Difference": diff_categories,
}
)
cat_data = cat_data.sort_values(by="Difference", ascending=False)
return cat_data
def gen_age_data(transactions: pd.DataFrame) -> pd.DataFrame:
"""
Generates a dataframe with the percentage of fraudulent transactions
per age
Args:
- transactions: the transactions dataframe
Returns:
- the resulting dataframe
"""
    ages = range(111)
    fraud_age = [
        len(transactions[transactions["fraud"]].loc[transactions["age"] == age])
        / len(transactions[transactions["fraud"]])
        for age in ages
    ]
    not_fraud_age = [
        len(transactions[~transactions["fraud"]].loc[transactions["age"] == age])
        / len(transactions[~transactions["fraud"]])
        for age in ages
    ]
    age_data = pd.DataFrame(
        {
            "Age": ages,
"Fraud": fraud_age,
"Not Fraud": not_fraud_age,
}
)
return age_data
def gen_hour_data(transactions: pd.DataFrame) -> pd.DataFrame:
"""
Generates a dataframe with the percentage of fraudulent transactions
per hour
Args:
- transactions: the transactions dataframe
Returns:
- the resulting dataframe
"""
    hours = range(24)  # transactions["hour"] comes from dt.hour, so valid values are 0-23
fraud_hours = [
len(transactions[transactions["fraud"]].loc[transactions["hour"] == hour])
/ len(transactions[transactions["fraud"]])
for hour in hours
]
not_fraud_hours = [
len(transactions[~transactions["fraud"]].loc[transactions["hour"] == hour])
/ len(transactions[~transactions["fraud"]])
for hour in hours
]
hour_data = pd.DataFrame(
{
"Hour": hours,
"Fraud": fraud_hours,
"Not Fraud": not_fraud_hours,
}
)
return hour_data
def gen_day_data(transactions: pd.DataFrame) -> pd.DataFrame:
"""
Generates a dataframe with the percentage of fraudulent transactions
per weekday
Args:
- transactions: the transactions dataframe
Returns:
- the resulting dataframe
"""
days = range(7)
days_names = [
"Monday",
"Tuesday",
"Wednesday",
"Thursday",
"Friday",
"Saturday",
"Sunday",
]
fraud_days = [
len(transactions[transactions["fraud"]].loc[transactions["day"] == day])
/ len(transactions[transactions["fraud"]])
for day in days
]
not_fraud_days = [
len(transactions[~transactions["fraud"]].loc[transactions["day"] == day])
/ len(transactions[~transactions["fraud"]])
for day in days
]
day_data = pd.DataFrame(
{
"Day": days_names,
"Fraud": fraud_days,
"Not Fraud": not_fraud_days,
}
)
return day_data
def gen_month_data(transactions: pd.DataFrame) -> pd.DataFrame:
"""
Generates a dataframe with the percentage of fraudulent transactions
per month
Args:
- transactions: the transactions dataframe
Returns:
- the resulting dataframe
"""
months = range(1, 13)
months_names = [
"January",
"February",
"March",
"April",
"May",
"June",
"July",
"August",
"September",
"October",
"November",
"December",
]
fraud_months = [
len(transactions[transactions["fraud"]].loc[transactions["month"] == month])
/ len(transactions[transactions["fraud"]])
for month in months
]
not_fraud_months = [
len(transactions[~transactions["fraud"]].loc[transactions["month"] == month])
/ len(transactions[~transactions["fraud"]])
for month in months
]
month_data = pd.DataFrame(
{
"Month": months_names,
"Fraud": fraud_months,
"Not Fraud": not_fraud_months,
}
)
return month_data
|
# Create app for demo-fraud-detection utils.py
""" Data Manipulation and Callbacks """
import datetime as dt
import numpy as np
import pandas as pd
from taipy.gui import State, navigate, notify
import xgboost as xgb
from shap import Explainer, Explanation
from sklearn.metrics import confusion_matrix
column_names = [
"amt",
"zip",
"city_pop",
"age",
"hour",
"day",
"month",
"category_food_dining",
"category_gas_transport",
"category_grocery_net",
"category_grocery_pos",
"category_health_fitness",
"category_home",
"category_kids_pets",
"category_misc_net",
"category_misc_pos",
"category_personal_care",
"category_shopping_net",
"category_shopping_pos",
"category_travel",
]
def explain_pred(state: State, _: str, payload: dict) -> None:
"""
When a transaction is selected in the table
Explain the prediction using SHAP, update the waterfall chart
Args:
- state: the state of the app
- payload: the payload of the event containing the index of the transaction
"""
idx = payload["index"]
exp = state.explaination[idx]
feature_values = [-value for value in list(exp.values)]
data_values = list(exp.data)
for i, value in enumerate(data_values):
if isinstance(value, float):
value = round(value, 2)
data_values[i] = value
names = [f"{name}: {value}" for name, value in zip(column_names, data_values)]
exp_data = pd.DataFrame({"Feature": names, "Influence": feature_values})
exp_data["abs_importance"] = exp_data["Influence"].abs()
exp_data = exp_data.sort_values(by="abs_importance", ascending=False)
exp_data = exp_data.drop(columns=["abs_importance"])
exp_data = exp_data[:5]
state.exp_data = exp_data
if state.transactions.iloc[idx]["fraud"]:
state.fraud_text = "Why is this transaction fraudulent?"
else:
state.fraud_text = "Why is this transaction not fraudulent?"
first = state.transactions.iloc[idx]["first"]
last = state.transactions.iloc[idx]["last"]
state.specific_transactions = state.transactions[
(state.transactions["first"] == first) & (state.transactions["last"] == last)
]
state.selected_transaction = state.transactions.loc[[idx]]
state.selected_client = f"{first} {last}"
navigate(state, "Analysis")
def generate_transactions(
state: State,
df: pd.DataFrame,
model: xgb.XGBRegressor,
threshold: float,
start_date="2020-06-21",
end_date="2030-01-01",
) -> tuple[pd.DataFrame, Explanation]:
"""
Generates a DataFrame of transactions with the fraud prediction
Args:
- state: the state of the app
- df: the DataFrame containing the transactions
- model: the model used to predict the fraud
- threshold: the threshold used to determine if a transaction is fraudulent
- start_date: the start date of the transactions
- end_date: the end date of the transactions
Returns:
- a DataFrame of transactions with the fraud prediction
"""
start_date = str(start_date)
end_date = str(end_date)
start_date_dt = dt.datetime.strptime(start_date, "%Y-%m-%d")
end_date_dt = dt.datetime.strptime(end_date, "%Y-%m-%d")
# Make sure the dates are separated by at least one day
if (end_date_dt - start_date_dt).days < 1:
notify(state, "error", "The start date must be before the end date")
raise Exception("The start date must be before the end date")
# Make sure that start_date is between 2020-06-21 and 2020-06-30
if not (dt.datetime(2020, 6, 21) <= start_date_dt <= dt.datetime(2020, 6, 30)):
notify(
state, "error", "The start date must be between 2020-06-21 and 2020-06-30"
)
raise Exception("The start date must be between 2020-06-21 and 2020-06-30")
df["age"] = dt.date.today().year - pd.to_datetime(df["dob"]).dt.year
df["hour"] = pd.to_datetime(df["trans_date_trans_time"]).dt.hour
df["day"] = pd.to_datetime(df["trans_date_trans_time"]).dt.dayofweek
df["month"] = pd.to_datetime(df["trans_date_trans_time"]).dt.month
test = df[
[
"category",
"amt",
"zip",
"city_pop",
"age",
"hour",
"day",
"month",
"is_fraud",
]
]
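    # one-hot encode the category column; drop_first avoids a redundant dummy level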
test = pd.get_dummies(test, drop_first=True)
test = test[df["trans_date_trans_time"].between(str(start_date), str(end_date))]
X_test = test.drop("is_fraud", axis="columns")
X_test_values = X_test.values
transactions = df[
df["trans_date_trans_time"].between(str(start_date), str(end_date))
]
raw_results = model.predict(X_test_values)
results = [str(min(1, round(result, 2))) for result in raw_results]
transactions.insert(0, "fraud_value", results)
# Low if under 0.2, Medium if under 0.5, High if over 0.5
results = ["Low" if float(result) < 0.2 else "Medium" for result in raw_results]
for i, result in enumerate(results):
if result == "Medium" and float(raw_results[i]) > 0.5:
results[i] = "High"
transactions.insert(0, "fraud_confidence", results)
results = [float(result) > threshold for result in raw_results]
transactions.insert(0, "fraud", results)
explainer = Explainer(model)
sv = explainer(X_test)
explaination = Explanation(sv, sv.base_values, X_test, feature_names=X_test.columns)
# Drop Unnamed: 0 column if it exists
if "Unnamed: 0" in transactions.columns:
transactions = transactions.drop(columns=["Unnamed: 0"])
return transactions, explaination
def update_threshold(state: State) -> None:
"""
Change the threshold used to determine if a transaction is fraudulent
Generate the confusion matrix
Args:
- state: the state of the app
"""
threshold = float(state.threshold)
results = [
float(result) > threshold for result in state.transactions["fraud_value"]
]
state.transactions["fraud"] = results
state.transactions = state.transactions
results = [
float(result) > threshold
for result in state.original_transactions["fraud_value"]
]
state.original_transactions["fraud"] = results
state.original_transactions = state.original_transactions
y_pred = results
y_true = state.original_transactions["is_fraud"]
cm = confusion_matrix(y_true, y_pred)
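    # normalize each row of the confusion matrix so the cells read as per-class rates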
cm = cm.astype("float") / cm.sum(axis=1)[:, np.newaxis]
tp, tn, fp, fn = cm[1][1], cm[0][0], cm[0][1], cm[1][0]
dataset = state.original_transactions[:10000]
state.true_positives = dataset[
(dataset["is_fraud"] == True) & (dataset["fraud"] == True)
]
state.true_negatives = dataset[
(dataset["is_fraud"] == False) & (dataset["fraud"] == False)
]
state.false_positives = dataset[
(dataset["is_fraud"] == False) & (dataset["fraud"] == True)
]
state.false_negatives = dataset[
(dataset["is_fraud"] == True) & (dataset["fraud"] == False)
]
data = {
"Values": [
[fn, tp],
[tn, fp],
],
"Actual": ["Fraud", "Not Fraud"],
"Predicted": ["Not Fraud", "Fraud"],
}
layout = {
"annotations": [],
"xaxis": {"ticks": "", "side": "top"},
"yaxis": {"ticks": "", "ticksuffix": " "},
}
predicted = data["Predicted"]
actuals = data["Actual"]
for actual, _ in enumerate(actuals):
for pred, _ in enumerate(predicted):
value = data["Values"][actual][pred]
annotation = {
"x": predicted[pred],
"y": actuals[actual],
"text": f"{str(round(value, 3)*100)[:4]}%",
"font": {"color": "white" if value < 0.5 else "black", "size": 30},
"showarrow": False,
}
layout["annotations"].append(annotation)
state.confusion_data = data
state.confusion_layout = layout
update_table(state)
return (
state.true_positives,
state.true_negatives,
state.false_positives,
state.false_negatives,
)
def update_table(state: State) -> None:
"""
Updates the table of transactions displayed
Args:
- state: the state of the app
"""
if state.selected_table == "True Positives":
state.displayed_table = state.true_positives
elif state.selected_table == "False Positives":
state.displayed_table = state.false_positives
elif state.selected_table == "True Negatives":
state.displayed_table = state.true_negatives
elif state.selected_table == "False Negatives":
state.displayed_table = state.false_negatives
|
# Create app for demo-fraud-detection main.py
""" Fraud Detection App """
import pickle
import numpy as np
import pandas as pd
from taipy.gui import Gui, Icon, State, navigate, notify
from utils import (
explain_pred,
generate_transactions,
update_threshold,
update_table,
)
from charts import *
DATA_POINTS = 30000
threshold = "0.5"
threshold_lov = np.arange(0, 1, 0.01)
confusion_text = "Confusion Matrix"
fraud_text = "No row selected"
exp_data = pd.DataFrame({"Feature": [], "Influence": []})
df = pd.read_csv("data/fraud_data.csv")
df["merchant"] = df["merchant"].str[6:]
model = pickle.load(open("model.pkl", "rb"))
transactions, explaination = generate_transactions(None, df, model, float(threshold))
original_transactions = transactions
original_explaination = explaination
specific_transactions = transactions
selected_client = "No client selected"
start_date = "2020-06-21"
end_date = "2020-06-22"
selected_table = "True Positives"
true_positives = None
false_positives = None
true_negatives = None
false_negatives = None
displayed_table = None
selected_transaction = None
def fraud_style(_: State, index: int, values: list) -> str:
"""
Style the transactions table: red if fraudulent
Args:
- state: the state of the app
- index: the index of the row
Returns:
- the style of the row
"""
if values["fraud_confidence"] == "High":
return "red-row"
elif values["fraud_confidence"] == "Medium":
return "orange-row"
return ""
amt_data = gen_amt_data(transactions)
gender_data = gen_gender_data(transactions)
cat_data = gen_cat_data(transactions)
age_data = gen_age_data(transactions)
hour_data = gen_hour_data(transactions)
day_data = gen_day_data(transactions)
month_data = gen_month_data(transactions)
df = df[:DATA_POINTS]
transactions = transactions[:DATA_POINTS]
waterfall_layout = {
"margin": {"b": 150},
}
amt_options = [
{
"marker": {"color": "#4A4", "opacity": 0.8},
"xbins": {"start": 0, "end": 2000, "size": 10},
"histnorm": "probability",
},
{
"marker": {"color": "#A33", "opacity": 0.8, "text": "Compare Data"},
"xbins": {"start": 0, "end": 2000, "size": 10},
"histnorm": "probability",
},
]
amt_layout = {
"barmode": "overlay",
"showlegend": True,
}
confusion_data = pd.DataFrame({"Predicted": [], "Actual": [], "Values": []})
confusion_layout = None
confusion_options = {"colorscale": "YlOrRd", "displayModeBar": False}
confusion_config = {"scrollZoom": False, "displayModeBar": False}
transactions = df
transactions = transactions.drop("Unnamed: 0", axis="columns")
def on_init(state: State) -> None:
"""
Generate the confusion matrix on start
Args:
- state: the state of the app
"""
update_transactions(state)
state.displayed_table = state.true_positives
(
state.true_positives,
state.true_negatives,
state.false_positives,
state.false_negatives,
) = update_threshold(state)
update_table(state)
def update_transactions(state: State) -> None:
"""
Detects frauds in the selected time period
Args:
- state: the state of the app
"""
notify(state, "info", "Predicting fraud...")
state.transactions, state.explaination = generate_transactions(
state, df, model, float(state.threshold), state.start_date, state.end_date
)
state.transactions.reset_index(inplace=True)
number_of_fraud = len(state.transactions[state.transactions["fraud"] == True])
notify(state, "success", f"Predicted {number_of_fraud} fraudulent transactions")
menu_lov = [
("Transactions", Icon("images/transactions.png", "Transactions")),
("Analysis", Icon("images/analysis.png", "Analysis")),
("Fraud Distribution", Icon("images/distribution.png", "Fraud Distribution")),
("Threshold Selection", Icon("images/threshold.png", "Threshold Selection")),
]
page = "Transactions"
def menu_fct(state, var_name, var_value):
"""Function that is called when there is a change in the menu control."""
state.page = var_value["args"][0]
navigate(state, state.page.replace(" ", "-"))
ROOT = """
<|menu|label=Menu|lov={menu_lov}|on_action=menu_fct|>
"""
TRANSACTIONS_PAGE = """
# List of **Transactions**{: .color-primary}
--------------------------------------------------------------------
## Select start and end date for a prediction
<|layout|columns=1 1 3|
Start Date: <|{start_date}|date|>
End Date (excluding): <|{end_date}|date|>
|>
<|Detect Frauds|button|on_action=update_transactions|>
## Select a transaction to explain the prediction
<|{transactions}|table|on_action=explain_pred|style=fraud_style|filter|rebuild|>
"""
ANALYSIS_PAGE = """
# Prediction **Analysis**{: .color-primary}
--------------------------------------------------------------------
<|layout|columns=2 3|
<|card|
## <|{fraud_text}|text|>
<|{exp_data}|chart|type=waterfall|x=Feature|y=Influence|layout={waterfall_layout}|>
|>
<|
## Selected Transaction:
<|{selected_transaction}|table|show_all=True|rebuild|style=fraud_style|>
## Transactions of client: **<|{selected_client}|text|raw|>**{: .color-primary}
<|{specific_transactions}|table|style=fraud_style|filter|on_action=explain_pred|>
|>
|>
"""
CHART_PAGE = """
# Fraud **Distribution**{: .color-primary}
--------------------------------------------------------------------
## Charts of fraud distribution by feature
<|{amt_data}|chart|type=histogram|title=Transaction Amount Distribution|color[2]=red|color[1]=green|name[2]=Fraud|name[1]=Not Fraud|options={amt_options}|layout={amt_layout}|>
<br/><|{gender_data}|chart|type=bar|x=Fraudulence|y[1]=Male|y[2]=Female|title=Distribution of Fraud by Gender|>
<br/><|{cat_data}|chart|type=bar|x=Category|y=Difference|orientation=v|title=Difference in Fraudulence by Category (Positive = Fraudulent)|>
<br/><|{hour_data}|chart|type=bar|x=Hour|y[1]=Not Fraud|y[2]=Fraud|title=Distribution of Fraud by Hour|>
<br/><|{day_data}|chart|type=bar|x=Day|y[1]=Not Fraud|y[2]=Fraud|title=Distribution of Fraud by Day|>
"""
THRESHOLD_PAGE = """
# Threshold **Selection**{: .color-primary}
--------------------------------------------------------------------
## Select a threshold of confidence to filter the transactions
<|{threshold}|slider|on_change=update_threshold|lov=0.05;0.1;0.15;0.2;0.25;0.3;0.35;0.4;0.45;0.5;0.55;0.6;0.65;0.7;0.75;0.8;0.85;0.9;0.95|>
<|layout|columns=1 2|
<|{confusion_data}|chart|type=heatmap|z=Values|x=Predicted|y=Actual|layout={confusion_layout}|options={confusion_options}|plot_config={confusion_config}|height=70vh|>
<|card|
<|{selected_table}|selector|lov=True Positives;False Positives;True Negatives;False Negatives|on_change=update_table|dropdown=True|>
<|{displayed_table}|table|style=fraud_style|filter|rebuild|>
|>
|>
"""
pages = {
"/": ROOT,
"Transactions": TRANSACTIONS_PAGE,
"Analysis": ANALYSIS_PAGE,
"Fraud-Distribution": CHART_PAGE,
"Threshold-Selection": THRESHOLD_PAGE,
}
Gui(pages=pages).run(title="Fraud Detection Demo", dark_mode=False, debug=True)
|
# Create app for dask_taipy_bigdata_DEMO algo.py
import time
import pandas as pd
import dask.dataframe as dd
def task1(path_to_original_data: str):
print("__________________________________________________________")
print("1. TASK 1: DATA PREPROCESSING AND CUSTOMER SCORING ...")
start_time = time.perf_counter() # Start the timer
# Step 1: Read data using Dask
df = dd.read_csv(path_to_original_data)
# Step 2: Simplify the customer scoring formula
df['CUSTOMER_SCORE'] = (
0.5 * df['TotalPurchaseAmount'] / 1000 +
0.3 * df['NumberOfPurchases'] / 10 +
0.2 * df['AverageReviewScore']
)
# Select the scoring columns and materialize the Dask dataframe into pandas
scored_df = df[["CUSTOMER_SCORE", "TotalPurchaseAmount", "NumberOfPurchases", "TotalPurchaseTime"]]
pd_df = scored_df.compute()
end_time = time.perf_counter() # Stop the timer
execution_time = (end_time - start_time) * 1000 # Calculate the time in milliseconds
print(f"Time of Execution: {execution_time:.4f} ms")
return pd_df
def task2(scored_df, payment_threshold, score_threshold):
print("__________________________________________________________")
print("2. TASK 2: FEATURE ENGINEERING AND SEGMENTATION ...")
payment_threshold, score_threshold = float(payment_threshold), float(score_threshold)
start_time = time.perf_counter() # Start the timer
df = scored_df
# Feature: Indicator if customer's total purchase is above the payment threshold
df['HighSpender'] = (df['TotalPurchaseAmount'] > payment_threshold).astype(int)
# Feature: Average time between purchases
df['AverageTimeBetweenPurchases'] = df['TotalPurchaseTime'] / df['NumberOfPurchases']
# Additional computationally intensive features
df['Interaction1'] = df['TotalPurchaseAmount'] * df['NumberOfPurchases']
df['Interaction2'] = df['TotalPurchaseTime'] * df['CUSTOMER_SCORE']
df['PolynomialFeature'] = df['TotalPurchaseAmount'] ** 2
# Segment customers based on the score_threshold
df['ValueSegment'] = ['High Value' if score > score_threshold else 'Low Value' for score in df['CUSTOMER_SCORE']]
end_time = time.perf_counter() # Stop the timer
execution_time = (end_time - start_time) * 1000 # Calculate the time in milliseconds
print(f"Time of Execution: {execution_time:.4f} ms")
return df
def task3(df: pd.DataFrame, metric):
print("__________________________________________________________")
print("3. TASK 3: SEGMENT ANALYSIS ...")
start_time = time.perf_counter() # Start the timer
# Detailed analysis for each segment: mean/median of various metrics
segment_analysis = df.groupby('ValueSegment').agg({
'CUSTOMER_SCORE': metric,
'TotalPurchaseAmount': metric,
'NumberOfPurchases': metric,
'TotalPurchaseTime': metric,
'HighSpender': 'sum', # Total number of high spenders in each segment
'AverageTimeBetweenPurchases': metric
}).reset_index()
end_time = time.perf_counter() # Stop the timer
execution_time = (end_time - start_time) * 1000 # Calculate the time in milliseconds
print(f"Time of Execution: {execution_time:.4f} ms")
return segment_analysis
def task4(df: pd.DataFrame, segment_analysis: pd.DataFrame, summary_statistic_type: str):
print("__________________________________________________________")
print("4. TASK 4: ADDITIONAL ANALYSIS BASED ON SEGMENT ANALYSIS ...")
start_time = time.perf_counter() # Start the timer
# Filter out the High Value customers
high_value_customers = df[df['ValueSegment'] == 'High Value']
# Use summary_statistic_type to calculate different types of summary statistics
if summary_statistic_type == 'mean':
average_purchase_high_value = high_value_customers['TotalPurchaseAmount'].mean()
elif summary_statistic_type == 'median':
average_purchase_high_value = high_value_customers['TotalPurchaseAmount'].median()
elif summary_statistic_type == 'max':
average_purchase_high_value = high_value_customers['TotalPurchaseAmount'].max()
elif summary_statistic_type == 'min':
average_purchase_high_value = high_value_customers['TotalPurchaseAmount'].min()
else:
    raise ValueError(f"Unsupported summary_statistic_type: {summary_statistic_type}")
median_score_high_value = high_value_customers['CUSTOMER_SCORE'].median()
# Fetch the summary statistic for 'TotalPurchaseAmount' for High Value customers from segment_analysis
segment_statistic_high_value = segment_analysis.loc[segment_analysis['ValueSegment'] == 'High Value', 'TotalPurchaseAmount'].values[0]
# Create a DataFrame to hold the results
result_df = pd.DataFrame({
'SummaryStatisticType': [summary_statistic_type],
'AveragePurchaseHighValue': [average_purchase_high_value],
'MedianScoreHighValue': [median_score_high_value],
'SegmentAnalysisHighValue': [segment_statistic_high_value]
})
end_time = time.perf_counter() # Stop the timer
execution_time = (end_time - start_time) * 1000 # Calculate the time in milliseconds
print(f"Time of Execution: {execution_time:.4f} ms")
return result_df
if __name__ == "__main__":
t1 = task1("data/SMALL_amazon_customers_data.csv")
t2 = task2(t1, 1500, 1.5)
t3 = task3(t2, "mean")
t4 = task4(t2, t3, "mean")
print(t4)
|
# Create app for demo-image-classification-part-2 readme.md
# Image Classification Part 2 Using Taipy Core
## Usage
- [Usage](#usage)
- [Image Classification Part 2](#what-is-image-classification-part-2)
- [Directory Structure](#directory-structure)
- [License](#license)
- [Installation](#installation)
- [Contributing](#contributing)
- [Code of conduct](#code-of-conduct)
## What is Image Classification Part 2
Taipy is a Python library for creating Business Applications. More information on our
[website](https://www.taipy.io).
[Image Classification Part 2](https://github.com/Avaiga/image-classification-part-2) is about how to use Taipy Core and Taipy Studio to efficiently create and manage Data and ML pipelines.
### Demo Type
- **Level**: Intermediate
- **Topic**: Taipy-CORE
- **Components/Controls**:
- Taipy CORE: configs, Taipy Studio
## How to run
This demo requires Python 3.8 or newer. Install the dependencies listed in the *Pipfile* and run *main.py*.
## Introduction
This demo is the second part of the Image Classification App using Taipy and TensorFlow; it is recommended to watch the first part or go through its repository first to understand the main functions and tasks.
The YouTube video covers using Taipy Core and Taipy Studio to build pipelines and manage different scenarios. The demo walks through copying the necessary functions into a script file, configuring data nodes, specifying functions for tasks, configuring pipelines, and executing the scenario with Taipy Studio and/or Taipy Core, following the general pattern sketched below.
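As a rough, hedged sketch (the data node names and the `double` function are placeholders, not the demo's actual code), the Taipy Core pattern is: configure data nodes, wire them into tasks, group tasks into pipelines, group pipelines into a scenario, then create and submit the scenario:
from taipy import Config
import taipy as tp
def double(nb):
    # Placeholder task function: reads the input data node, writes the output data node
    return nb * 2
input_cfg = Config.configure_data_node("input_nb", default_data=21)
output_cfg = Config.configure_data_node("output_nb")
task_cfg = Config.configure_task("double_task", double, input_cfg, output_cfg)
pipeline_cfg = Config.configure_pipeline("pipeline", [task_cfg])
scenario_cfg = Config.configure_scenario("scenario", [pipeline_cfg])
if __name__ == "__main__":
    tp.Core().run()  # start the Taipy Core service
    scenario = tp.create_scenario(scenario_cfg)  # instantiate the scenario from its config
    tp.submit(scenario)  # execute every pipeline of the scenario
    print(scenario.output_nb.read())  # 42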
## Directory Structure
- `src/`: Contains the demo source code.
- `docs/`: Contains the images for the documentation.
- `CODE_OF_CONDUCT.md`: Code of conduct for members and contributors of _image-classification-part-2_.
- `CONTRIBUTING.md`: Instructions to contribute to _image-classification-part-2_.
- `INSTALLATION.md`: Instructions to install _image-classification-part-2_.
- `LICENSE`: The Apache 2.0 License.
- `Pipfile`: File used by the Pipenv virtual environment to manage project dependencies.
- `README.md`: Current file.
## License
Copyright 2022 Avaiga Private Limited
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
[http://www.apache.org/licenses/LICENSE-2.0](https://www.apache.org/licenses/LICENSE-2.0.txt)
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
## Installation
Want to install _image-classification-part-2_? Check out our [`INSTALLATION.md`](INSTALLATION.md) file.
## Contributing
Want to help build _image-classification-part-2_? Check out our [`CONTRIBUTING.md`](CONTRIBUTING.md) file.
## Code of conduct
Want to be part of the _image-classification-part-2_ community? Check out our [`CODE_OF_CONDUCT.md`](CODE_OF_CONDUCT.md) file.
|
# Create app for demo-image-classification-part-2 config_from_tp_studio.py
from main_functions import *
from taipy import Config
import taipy as tp
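# Load the scenario configuration that was built and exported with Taipy Studio (TOML file),
# then start the Core service, instantiate the scenario from its config and submit it for execution.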
Config.load('built_with_tp_studio.toml')
scenario_cfg = Config.scenarios['testing_scenario']
tp.Core().run()
main_scenario = tp.create_scenario(scenario_cfg)
tp.submit(main_scenario)
|
# Create app for demo-image-classification-part-2 main_functions.py
import tensorflow as tf
from tensorflow.keras import layers, models
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.utils import to_categorical
import pandas as pd
import numpy as np
class_names = ['AIRPLANE', 'AUTOMOBILE', 'BIRD', 'CAT', 'DEER', 'DOG', 'FROG', 'HORSE', 'SHIP', 'TRUCK']
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()
x_train = x_train / 255.0
y_train = to_categorical(y_train, len(class_names))
x_test = x_test / 255.0
y_test = to_categorical(y_test, len(class_names))
def tf_read(path: str): return tf.keras.models.load_model(path)
def tf_write(model, path: str): model.save(path)
#Task 1.1: Building the base model
def initialize_model(loss_f):
# Creating model base
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu', padding='same', input_shape=(32, 32, 3)))
model.add(layers.Conv2D(32, (3, 3), activation='relu', padding='same'))
model.add(layers.MaxPool2D((2,2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu', padding='same',))
model.add(layers.Conv2D(64, (3, 3), activation='relu', padding='same',))
model.add(layers.MaxPool2D((2,2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu', padding='same',))
model.add(layers.Conv2D(128, (3, 3), activation='relu', padding='same',))
model.add(layers.MaxPool2D((2,2)))
model.add(layers.Flatten())
model.add(layers.Dense(128, activation='relu'))
model.add(layers.Dense(10, activation='softmax'))
model.compile(optimizer='adam',
loss=loss_f,
metrics=['accuracy'])
return model
#Task 1.2: Initial training with a fixed number of epochs
datagen = ImageDataGenerator(
horizontal_flip=True,
width_shift_range=3/32,
height_shift_range=3/32
)
def initial_model_training(n_epochs, model):
print("INITIAL MODEL TRAINING STARTED: ")
h = model.fit(
datagen.flow(x_train, y_train, batch_size=64),
epochs=n_epochs,
validation_data=(x_test, y_test))
training_result = pd.DataFrame.from_dict(h.history)
training_result["N_Epochs"] = range(1,len(training_result)+1)
return training_result, model
#Task 2.1: Merge train with a chosen number of epochs (training + validation set as training)
def merged_train(number_of_epochs,model):
print("MERGED TRAIN STARTED: ")
# merge the training and validation sets
x_all = np.concatenate((x_train, x_test))
y_all = np.concatenate((y_train, y_test))
h = model.fit(
datagen.flow(x_all, y_all, batch_size=64),
epochs=number_of_epochs)
training_result = pd.DataFrame.from_dict(h.history)
training_result["N_Epochs"] = range(1,len(training_result)+1)
return training_result, model
#Task 2.2: Predict image class
def predict_image(image_path, trained_model):
print("PREDICTION TASK STARTED: ")
img_array = tf.keras.utils.load_img(image_path, target_size=(32, 32))
image = tf.keras.utils.img_to_array(img_array)
image = np.expand_dims(image, axis=0) / 255.
prediction_result = class_names[np.argmax(trained_model.predict(image))]
print("Prediction result: {}".format(prediction_result))
return prediction_result
|
# Create app for demo-image-classification-part-2 main.py
from main_functions import *
from taipy import Config
import taipy as tp
#######################################################################################################
##############################################PIPELINE 1###############################################
#######################################################################################################
###TASK 1.1: Building the base model
#input dn
loss_fn_cfg = Config.configure_data_node("loss_fn", default_data='categorical_crossentropy')
#output dn
base_model_cfg = Config.configure_generic_data_node("base_model",
read_fct=tf_read, read_fct_params=('models/base_model',),
write_fct=tf_write, write_fct_params=('models/base_model',))
#task
BUILD_CNN_BASE_cfg = Config.configure_task("BUILD_CNN_BASE",
initialize_model,
loss_fn_cfg,
base_model_cfg)
###TASK 1.2: Initial training with a fixed number of epochs
#input dn
initial_n_epochs_cfg = Config.configure_data_node("initial_n_epochs", default_data=30)
#output dn
initial_train_perf_cfg = Config.configure_data_node("initial_train_perf")
trained_initial_model_cfg = Config.configure_generic_data_node("trained_initial_model",
read_fct=tf_read, read_fct_params=('models/trained_initial_model',),
write_fct=tf_write, write_fct_params=('models/trained_initial_model',))
#task
INITIAL_TRAIN_cfg = Config.configure_task("INITIAL_TRAIN",
initial_model_training,
[initial_n_epochs_cfg, base_model_cfg],
[initial_train_perf_cfg, trained_initial_model_cfg])
#pipeline
pipeline_1_cfg = Config.configure_pipeline("pipeline_1",
[BUILD_CNN_BASE_cfg,
INITIAL_TRAIN_cfg])
#######################################################################################################
##############################################PIPELINE 2###############################################
#######################################################################################################
###TASK 2.1: Merge train with a chosen number of epochs (training + validation set as training)
#input dn
optimal_n_epochs_cfg = Config.configure_data_node("optimal_n_epochs", default_data=13)
#output dn
merged_train_perf_cfg = Config.configure_data_node("merged_train_perf")
merged_trained_model_cfg = Config.configure_generic_data_node("merged_trained_model",
read_fct=tf_read, read_fct_params=('models/merged_trained_model',),
write_fct=tf_write, write_fct_params=('models/merged_trained_model',))
#task
MERGED_TRAIN_cfg = Config.configure_task("MERGED_TRAIN",
merged_train,
[optimal_n_epochs_cfg, base_model_cfg],
[merged_train_perf_cfg, merged_trained_model_cfg])
###TASK 2.2: Make a prediction from an image path
#input dn: the trained model datanode, already set up
image_path_dn_cfg = Config.configure_data_node("image_path_dn", default_data="test_images/dog.jpg")
#output dn
prediction_cfg = Config.configure_data_node("image_prediction")
#task
IMAGE_PREDICT_cfg = Config.configure_task("IMAGE_PREDICT", predict_image,
[image_path_dn_cfg, merged_trained_model_cfg],
[prediction_cfg])
#pipeline
pipeline_2_cfg = Config.configure_pipeline("pipeline_2",
[MERGED_TRAIN_cfg,
IMAGE_PREDICT_cfg])
#######################################################################################################
##############################################Scenario#################################################
#######################################################################################################
scenario_cfg = Config.configure_scenario("testing_scenario",
[pipeline_1_cfg, pipeline_2_cfg])
tp.Core().run()
main_scenario = tp.create_scenario(scenario_cfg)
tp.submit(main_scenario)
Config.export("tpcore.toml")
|
# Create app for demo-edit-log LICENSE.md
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2022 Avaiga Private Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
|
# Create app for demo-edit-log main.py
from taipy.gui import Gui
import taipy as tp
from taipy.gui import notify
from config.config import *
# Variables for bindings
all_scenarios = [] # List of scenarios
all_scenarios_configs = [] # List of scenario configs
all_data_nodes = [] # List of node IDs
current_scenario = None
current_data_node = None
current_scenario_config = None
scenario_name = None
edits = [["","",""]]
value = None
commit_message = ""
create_scenario_dialog_visible = False
set_value_dialog_visible = False
# ====================================================================
def on_init(state):
state.all_scenarios = [(sc.id, sc.name) for sc in tp.get_scenarios()]
state.all_scenarios_configs = [sc.id for sc in Config.scenarios.values()]
def on_change_current_scenario(state):
scenario = tp.get(state.current_scenario[0])
# Propagate to list of nodes:
state.all_data_nodes = [(dn.id, dn.config_id) for dn in scenario.data_nodes.values()]
def on_change(state, var_name: str, var_value):
if var_name == "all_data_nodes":
# Propagate to current data node (pick any...):
if var_value and len(var_value) > 0:
data_node = next(iter(var_value))
state.current_data_node = data_node
if var_name == "current_data_node":
# Propagate to list of edits:
refresh_edit_log(state)
def refresh_edit_log(state):
# Forces a refresh of the edit log:
if state.current_data_node:
data_node_id = state.current_data_node[0]
data_node = tp.get(data_node_id)
state.edits = get_edit_log(data_node) if data_node else []
def create_scenario_clicked(state):
state.scenario_name = None
state.create_scenario_dialog_visible = True
def get_edit_log(data_node):
def _get_edit_fields(edit):
return [str(edit.get("timestamp")), edit.get("job_id"), edit.get("message")]
return [_get_edit_fields(edit) for edit in data_node.edits] if data_node else []
def on_submit_button_clicked(state):
scenario_id = state.current_scenario[0]
scenario = tp.get(scenario_id)
tp.submit(scenario)
# Force refresh of current data node:
refresh_edit_log(state)
notify(state, message=f"Scenario {scenario.name} submitted!")
def on_set_value_clicked(state):
state.set_value_dialog_visible = True
def create_scenario_dialog_action(state, id, action, payload):
state.create_scenario_dialog_visible = False
btn_idx = payload["args"][0]
if btn_idx == 0: # OK button
scenario_cfg = Config.scenarios[state.current_scenario_config]
name = state.scenario_name
scenario = tp.create_scenario(config=scenario_cfg, name=name)
all_scenarios = state.all_scenarios
all_scenarios.append((scenario.id, scenario.name))
state.all_scenarios = all_scenarios
notify(state, message=f"Scenario {scenario.name} created!")
def set_value_dialog_action(state, id, action, payload):
btn_idx = payload["args"][0]
if btn_idx == 0: # OK button
data_node_id = state.current_data_node[0]
node = tp.get(data_node_id)
node.write(state.value, message=state.commit_message)
state.current_data_node = state.current_data_node
state.set_value_dialog_visible = False
history_table_columns = {
"0": {"title": "Date"},
"1": {"title": "Job Id"},
"2": {"title": "Comments"},
}
scenario_manager_page = """
<|part|class_name=card|
## Data Node Selection
<|{current_scenario}|selector|lov={all_scenarios}|dropdown|label=<select a scenario>|on_change=on_change_current_scenario|>
<|{current_data_node}|selector|lov={all_data_nodes}|dropdown|label=<select a data node>|>
<|Create New Scenario...|button|on_action=create_scenario_clicked|>
<|Run Scenario|button|active={current_scenario is not None}|on_action=on_submit_button_clicked|>
|>
<|part|class_name=card|
## Data Node Edit Log
<|{edits}|table|columns={history_table_columns}|width=50vw|>
<|Refresh|button|on_action=refresh_edit_log|>
<|Set value...|button|active={len(edits) > 0}|on_action=on_set_value_clicked|>
|>
<|{create_scenario_dialog_visible}|dialog|title=Create Scenario|labels=OK;Cancel|on_action=create_scenario_dialog_action|
Select a scenario config:
<|{current_scenario_config}|selector|dropdown|lov={all_scenarios_configs}|>
Enter a name for your scenario:
<|{scenario_name}|input|change_delay=10|>
|>
<|{set_value_dialog_visible}|dialog|title=Set value|labels=OK;Cancel|change_delay=10|on_action=set_value_dialog_action|
<|{value}|input|label=Enter a value|>
<|Optional commit message|expandable|expanded=False|
<|{commit_message}|input|>
|>
|>
"""
if __name__ == "__main__":
gui = Gui(page=scenario_manager_page)
core = tp.Core()
tp.run(core, gui, port=8080, dark_mode=False)
|
# Create app for demo-edit-log config.py
from algos.algos import task_function
from taipy import Config
Config.configure_job_executions(mode="standalone", max_nb_of_workers=1)
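# Minimal graph: one input data node feeding a single task whose result is written to an output
# data node, wrapped in a pipeline and a scenario.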
node_start_cfg = Config.configure_data_node(
id="node_start", default_data=[1, 2], description="This is the initial data node."
)
node_end_cfg = Config.configure_data_node(id="node_end", description="This is the result data node.")
task_cfg = Config.configure_task(id="task", input=[node_start_cfg], output=node_end_cfg, function=task_function)
pipeline_cfg = Config.configure_pipeline(id="pipeline", task_configs=[task_cfg])
Config.configure_scenario("My_super_scenario", [pipeline_cfg])
|
# Create app for demo-edit-log algos.py
def task_function(data):
"""A dummy task function"""
print(f"Executing function: {data}")
return data
|
# Create app for demo-face-recognition LICENSE.md
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2022 Avaiga Private Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
|
# Create app for demo-face-recognition find_taipy_gui_dir.py
# This Python script tries to locate the taipy.gui package, and
# prints its absolute path if it finds it.
import importlib.util
import os
taipy_gui = importlib.util.find_spec("taipy.gui")
if taipy_gui is None:
print("Cannot find 'taipy.gui'\nPlease run 'pip install taipy-gui'.")
else:
print(f"Taipy GUI location: {os.path.dirname(taipy_gui.origin)}")
|
# Create app for demo-face-recognition GETTING_STARTED.md
# Getting Started
## Installation
First you need to install the dependencies and build the front-end. Please refer to [INSTALLATION.md](INSTALLATION.md).
## How to use the demo
Once you have started the application, your default Web browser should open automatically. If not, open this URL: [http://127.0.0.1:9090](http://127.0.0.1:9090).
The browser should ask you for permission to use the camera. Press "Allow".
<p align="center">
<img src="first_startup.png" alt="drawing" width="400"/>
</p>
Once allowed, your camera should activate and you will see a live view of the video. Notice that your face should already be detected, with the label "None" displayed. This is because the application does not know you yet.
<p align="center">
<img src="app_running.png" alt="drawing" width="400"/>
</p>
To train the app to recognize your face, press the "Capture" button. This will show a dialog with a captured image. Enter a name for that face and press "Validate".
The more training examples, the better, so add a few more captured images of your face.
Note that the case of the given name matters, so always use the same name for every captured image.
Example: "Greg" and "greg" will be considered as two different names.
<p align="center">
<img src="captured_image.png" alt="drawing" width="400"/>
</p>
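Under the hood (see `faces.py` and the capture handler in `main.py`), each validated capture is stored as an image file in the `images/` folder and one line is appended to `images/data.csv`, mapping the file name to the label you entered. A hypothetical file after three captures could look like this (the UUID file names are made up):
image,label
1b2e4c8a-0f3d-4b61-9a7c-2d5e8f901234.jpg,Greg
7c9d0e2f-5a4b-4c3d-8e1f-6a7b8c9d0e1f.jpg,Greg
3a5b7c9d-1e2f-4a6b-8c0d-2e4f6a8b0c1d.jpg,Greg
Labels are compared verbatim at training time, which is why "Greg" and "greg" end up as two different subjects.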
After, say, six different images, you can ask the system to learn from them by pressing the "Re-train" button.
Depending on the number of images to process, this can take from a second to a dozen seconds.
The application will then be able to recognize the new face, and the name should be displayed on screen!
<p align="center">
<img src="face_recognized.png" alt="drawing" width="400"/>
</p>
|
# Create app for demo-face-recognition main.py
from taipy.gui import Gui
from webcam import Webcam
import cv2
import PIL.Image
import io
import logging
import uuid
from pathlib import Path
from demo.faces import detect_faces, recognize_face, train_face_recognizer
logging.basicConfig(level=logging.DEBUG)
training_data_folder = Path("images")
show_capture_dialog = False
capture_image = False
show_add_captured_images_dialog = False
labeled_faces = [] # Contains rect with label (for UI component)
captured_image = None
captured_label = ""
def on_action_captured_image(state, id, action, payload):
print("Captured image")
choice = payload["args"][0]
if choice == 0:
# Add image to training data:
img = state.captured_image
file_name = str(uuid.uuid4()) + ".jpg"
label = state.captured_label
image_path = Path(training_data_folder, file_name)
with image_path.open("wb") as f:
f.write(img)
label_file_path = Path(training_data_folder, "data.csv")
with label_file_path.open("a") as f:
f.write(f"{file_name},{label}\n")
state.captured_image = None
state.captured_label = ""
state.show_capture_dialog = False
def process_image(state, frame):
print("Processing image...")
found = detect_faces(frame)
labeled_images = []
for rect, img in found:
(label, _) = recognize_face(img)
labeled_images.append((img, rect, label))
# Return this to the UI component so that it can display a rect around recognized faces:
state.labeled_faces = [str([*rect, label]) for (_, rect, label) in labeled_images]
# Capture image (actually we consider only the first detected face)
if state.capture_image and len(labeled_images) > 0:
img = labeled_images[0][0]
label = labeled_images[0][2]
state.captured_image = cv2.imencode(".jpg", img)[1].tobytes()
state.captured_label = label
state.show_capture_dialog = True
state.capture_image = False
def handle_image(state, action, args, value):
print("Handling image...")
payload = value["args"][0]
bytes = payload["data"]
logging.debug(f"Received data: {len(bytes)}")
temp_path = "temp.png"
# Write Data into temp file (OpenCV is unable to load from memory)
image = PIL.Image.open(io.BytesIO(bytes))
image.save(temp_path)
# Load image file (cv2.imread returns None instead of raising if the file cannot be read)
img = cv2.imread(temp_path, cv2.IMREAD_UNCHANGED)
if img is None:
    logging.error("Failed to read image file")
    return
process_image(state, img)
# Finished: remove the temporary file
Path(temp_path).unlink(missing_ok=True)
def button_retrain_clicked(state):
print("Retraining...")
train_face_recognizer(training_data_folder)
webcam_md = """<|toggle|theme|>
<container|container|part|
# Face **recognition**{: .color-primary}
This demo shows how to use [Taipy](https://taipy.io/) with a [custom GUI component](https://docs.taipy.io/en/latest/manuals/gui/extension/) to capture video from your webcam and do realtime face detection. What this application demonstrates:
- How to build a complex custom UI component for Taipy.
- How to detect and recognize faces in the image in real time using [OpenCV](https://opencv.org/).
<br/>
<card|card p-half|part|
## **Webcam**{: .color-primary} component
<|text-center|part|
<|webcam.Webcam|faces={labeled_faces}|classname=face_detector|id=my_face_detector|on_data_receive=handle_image|sampling_rate=100|>
<|Capture|button|on_action={lambda s: s.assign("capture_image", True)}|>
<|RE-train|button|on_action=button_retrain_clicked|>
>
|card>
|container>
<|{show_capture_dialog}|dialog|labels=Validate;Cancel|on_action=on_action_captured_image|title=Add new training image|
<|{captured_image}|image|width=300px|height=300px|>
<|{captured_label}|input|>
|>
"""
if __name__ == "__main__":
# Create dir where the pictures will be stored
if not training_data_folder.exists():
training_data_folder.mkdir()
train_face_recognizer(training_data_folder)
gui = Gui(webcam_md)
gui.add_library(Webcam())
gui.run(port=9090)
|
# Create app for demo-face-recognition faces.py
import cv2
from pathlib import Path
import os
import numpy as np
import logging
from .image import crop_image
import pandas as pd
logging.basicConfig(level=logging.DEBUG)
# Create our face detector. The HAAR and LBP classifiers are roughly equivalent and both give good results.
# It is up to you to choose one or the other.
face_detector = cv2.CascadeClassifier("classifiers/haarcascade_frontalface_default.xml")
# face_cascade = cv2.CascadeClassifier("classifiers/lbpcascade_frontalface_improved.xml")
# Create our face recognizer
face_recognizer = cv2.face.LBPHFaceRecognizer_create()
# The subjects that can be recognized
subjects = {}
FACE_DETECTOR_SCALE_FACTOR = 1.1
FACE_DETECTOR_MIN_NEIGHBORS = 5
def detect_faces(image):
gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
detected_faces = face_detector.detectMultiScale(
gray_image,
scaleFactor=FACE_DETECTOR_SCALE_FACTOR,
minNeighbors=FACE_DETECTOR_MIN_NEIGHBORS,
)
if len(detected_faces) == 0:
return []
return [(rect, crop_image(image, rect)) for rect in detected_faces]
def recognize_face(image):
gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
if len(subjects) == 0:
# No subject, the model hasn't been trained, let's do nothing.
return (None, None)
try:
face = face_recognizer.predict(gray_image)
except Exception:
    logging.warning("Could not run face recognizer", exc_info=True)
    return (None, None)
# Return the name of the recognized subject and the confidence level
return (subjects[face[0]], face[1])
def train_face_recognizer(training_data_directory="images"):
data_file_path = Path(training_data_directory, "data.csv")
if not data_file_path.exists():
# Create file
with data_file_path.open("w") as f:
f.write("image,label\n")
# Load file as CSV file
data = pd.read_csv(data_file_path, delimiter=",", header=0).to_numpy()
# Subjects that can be recognized from these data:
identified_subjects = np.unique(data[:, 1])
global subjects
if len(identified_subjects) == 0:
# No subject... We stop here
subjects = {}
return
else:
# Update subjects (persons who can be recognized)
subjects = {e[0]: e[1] for e in enumerate(identified_subjects)}
# Prepare training data
faces, labels = [], []
for row in data:
file_name = row[0]
label = np.where(identified_subjects == row[1])[0][0]
file_path = Path(training_data_directory, file_name)
if os.path.exists(file_path):
img = cv2.imread(str(file_path), cv2.IMREAD_GRAYSCALE)
faces.append(img)
labels.append(label)
# Run training!
logging.debug(f"Run training for {subjects}...")
face_recognizer.train(faces, np.array(labels))
|
# Create app for demo-face-recognition __init__.py
|
# Create app for demo-face-recognition image.py
def crop_image(img, rect):
"""An utility function to crop an image to the given rect"""
x, y, w, h = rect
return img[y : y + h, x : x + w]
|
# Create app for demo-face-recognition __init__.py
from .webcam import Webcam
|
# Create app for demo-face-recognition webcam.py
from taipy.gui.extension import ElementLibrary, Element, ElementProperty, PropertyType
class Webcam(ElementLibrary):
def get_name(self) -> str:
return "webcam"
def get_elements(self) -> dict:
return {
"Webcam": Element(
"faces",
{
"faces": ElementProperty(PropertyType.dynamic_list),
"id": ElementProperty(PropertyType.string),
"classname": ElementProperty(PropertyType.dynamic_string),
"on_data_receive": ElementProperty(PropertyType.string),
"sampling_rate": ElementProperty(PropertyType.number),
},
react_component="Webcam",
)
}
def get_scripts(self) -> list[str]:
return ["webui/dist/webcam.js"]
|