oliveiracwb committed on
Commit
050fc7d
·
1 Parent(s): 507081b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +72 -2
app.py CHANGED
@@ -1,4 +1,74 @@
 
 
 
 
1
  import streamlit as st
2
 
3
- x = st.slider('Select a value')
4
- st.write(x, 'squared is', x * x)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from contextlib import nullcontext
2
+ import torch
3
+ import tiktoken
4
+ from model import GPTConfig, GPT
5
  import streamlit as st
6
 
7
# -----------------------------------------------------------------------------
# Page chrome
# -----------------------------------------------------------------------------
st.set_page_config(page_title="Translation Demo", page_icon=":milky_way:", layout="wide")
st.subheader("Gerador Canções de musica brasileira")

# ---------------------------------------------- sampling configuration
max_new_tokens = 200  # number of tokens generated in each sample
temperature = 0.8     # 1.0 = no change, < 1.0 = less random, > 1.0 = more random, in predictions
top_k = 200           # restrict sampling to the top_k most likely next tokens
seed = 1337
device = 'cpu'
dtype = 'bfloat16'
# -----------------------------------------------------------------------------

# NOTE(review): ptdtype is computed but never used — ctx is a plain nullcontext(),
# so generation runs in default float32 on CPU. Kept for parity with the nanoGPT
# sampling script; wire it into an autocast context if mixed precision is wanted.
ptdtype = {'float32': torch.float32, 'bfloat16': torch.bfloat16, 'float16': torch.float16}[dtype]
ctx = nullcontext()

# Load the trained nanoGPT checkpoint and rebuild the model from its saved config.
# TODO(review): Streamlit re-executes this script on every interaction, so the
# checkpoint is reloaded each run — consider caching the loaded model.
checkpoint = torch.load('ckpt.pt', map_location='cpu')
gptconf = GPTConfig(**checkpoint['model_args'])
model = GPT(gptconf)
state_dict = checkpoint['model']
model.load_state_dict(state_dict)
# BUG FIX: switch to inference mode — without eval(), dropout layers stay
# active and every generation is needlessly degraded/noisy.
model.eval()
30
def gera_texto(start, temperature, max_new_tokens, seed, num_samples, top_k=top_k):
    """Generate `num_samples` song continuations of prompt `start` and render
    each one in its own Streamlit text area.

    Args:
        start: initial text (the beginning of the song) to condition on.
        temperature: sampling temperature passed to model.generate.
        max_new_tokens: number of tokens to generate per sample.
        seed: torch RNG seed, for reproducible sampling.
        num_samples: how many independent continuations to produce.
        top_k: restrict sampling to the top_k most likely tokens
            (defaults to the module-level setting, preserving old behavior).
    """
    torch.manual_seed(seed)
    enc = tiktoken.get_encoding("gpt2")

    # Encode the prompt; "\n" is explicitly allowed so multi-line prompts
    # survive tiktoken's special-token check.
    start_ids = enc.encode(start, allowed_special={"\n"})
    # Use the module-level `device` setting (was hard-coded to 'cpu') and add
    # a leading batch dimension: shape (1, len(start_ids)).
    x = torch.tensor(start_ids, dtype=torch.long, device=device)[None, ...]

    # Run generation; no_grad avoids building autograd graphs during sampling.
    with torch.no_grad():
        with ctx:
            for k in range(num_samples):
                y = model.generate(x, max_new_tokens, temperature=temperature, top_k=top_k)
                geracao = enc.decode(y[0].tolist())
                st.text_area("Gerado {}".format(k + 1), value=geracao, height=300, placeholder="")
49
+
50
# Input form: sampling controls in four columns plus the seed phrase,
# submitted as a single batch so the model only runs on button press.
with st.form("my_form"):
    col_samples, col_seed, col_tokens, col_temp = st.columns(4)
    with col_samples:
        n_samples = st.slider('Exemplos', min_value=1, max_value=10, value=5, step=1)
    with col_seed:
        rng_seed = st.slider('Seed', min_value=1, max_value=1500, value=1337, step=1)
    with col_tokens:
        n_tokens = st.slider('Num Tokens', min_value=20, max_value=500, value=160, step=5)
    with col_temp:
        temp_value = st.number_input("Temperatura",min_value=0.1,max_value=2.0,value=0.8,step=0.1,format="%.1f")

    prompt = st.text_area("Escolha uma frase inicial", value="Voce e tao linda", placeholder="Entre com o inicio da musica...")

    # Only generate after an explicit submit; the spinner covers the whole run.
    if st.form_submit_button("Gerar músicas"):
        with st.spinner("Gerando exemplos ..."):
            gera_texto(prompt, temp_value, n_tokens, rng_seed, n_samples)
67
+
68
# --- Footer: training notes and credits --------------------------------------
# Typo fix in the user-facing string: "stratch" -> "scratch".
st.write("8 milhões de tokens, 16 camadas de atenção. Três dias de treinamento from scratch")
st.write("A preparação dos dados demorou um longo final de semana.")
st.write("Agradecimentos ao [Gabriel](https://www.linkedin.com/in/go2035/) pela ajuda no scrap.")
st.markdown("""---""")
# Raw HTML contact line — requires unsafe_allow_html=True to render as markup.
original_title = '<p style="font-family:Verdana; color:Blue; font-size: 12px;">Gosta de IA ou é um maker por natureza ? Conecte-se ao meu <a href=https://www.linkedin.com/in/israeloliveira2035/> linkedin</a> e vamos conversar !</p>'
st.markdown(original_title, unsafe_allow_html=True)
st.write("Made with [nanoGPT](https://github.com/karpathy/nanoGPT) e [ColabPro+](https://colab.research.google.com/signup)")