GurgenGulay committed
Commit 51afa48 · verified · 1 Parent(s): da75edc

Update utils.py

Files changed (1)
  1. utils.py +19 -10
utils.py CHANGED
@@ -1,12 +1,25 @@
 from transformers import pipeline
 
-# Create the pipeline globally
+
 pipe = pipeline("text2text-generation", model="google-t5/t5-base", device="cpu")
+pipe.model.config.pad_token_id = pipe.tokenizer.eos_token_id
+
+def generate_lesson_from_transcript(doc_text):
+    try:
+        generated_text = pipe(doc_text, max_length=100, truncation=True)[0]['generated_text']
+        output_path = "/tmp/generated_output.txt"
+
+        with open(output_path, "w") as file:
+            file.write(generated_text)
+
+        return generated_text, output_path
+
+    except Exception as e:
+        print(f"An error occurred: {str(e)}")
+        return "An error occurred", None
+
 
 def split_text_into_chunks(text, chunk_size=1000):
-    """
-    Splits the text into chunks of a given number of words.
-    """
     words = text.split()
     chunks = []
     for i in range(0, len(words), chunk_size):
@@ -14,20 +27,16 @@ def split_text_into_chunks(text, chunk_size=1000):
         chunks.append(chunk)
     return chunks
 
+
 def generate_lesson_from_chunks(chunks):
-    """
-    Runs the model on each chunk and returns the result.
-    """
     generated_texts = []
     for chunk in chunks:
         generated_text = pipe(chunk, max_length=500)[0]['generated_text']
         generated_texts.append(generated_text)
     return ' '.join(generated_texts)
 
+
 def process_large_text(text):
-    """
-    Processes a large text and returns the result.
-    """
     chunks = split_text_into_chunks(text, chunk_size=1000)
     generated_text = generate_lesson_from_chunks(chunks)
     return generated_text
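
For context, a minimal usage sketch of the module after this commit. This is a hypothetical driver script, not part of the commit: the `utils` import path and the sample transcript string are assumptions, and output lengths are truncated for display only.

    # hypothetical driver script; assumes this file is importable as utils.py
    from utils import generate_lesson_from_transcript, process_large_text

    # made-up sample transcript, repeated to simulate a longer input
    transcript = "Photosynthesis converts light energy into chemical energy. " * 50

    # new path added in this commit: single pipeline call (max_length=100),
    # result is also written to /tmp/generated_output.txt
    lesson, path = generate_lesson_from_transcript(transcript)
    print(lesson[:200], path)

    # pre-existing path: split into 1000-word chunks, generate per chunk, rejoin
    print(process_large_text(transcript)[:200])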