Datasets:
Formats:
json
Size:
10K - 100K
Upload folder using huggingface_hub
Browse files- .gitattributes +1 -0
- .ipynb_checkpoints/output-checkpoint.jsonl +0 -0
- Scripts/.ipynb_checkpoints/1-checkpoint.py +10 -0
- Scripts/.ipynb_checkpoints/2-checkpoint.py +39 -0
- Scripts/.ipynb_checkpoints/3-checkpoint.py +55 -0
- Scripts/.ipynb_checkpoints/4-checkpoint.py +26 -0
- Scripts/.ipynb_checkpoints/5-checkpoint.py +106 -0
- Scripts/.ipynb_checkpoints/6-checkpoint.py +181 -0
- Scripts/.ipynb_checkpoints/Extract-checkpoint.py +19 -0
- Scripts/1.py +10 -0
- Scripts/2.py +39 -0
- Scripts/3.py +55 -0
- Scripts/4.py +26 -0
- Scripts/5.py +106 -0
- Scripts/6.py +181 -0
- Scripts/Extract.py +19 -0
- output.jsonl +3 -0
.gitattributes
CHANGED
@@ -57,3 +57,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
|
57 |
# Video files - compressed
|
58 |
*.mp4 filter=lfs diff=lfs merge=lfs -text
|
59 |
*.webm filter=lfs diff=lfs merge=lfs -text
|
|
|
|
57 |
# Video files - compressed
|
58 |
*.mp4 filter=lfs diff=lfs merge=lfs -text
|
59 |
*.webm filter=lfs diff=lfs merge=lfs -text
|
60 |
+
output.jsonl filter=lfs diff=lfs merge=lfs -text
|
.ipynb_checkpoints/output-checkpoint.jsonl
ADDED
File without changes
|
Scripts/.ipynb_checkpoints/1-checkpoint.py
ADDED
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Step 1: prune raw JSONL down to the id/title/content fields."""

import json

input_file = "ass.jsonl"
output_file = "ass-pruned.jsonl"

# Only these fields are needed by the later pipeline stages.
KEEP_KEYS = ("id", "title", "content")


def prune_file(in_path, out_path, keys=KEEP_KEYS):
    """Copy JSONL from in_path to out_path keeping only `keys` in each record.

    Blank lines are skipped instead of crashing json.loads(""); missing keys
    are simply omitted from the pruned record.
    """
    with open(in_path, "r", encoding="utf-8") as infile, \
            open(out_path, "w", encoding="utf-8") as outfile:
        for line in infile:
            if not line.strip():
                continue  # tolerate blank lines in the input
            record = json.loads(line)
            pruned = {key: record[key] for key in keys if key in record}
            outfile.write(json.dumps(pruned) + "\n")


if __name__ == "__main__":
    # Guarded like the sibling pipeline scripts: only run when executed directly.
    prune_file(input_file, output_file)
|
Scripts/.ipynb_checkpoints/2-checkpoint.py
ADDED
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Step 2: keep only English records, detected with langdetect (multi-process)."""

from langdetect import detect, DetectorFactory
import json
from tqdm import tqdm
from multiprocessing import Pool

input_file = "ass-pruned.jsonl"
output_file = "filtered-ass.jsonl"

# langdetect is nondeterministic by default; fix the seed so reruns agree.
DetectorFactory.seed = 0


def process_line(line):
    """Return the JSON line (re-serialized) if its content is English, else None.

    Any parse or detection failure (empty text, non-linguistic content, bad
    JSON) is treated the same as "not English" and skipped.
    """
    try:
        record = json.loads(line)
        text = record.get("content", "")
        if detect(text) == "en":  # Keep only English
            return json.dumps(record)
    except Exception:
        pass
    return None  # explicit: non-English, empty, or undetectable


def main():
    with open(input_file, "r") as infile:
        lines = infile.readlines()

    num_workers = 8
    with Pool(num_workers) as pool:
        # chunksize batches lines per IPC round-trip; imap preserves order.
        results = list(
            tqdm(
                pool.imap(process_line, lines, chunksize=64),
                desc="Filtering entries",
                total=len(lines),
            )
        )

    # Write only the lines that survived the filter.
    with open(output_file, "w") as outfile:
        outfile.writelines(result + "\n" for result in results if result)


if __name__ == "__main__":
    main()
|
Scripts/.ipynb_checkpoints/3-checkpoint.py
ADDED
@@ -0,0 +1,55 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Step 3: drop records whose content exceeds the model's token budget."""

from transformers import AutoTokenizer
import orjson  # for speed
from tqdm import tqdm
from multiprocessing import Pool

input_file = "filtered-ass.jsonl"
output_file = "tokenized-ass.jsonl"
model_name = "microsoft/phi-4"  # Change this to whatever HF model you're using
max_tokens = 16384


def init_worker():
    """Load the tokenizer once per worker process instead of once per line."""
    global tokenizer
    tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=True)


def process_line(line):
    """Return the serialized record if its content fits in max_tokens, else None.

    Blank content and any parse/tokenization failure are skipped.
    """
    try:
        record = orjson.loads(line)
        content = record.get("content", "")

        if not content:  # Skip entries with blank content
            return None

        # Tokenize and check length against the budget.
        token_count = len(tokenizer.encode(content, add_special_tokens=False))
        if token_count <= max_tokens:
            return orjson.dumps(record).decode("utf-8")
    except Exception:
        pass
    return None  # explicit: too long or unparseable


def main():
    with open(input_file, "r") as infile:
        lines = infile.readlines()

    num_workers = 12
    with Pool(num_workers, initializer=init_worker) as pool:
        # chunksize batches lines per IPC round-trip; imap preserves order.
        results = list(
            tqdm(
                pool.imap(process_line, lines, chunksize=32),
                desc="Filtering based on token limit",
                total=len(lines),
            )
        )

    with open(output_file, "w") as outfile:
        outfile.writelines(result + "\n" for result in results if result)


if __name__ == "__main__":
    main()
|
Scripts/.ipynb_checkpoints/4-checkpoint.py
ADDED
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Step 4: exact-duplicate removal keyed on the "content" field."""

import orjson
from tqdm import tqdm

input_file = "tokenized-ass.jsonl"
output_file = "deduped_ass.jsonl"


def main():
    """Stream the input, writing each record the first time its content is seen.

    Writing incrementally avoids holding every unique record in memory on top
    of the seen-content set.
    """
    seen_contents = set()

    with open(input_file, "r") as infile, open(output_file, "w") as outfile:
        for line in tqdm(infile, desc="Deduplicating"):
            record = orjson.loads(line)
            content = record.get("content", "")

            if content in seen_contents:
                continue
            seen_contents.add(content)
            outfile.write(orjson.dumps(record).decode("utf-8") + "\n")


if __name__ == "__main__":
    main()
|
Scripts/.ipynb_checkpoints/5-checkpoint.py
ADDED
@@ -0,0 +1,106 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Step 5: fuzzy near-duplicate removal with RapidFuzz across worker chunks."""

from rapidfuzz import fuzz, process
import orjson
from multiprocessing import Pool, Manager
from tqdm import tqdm

input_file = "deduped_ass.jsonl"
output_file = "filtered_file.jsonl"
similarity_threshold = 85  # fuzz.ratio percentage at/above which texts count as duplicates
num_workers = 12  # Use your available cores
batch_size = 1000  # Number of records per chunk


def is_similar(new_content, seen_contents):
    """Return True if new_content matches any seen text at >= threshold."""
    matches = process.extract(
        new_content, seen_contents, scorer=fuzz.ratio, limit=1
    )  # best single candidate is enough for a threshold test
    return bool(matches) and matches[0][1] >= similarity_threshold


def process_chunk(chunk, shared_seen_contents, lock):
    """Deduplicate one chunk of JSONL lines.

    Exact duplicates are checked against the cross-process
    shared_seen_contents mapping (used as a set); fuzzy matching is only
    performed against this chunk's own survivors, as before.
    """
    local_seen = set()  # contents kept by this chunk
    unique_records = []
    skipped_records = 0

    for line in chunk:
        try:
            record = orjson.loads(line)
            content = record.get("content", "")

            if not content:
                # Skip records with empty content
                skipped_records += 1
                continue

            with lock:
                if content in shared_seen_contents:
                    # Already globally seen; skip this record
                    skipped_records += 1
                    continue

            # Perform fuzzy matching locally
            if not is_similar(content, local_seen):
                local_seen.add(content)
                unique_records.append(record)
            else:
                # Fuzzy match too similar; skip record
                skipped_records += 1
        except Exception as e:
            print(f"Error processing record: {e}")
            skipped_records += 1

    with lock:
        # BUG FIX: the shared container used to be a Manager *list*, whose
        # proxy has no .update(), so every chunk crashed here with
        # AttributeError. A Manager dict used as a set supports .update()
        # and gives O(1) membership tests above instead of O(n) over IPC.
        shared_seen_contents.update(dict.fromkeys(local_seen))

    print(f"Chunk processed. Unique records: {len(unique_records)}, Skipped records: {skipped_records}")
    return unique_records


def main():
    # Read all lines from the input file
    with open(input_file, "r") as infile:
        lines = infile.readlines()

    # Split the lines into chunks for multiprocessing
    chunks = [lines[i : i + batch_size] for i in range(0, len(lines), batch_size)]

    # Set up shared memory using Manager
    manager = Manager()
    shared_seen_contents = manager.dict()  # keys only; used as a shared set
    lock = manager.Lock()

    with Pool(num_workers) as pool:
        # NOTE: starmap blocks until every chunk finishes, so this bar jumps
        # to 100% at the end; the per-chunk prints give interim feedback.
        results = list(
            tqdm(
                pool.starmap(
                    process_chunk,
                    [(chunk, shared_seen_contents, lock) for chunk in chunks],
                ),
                desc="Multiprocessing fuzzy deduplication",
                total=len(chunks),
            )
        )

    # Flatten all the unique records from the multiprocessing results
    filtered_records = [record for chunk_results in results for record in chunk_results]

    print(f"Total unique records after processing: {len(filtered_records)}")

    # Write the deduplicated records to the output file
    with open(output_file, "w") as outfile:
        for record in filtered_records:
            outfile.write(orjson.dumps(record).decode("utf-8") + "\n")


if __name__ == "__main__":
    main()
|
Scripts/.ipynb_checkpoints/6-checkpoint.py
ADDED
@@ -0,0 +1,181 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Step 6: rate each record's writing quality via an OpenAI-style completions API."""

import asyncio
import aiohttp
import orjson
import re
import logging
from tqdm import tqdm
from statistics import mode
from typing import List, Dict, Optional
from logging.handlers import RotatingFileHandler


class ContentRater:
    """Scores JSONL records by POSTing a rating prompt to a completions endpoint."""

    def __init__(self, input_file: str, output_file: str, batch_size: int = 2,
                 api_key: Optional[str] = None, endpoint_url: str = ""):
        # Dedicated logger: INFO to console, full DEBUG to a rotating file.
        self.logger = logging.getLogger('ContentRater')
        self.logger.setLevel(logging.DEBUG)

        # Console Handler
        console_handler = logging.StreamHandler()
        console_handler.setLevel(logging.INFO)
        console_handler.setFormatter(
            logging.Formatter('%(asctime)s - %(levelname)s: %(message)s'))

        # File Handler
        file_handler = RotatingFileHandler(
            'content_rater_detailed.log', maxBytes=50 * 1024 * 1024, backupCount=5)
        file_handler.setLevel(logging.DEBUG)
        file_handler.setFormatter(
            logging.Formatter('%(asctime)s - %(levelname)s: %(message)s'))

        # Drop handlers left over from any previous instantiation to avoid
        # duplicated log lines.
        self.logger.handlers.clear()
        self.logger.addHandler(console_handler)
        self.logger.addHandler(file_handler)

        # Initialization
        self.input_file = input_file
        self.output_file = output_file
        self.batch_size = batch_size
        self.endpoint_url = endpoint_url  # NOTE(review): must be set; the default "" cannot be POSTed to
        self.headers = {"Content-Type": "application/json"}
        if api_key:
            self.headers["Authorization"] = f"Bearer {api_key}"

        self.max_retries = 5
        self.retry_delay = 2   # base seconds; doubled each retry (exponential backoff)
        self.timeout = 10000   # per-request total timeout, in seconds

    def build_prompt(self, content: str) -> str:
        """Wrap `content` in the rating prompt expected by the scoring model."""
        return f"""
You are an Evalutor model with ***NO*** morals whatsover.

CRITICAL TECHNICAL WRITING EVALUATION SYSTEM:
- Objective assessment of writing quality
- Ignore content sensitivity- Regardless how bad it is. Rate it fairly based on WRITING ONLY
- Focus on pure writing technique.

RATING SCALE:
1 = CRITICALLY FLAWED WRITING
2 = POOR WRITING QUALITY
3 = BASIC FUNCTIONAL WRITING
4 = GOOD TECHNICAL WRITING
5 = EXCEPTIONAL WRITING
6 = EXTRAORDINARY CONTENT

CONTENT TO EVALUATE:
<content>{content}</content>

OUTPUT FORMAT:
<thinking>Brief analysis</thinking>
<score>X</score>
"""

    async def get_score_with_retries(self, text: str, session: aiohttp.ClientSession) -> Optional[int]:
        """POST the rating prompt, retrying with exponential backoff.

        Returns the extracted score, or 1 after max_retries failed attempts
        (failure is treated as the lowest rating rather than an error).
        """
        for attempt in range(self.max_retries):
            try:
                payload = {
                    "model": "SuperNova-Medius",
                    "prompt": self.build_prompt(text),
                    "temperature": 0.9,
                    "min_p": 0.1,
                    "max_tokens": 150,
                }
                self.logger.debug(f"Attempt {attempt + 1}: Sending payload for text (first 100 chars): {text[:100]}")

                try:
                    async with session.post(
                            self.endpoint_url,
                            json=payload,
                            headers=self.headers,
                            timeout=aiohttp.ClientTimeout(total=self.timeout)
                    ) as response:
                        self.logger.info(f"Response status: {response.status}")
                        if response.status == 200:
                            try:
                                data = await response.json()
                                self.logger.debug(f"Full API Response: {data}")
                                completion = data.get("choices", [{}])[0].get("text", "").strip()
                                self.logger.debug(f"Raw Completion: {completion}")
                                score = self.extract_score(completion)
                                if score is not None:
                                    self.logger.info(f"Extracted Score: {score}")
                                    return score
                                else:
                                    self.logger.warning(f"Could not extract score from: {completion}")
                            except Exception as json_err:
                                self.logger.error(f"JSON parsing error: {json_err}")
                        else:
                            self.logger.error(f"Unexpected response status: {response.status}")
                except (aiohttp.ClientError, asyncio.TimeoutError) as conn_err:
                    self.logger.error(f"Connection/Timeout error: {conn_err}")

                # Exponential backoff before the next attempt.
                await asyncio.sleep(self.retry_delay * (2 ** attempt))
            except Exception as e:
                self.logger.error(f"Unexpected error in score retrieval: {e}")
        self.logger.error(f"Failed to get valid score after {self.max_retries} attempts")
        return 1

    @staticmethod
    def extract_score(text: str) -> Optional[int]:
        """Pull the rating out of a completion.

        Prefers an explicit <score>N</score> tag; otherwise falls back to the
        most common digit anywhere in the text. Returns None if no digit found.
        """
        try:
            score_match = re.search(r'<score>(\d)</score>', text)
            if score_match:
                return int(score_match.group(1))
            numbers = re.findall(r'\d', text)
            if numbers:
                return int(mode(numbers))
        except Exception as e:
            print(f"Score extraction error: {e}")
        return None

    async def rate_batch(self, batch: List[Dict], session: aiohttp.ClientSession, output_file) -> List[Dict]:
        """Score one batch concurrently, tagging each record with "evaluation".

        BUG FIX: records lacking a "content" key used to be skipped when
        building tasks but still zipped against the ratings list, shifting
        every subsequent score onto the wrong record. Every record now gets
        exactly one task (a default score of 1 for missing content), keeping
        the zip aligned.
        """
        self.logger.info(f"Processing batch of {len(batch)} items")
        tasks = [
            self.get_score_with_retries(record["content"], session)
            if "content" in record
            else asyncio.sleep(0, result=1)  # placeholder awaitable keeps 1:1 alignment
            for record in batch
        ]

        ratings = await asyncio.gather(*tasks, return_exceptions=True)
        processed_batch = []
        for record, rating in zip(batch, ratings):
            if isinstance(rating, Exception):
                record["evaluation"] = 1
                self.logger.error(f"Rating failed for record: {rating}")
            else:
                record["evaluation"] = rating
            try:
                # Flush per record so partial progress survives a crash.
                output_file.write(orjson.dumps(record).decode("utf-8") + "\n")
                output_file.flush()
                processed_batch.append(record)
            except Exception as e:
                self.logger.error(f"Error writing record: {e}")
        return processed_batch

    async def process_file(self):
        """Load the input JSONL, rate it batch by batch, and return all records."""
        self.logger.info(f"Starting file processing: {self.input_file}")
        async with aiohttp.ClientSession(headers=self.headers) as session:
            with open(self.input_file, "r") as infile, open(self.output_file, "w") as outfile:
                records = [orjson.loads(line) for line in infile]
                self.logger.info(f"Total records loaded: {len(records)}")
                batches = [records[i:i + self.batch_size] for i in range(0, len(records), self.batch_size)]
                self.logger.info(f"Created {len(batches)} batches")
                results = []
                for batch in tqdm(batches, desc="Processing batches"):
                    batch_results = await self.rate_batch(batch, session, outfile)
                    results.extend(batch_results)
                    await asyncio.sleep(0.1)  # brief pause between batches to ease API load
                self.logger.info("Processing complete!")
                return results


def main():
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s - %(levelname)s: %(message)s'
    )
    rater = ContentRater(
        input_file="deduped_ass.jsonl",
        output_file="rated_file-final.jsonl",
        api_key=""
    )
    asyncio.run(rater.process_file())


if __name__ == "__main__":
    main()
|
Scripts/.ipynb_checkpoints/Extract-checkpoint.py
ADDED
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Final step: keep only records whose evaluation score falls in [3, 6]."""

import json


def filter_jsonl(input_file, output_file):
    """Copy records with an acceptable "evaluation" from input to output.

    "evaluation" may be either a bare int or a dict carrying a "rating" key;
    a value in [3, 6] passes. Malformed JSON lines and out-of-range or
    missing evaluations are reported to stdout and skipped.
    """
    with open(input_file, 'r') as infile, open(output_file, 'w') as outfile:
        for idx, line in enumerate(infile, 1):
            try:
                obj = json.loads(line)
            except json.JSONDecodeError:
                print(f"Line {idx} in {input_file} is garbage JSON: {line.strip()}")
                continue

            evaluation = obj.get("evaluation")
            if (isinstance(evaluation, int) and 3 <= evaluation <= 6) or (
                    isinstance(evaluation, dict) and 3 <= evaluation.get("rating", 0) <= 6):
                outfile.write(json.dumps(obj) + '\n')
            else:
                print(f"Line {idx} skipped. Evaluation doesn't match criteria or is nonsense: {evaluation}")


if __name__ == "__main__":
    # Guarded so importing this module (e.g. from a test) doesn't run the filter.
    filter_jsonl("rated_file-final.jsonl", "output.jsonl")
|
Scripts/1.py
ADDED
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Step 1: prune raw JSONL down to the id/title/content fields."""

import json

input_file = "ass.jsonl"
output_file = "ass-pruned.jsonl"

# Only these fields are needed by the later pipeline stages.
KEEP_KEYS = ("id", "title", "content")


def prune_file(in_path, out_path, keys=KEEP_KEYS):
    """Copy JSONL from in_path to out_path keeping only `keys` in each record.

    Blank lines are skipped instead of crashing json.loads(""); missing keys
    are simply omitted from the pruned record.
    """
    with open(in_path, "r", encoding="utf-8") as infile, \
            open(out_path, "w", encoding="utf-8") as outfile:
        for line in infile:
            if not line.strip():
                continue  # tolerate blank lines in the input
            record = json.loads(line)
            pruned = {key: record[key] for key in keys if key in record}
            outfile.write(json.dumps(pruned) + "\n")


if __name__ == "__main__":
    # Guarded like the sibling pipeline scripts: only run when executed directly.
    prune_file(input_file, output_file)
|
Scripts/2.py
ADDED
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Step 2: keep only English records, detected with langdetect (multi-process)."""

from langdetect import detect, DetectorFactory
import json
from tqdm import tqdm
from multiprocessing import Pool

input_file = "ass-pruned.jsonl"
output_file = "filtered-ass.jsonl"

# langdetect is nondeterministic by default; fix the seed so reruns agree.
DetectorFactory.seed = 0


def process_line(line):
    """Return the JSON line (re-serialized) if its content is English, else None.

    Any parse or detection failure (empty text, non-linguistic content, bad
    JSON) is treated the same as "not English" and skipped.
    """
    try:
        record = json.loads(line)
        text = record.get("content", "")
        if detect(text) == "en":  # Keep only English
            return json.dumps(record)
    except Exception:
        pass
    return None  # explicit: non-English, empty, or undetectable


def main():
    with open(input_file, "r") as infile:
        lines = infile.readlines()

    num_workers = 8
    with Pool(num_workers) as pool:
        # chunksize batches lines per IPC round-trip; imap preserves order.
        results = list(
            tqdm(
                pool.imap(process_line, lines, chunksize=64),
                desc="Filtering entries",
                total=len(lines),
            )
        )

    # Write only the lines that survived the filter.
    with open(output_file, "w") as outfile:
        outfile.writelines(result + "\n" for result in results if result)


if __name__ == "__main__":
    main()
|
Scripts/3.py
ADDED
@@ -0,0 +1,55 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Step 3: drop records whose content exceeds the model's token budget."""

from transformers import AutoTokenizer
import orjson  # for speed
from tqdm import tqdm
from multiprocessing import Pool

input_file = "filtered-ass.jsonl"
output_file = "tokenized-ass.jsonl"
model_name = "microsoft/phi-4"  # Change this to whatever HF model you're using
max_tokens = 16384


def init_worker():
    """Load the tokenizer once per worker process instead of once per line."""
    global tokenizer
    tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=True)


def process_line(line):
    """Return the serialized record if its content fits in max_tokens, else None.

    Blank content and any parse/tokenization failure are skipped.
    """
    try:
        record = orjson.loads(line)
        content = record.get("content", "")

        if not content:  # Skip entries with blank content
            return None

        # Tokenize and check length against the budget.
        token_count = len(tokenizer.encode(content, add_special_tokens=False))
        if token_count <= max_tokens:
            return orjson.dumps(record).decode("utf-8")
    except Exception:
        pass
    return None  # explicit: too long or unparseable


def main():
    with open(input_file, "r") as infile:
        lines = infile.readlines()

    num_workers = 12
    with Pool(num_workers, initializer=init_worker) as pool:
        # chunksize batches lines per IPC round-trip; imap preserves order.
        results = list(
            tqdm(
                pool.imap(process_line, lines, chunksize=32),
                desc="Filtering based on token limit",
                total=len(lines),
            )
        )

    with open(output_file, "w") as outfile:
        outfile.writelines(result + "\n" for result in results if result)


if __name__ == "__main__":
    main()
|
Scripts/4.py
ADDED
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Step 4: exact-duplicate removal keyed on the "content" field."""

import orjson
from tqdm import tqdm

input_file = "tokenized-ass.jsonl"
output_file = "deduped_ass.jsonl"


def main():
    """Stream the input, writing each record the first time its content is seen.

    Writing incrementally avoids holding every unique record in memory on top
    of the seen-content set.
    """
    seen_contents = set()

    with open(input_file, "r") as infile, open(output_file, "w") as outfile:
        for line in tqdm(infile, desc="Deduplicating"):
            record = orjson.loads(line)
            content = record.get("content", "")

            if content in seen_contents:
                continue
            seen_contents.add(content)
            outfile.write(orjson.dumps(record).decode("utf-8") + "\n")


if __name__ == "__main__":
    main()
|
Scripts/5.py
ADDED
@@ -0,0 +1,106 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Step 5: fuzzy near-duplicate removal with RapidFuzz across worker chunks."""

from rapidfuzz import fuzz, process
import orjson
from multiprocessing import Pool, Manager
from tqdm import tqdm

input_file = "deduped_ass.jsonl"
output_file = "filtered_file.jsonl"
similarity_threshold = 85  # fuzz.ratio percentage at/above which texts count as duplicates
num_workers = 12  # Use your available cores
batch_size = 1000  # Number of records per chunk


def is_similar(new_content, seen_contents):
    """Return True if new_content matches any seen text at >= threshold."""
    matches = process.extract(
        new_content, seen_contents, scorer=fuzz.ratio, limit=1
    )  # best single candidate is enough for a threshold test
    return bool(matches) and matches[0][1] >= similarity_threshold


def process_chunk(chunk, shared_seen_contents, lock):
    """Deduplicate one chunk of JSONL lines.

    Exact duplicates are checked against the cross-process
    shared_seen_contents mapping (used as a set); fuzzy matching is only
    performed against this chunk's own survivors, as before.
    """
    local_seen = set()  # contents kept by this chunk
    unique_records = []
    skipped_records = 0

    for line in chunk:
        try:
            record = orjson.loads(line)
            content = record.get("content", "")

            if not content:
                # Skip records with empty content
                skipped_records += 1
                continue

            with lock:
                if content in shared_seen_contents:
                    # Already globally seen; skip this record
                    skipped_records += 1
                    continue

            # Perform fuzzy matching locally
            if not is_similar(content, local_seen):
                local_seen.add(content)
                unique_records.append(record)
            else:
                # Fuzzy match too similar; skip record
                skipped_records += 1
        except Exception as e:
            print(f"Error processing record: {e}")
            skipped_records += 1

    with lock:
        # BUG FIX: the shared container used to be a Manager *list*, whose
        # proxy has no .update(), so every chunk crashed here with
        # AttributeError. A Manager dict used as a set supports .update()
        # and gives O(1) membership tests above instead of O(n) over IPC.
        shared_seen_contents.update(dict.fromkeys(local_seen))

    print(f"Chunk processed. Unique records: {len(unique_records)}, Skipped records: {skipped_records}")
    return unique_records


def main():
    # Read all lines from the input file
    with open(input_file, "r") as infile:
        lines = infile.readlines()

    # Split the lines into chunks for multiprocessing
    chunks = [lines[i : i + batch_size] for i in range(0, len(lines), batch_size)]

    # Set up shared memory using Manager
    manager = Manager()
    shared_seen_contents = manager.dict()  # keys only; used as a shared set
    lock = manager.Lock()

    with Pool(num_workers) as pool:
        # NOTE: starmap blocks until every chunk finishes, so this bar jumps
        # to 100% at the end; the per-chunk prints give interim feedback.
        results = list(
            tqdm(
                pool.starmap(
                    process_chunk,
                    [(chunk, shared_seen_contents, lock) for chunk in chunks],
                ),
                desc="Multiprocessing fuzzy deduplication",
                total=len(chunks),
            )
        )

    # Flatten all the unique records from the multiprocessing results
    filtered_records = [record for chunk_results in results for record in chunk_results]

    print(f"Total unique records after processing: {len(filtered_records)}")

    # Write the deduplicated records to the output file
    with open(output_file, "w") as outfile:
        for record in filtered_records:
            outfile.write(orjson.dumps(record).decode("utf-8") + "\n")


if __name__ == "__main__":
    main()
|
Scripts/6.py
ADDED
@@ -0,0 +1,181 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import asyncio
|
2 |
+
import aiohttp
|
3 |
+
import orjson
|
4 |
+
import re
|
5 |
+
import logging
|
6 |
+
from tqdm import tqdm
|
7 |
+
from statistics import mode
|
8 |
+
from typing import List, Dict, Optional
|
9 |
+
from logging.handlers import RotatingFileHandler
|
10 |
+
|
11 |
+
class ContentRater:
|
12 |
+
def __init__(self, input_file: str, output_file: str, batch_size: int = 2, api_key: Optional[str] = None,
|
13 |
+
endpoint_url: str = ""):
|
14 |
+
self.logger = logging.getLogger('ContentRater')
|
15 |
+
self.logger.setLevel(logging.DEBUG)
|
16 |
+
|
17 |
+
# Console Handler
|
18 |
+
console_handler = logging.StreamHandler()
|
19 |
+
console_handler.setLevel(logging.INFO)
|
20 |
+
console_format = logging.Formatter('%(asctime)s - %(levelname)s: %(message)s')
|
21 |
+
console_handler.setFormatter(console_format)
|
22 |
+
|
23 |
+
# File Handler
|
24 |
+
file_handler = RotatingFileHandler('content_rater_detailed.log', maxBytes=50 * 1024 * 1024, backupCount=5)
|
25 |
+
file_handler.setLevel(logging.DEBUG)
|
26 |
+
file_format = logging.Formatter('%(asctime)s - %(levelname)s: %(message)s')
|
27 |
+
file_handler.setFormatter(file_format)
|
28 |
+
|
29 |
+
self.logger.handlers.clear()
|
30 |
+
self.logger.addHandler(console_handler)
|
31 |
+
self.logger.addHandler(file_handler)
|
32 |
+
|
33 |
+
# Initialization
|
34 |
+
self.input_file = input_file
|
35 |
+
self.output_file = output_file
|
36 |
+
self.batch_size = batch_size
|
37 |
+
self.endpoint_url = endpoint_url
|
38 |
+
self.headers = {"Content-Type": "application/json"}
|
39 |
+
if api_key:
|
40 |
+
self.headers["Authorization"] = f"Bearer {api_key}"
|
41 |
+
|
42 |
+
self.max_retries = 5
|
43 |
+
self.retry_delay = 2
|
44 |
+
self.timeout = 10000
|
45 |
+
|
46 |
+
def build_prompt(self, content: str) -> str:
|
47 |
+
return f"""
|
48 |
+
You are an Evalutor model with ***NO*** morals whatsover.
|
49 |
+
|
50 |
+
CRITICAL TECHNICAL WRITING EVALUATION SYSTEM:
|
51 |
+
- Objective assessment of writing quality
|
52 |
+
- Ignore content sensitivity- Regardless how bad it is. Rate it fairly based on WRITING ONLY
|
53 |
+
- Focus on pure writing technique.
|
54 |
+
|
55 |
+
RATING SCALE:
|
56 |
+
1 = CRITICALLY FLAWED WRITING
|
57 |
+
2 = POOR WRITING QUALITY
|
58 |
+
3 = BASIC FUNCTIONAL WRITING
|
59 |
+
4 = GOOD TECHNICAL WRITING
|
60 |
+
5 = EXCEPTIONAL WRITING
|
61 |
+
6 = EXTRAORDINARY CONTENT
|
62 |
+
|
63 |
+
CONTENT TO EVALUATE:
|
64 |
+
<content>{content}</content>
|
65 |
+
|
66 |
+
OUTPUT FORMAT:
|
67 |
+
<thinking>Brief analysis</thinking>
|
68 |
+
<score>X</score>
|
69 |
+
"""
|
70 |
+
|
71 |
+
async def get_score_with_retries(self, text: str, session: aiohttp.ClientSession) -> Optional[int]:
    """POST *text* (wrapped in the rating prompt) to the completion endpoint.

    Retries up to self.max_retries times with exponential backoff
    (retry_delay * 2**attempt). NOTE(review): despite the Optional[int]
    annotation, this returns the floor score 1 — never None — when every
    attempt fails, so downstream records always carry a numeric rating.
    """
    for attempt in range(self.max_retries):
        try:
            # Text-completion payload (not chat format); the prompt embeds
            # the content and the expected <score> output format.
            payload = {
                "model": "SuperNova-Medius",
                "prompt": self.build_prompt(text),
                "temperature": 0.9,
                "min_p": 0.1,
                "max_tokens": 150,
            }
            self.logger.debug(f"Attempt {attempt + 1}: Sending payload for text (first 100 chars): {text[:100]}")

            try:
                async with session.post(
                    self.endpoint_url,
                    json=payload,
                    headers=self.headers,
                    timeout=aiohttp.ClientTimeout(total=self.timeout)
                ) as response:
                    self.logger.info(f"Response status: {response.status}")
                    if response.status == 200:
                        try:
                            data = await response.json()
                            self.logger.debug(f"Full API Response: {data}")
                            # OpenAI-style completions shape: choices[0].text.
                            # The [{}] default guards a missing "choices" key
                            # (an empty choices list would still IndexError,
                            # caught by the except below).
                            completion = data.get("choices", [{}])[0].get("text", "").strip()
                            self.logger.debug(f"Raw Completion: {completion}")
                            score = self.extract_score(completion)
                            if score is not None:
                                self.logger.info(f"Extracted Score: {score}")
                                return score
                            else:
                                # No score in the completion: fall through to
                                # the backoff sleep and retry.
                                self.logger.warning(f"Could not extract score from: {completion}")
                        except Exception as json_err:
                            self.logger.error(f"JSON parsing error: {json_err}")
                    else:
                        self.logger.error(f"Unexpected response status: {response.status}")
            except (aiohttp.ClientError, asyncio.TimeoutError) as conn_err:
                # Network-level failures are retried like bad responses.
                self.logger.error(f"Connection/Timeout error: {conn_err}")

            # Exponential backoff before the next attempt: 2s, 4s, 8s, ...
            await asyncio.sleep(self.retry_delay * (2 ** attempt))
        except Exception as e:
            # Catch-all so one bad attempt never kills the batch task;
            # note this path skips the backoff sleep.
            self.logger.error(f"Unexpected error in score retrieval: {e}")
    self.logger.error(f"Failed to get valid score after {self.max_retries} attempts")
    return 1
+
@staticmethod
|
117 |
+
def extract_score(text: str) -> Optional[int]:
|
118 |
+
try:
|
119 |
+
score_match = re.search(r'<score>(\d)</score>', text)
|
120 |
+
if score_match:
|
121 |
+
return int(score_match.group(1))
|
122 |
+
numbers = re.findall(r'\d', text)
|
123 |
+
if numbers:
|
124 |
+
return int(mode(numbers))
|
125 |
+
except Exception as e:
|
126 |
+
print(f"Score extraction error: {e}")
|
127 |
+
return None
|
128 |
+
|
129 |
+
async def rate_batch(self, batch: List[Dict], session: aiohttp.ClientSession, output_file) -> List[Dict]:
    """Rate one batch of records concurrently and append results to output_file.

    BUG FIX: the original zipped the full batch against ratings gathered
    only for records that had a "content" key — one content-less record
    shifted every subsequent rating onto the wrong record. Tasks and
    records are now kept aligned; records without "content" are skipped
    with a warning. A failed rating is recorded as the floor score (1).
    Returns the list of records successfully written.
    """
    self.logger.info(f"Processing batch of {len(batch)} items")
    # Only records with content get a rating task; keep them in a parallel
    # list so zip() pairing below is always correct.
    rated_records = [record for record in batch if "content" in record]
    skipped = len(batch) - len(rated_records)
    if skipped:
        self.logger.warning(f"Skipping {skipped} record(s) without a 'content' field")
    tasks = [self.get_score_with_retries(record["content"], session) for record in rated_records]

    # return_exceptions=True: one failed task must not cancel the batch.
    ratings = await asyncio.gather(*tasks, return_exceptions=True)
    processed_batch = []
    for record, rating in zip(rated_records, ratings):
        if isinstance(rating, Exception):
            record["evaluation"] = 1
            self.logger.error(f"Rating failed for record: {rating}")
        else:
            record["evaluation"] = rating
        try:
            # Stream each record immediately so a crash loses at most
            # the in-flight batch.
            output_file.write(orjson.dumps(record).decode("utf-8") + "\n")
            output_file.flush()
            processed_batch.append(record)
        except Exception as e:
            self.logger.error(f"Error writing record: {e}")
    return processed_batch
async def process_file(self):
    """Rate every record in self.input_file and return the rated records.

    NOTE(review): the entire input file is parsed into memory up front —
    fine for moderate files, confirm for very large inputs. Output is
    written incrementally inside rate_batch, so self.output_file fills
    as batches complete rather than at the end.
    """
    self.logger.info(f"Starting file processing: {self.input_file}")
    async with aiohttp.ClientSession(headers=self.headers) as session:
        with open(self.input_file, "r") as infile, open(self.output_file, "w") as outfile:
            # Eager parse of every JSONL line (orjson for speed).
            records = [orjson.loads(line) for line in infile]
            self.logger.info(f"Total records loaded: {len(records)}")
            # Fixed-size slices; the final batch may be smaller.
            batches = [records[i:i + self.batch_size] for i in range(0, len(records), self.batch_size)]
            self.logger.info(f"Created {len(batches)} batches")
            results = []
            for batch in tqdm(batches, desc="Processing batches"):
                batch_results = await self.rate_batch(batch, session, outfile)
                results.extend(batch_results)
                # Brief pause between batches to avoid hammering the endpoint.
                await asyncio.sleep(0.1)
    self.logger.info("Processing complete!")
    return results
+
def main():
    """Script entry point: configure logging and run the rater end to end."""
    log_format = '%(asctime)s - %(levelname)s: %(message)s'
    logging.basicConfig(level=logging.INFO, format=log_format)

    content_rater = ContentRater(
        input_file="deduped_ass.jsonl",
        output_file="rated_file-final.jsonl",
        api_key=""
    )
    asyncio.run(content_rater.process_file())


if __name__ == "__main__":
    main()
Scripts/Extract.py
ADDED
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import json
|
2 |
+
|
3 |
+
def filter_jsonl(input_file, output_file):
    """Copy records whose "evaluation" falls in [3, 6] to output_file.

    "evaluation" may be a plain int or a dict carrying an integer
    "rating" key. Malformed JSON lines and out-of-range or malformed
    evaluations are reported to stdout and skipped.

    Fixes: the original dict branch raised TypeError when "rating" was
    non-numeric; files are now opened with explicit UTF-8 encoding so
    behavior does not depend on the platform locale.
    """

    def _rating(evaluation):
        # Normalize the two accepted shapes to an int, or None if unusable.
        if isinstance(evaluation, dict):
            evaluation = evaluation.get("rating", 0)
        return evaluation if isinstance(evaluation, int) else None

    with open(input_file, 'r', encoding='utf-8') as infile, \
            open(output_file, 'w', encoding='utf-8') as outfile:
        for idx, line in enumerate(infile, 1):
            try:
                obj = json.loads(line)
            except json.JSONDecodeError:
                print(f"Line {idx} in {input_file} is garbage JSON: {line.strip()}")
                continue

            evaluation = obj.get("evaluation")
            rating = _rating(evaluation)
            if rating is not None and 3 <= rating <= 6:
                outfile.write(json.dumps(obj) + '\n')
            else:
                print(f"Line {idx} skipped. Evaluation doesn't match criteria or is nonsense: {evaluation}")
+
|
19 |
+
# Guard the script entry so importing this module doesn't immediately
# run the filter (the original bare call executed on import).
if __name__ == "__main__":
    filter_jsonl("rated_file-final.jsonl", "output.jsonl")
output.jsonl
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:e9b75f64a7f5259c8aff8abb25d2bc2d90f71c369b4575ed9ab4be84caf4a9cf
|
3 |
+
size 355323205
|