import base64
import re
import json
import pandas as pd
import gradio as gr
import pyterrier as pt
pt.init()
import pyt_splade
from pyterrier_gradio import Demo, MarkdownFile, interface, df2code, code2md, EX_Q, EX_D, df2list
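# Two SPLADE pipelines sharing the same underlying model but differing in how
# token scores are aggregated into a single weight per term ('max' vs 'sum').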
factory_max = pyt_splade.Splade(agg='max')
factory_sum = pyt_splade.Splade(agg='sum')
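# Notebook filename and install commands passed to code2md() below when
# building the reproducible code snippet shown alongside each demo.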
COLAB_NAME = 'pyterrier_splade.ipynb'
COLAB_INSTALL = '''
!pip install -q git+https://github.com/naver/splade
!pip install -q git+https://github.com/cmacdonald/pyt_splade
'''.strip()
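# Render SPLADE token weights as HTML: each token becomes a <kbd> chip whose
# background opacity is proportional to its weight (relative to the maximum),
# and expansion tokens (weighted terms not present in the original text) are
# listed separately in descending order of weight.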
def generate_vis(df, mode='Document'):
    if len(df) == 0:
        return ''
    result = []
    if mode == 'Document':
        max_score = max(max(t.values()) for t in df['toks'])
    for row in df.itertuples(index=False):
        if mode == 'Query':
            tok_scores = row.query_toks
            orig_tokens = factory_max.tokenizer.tokenize(row.query)
            max_score = max(tok_scores.values())
            id = row.qid
        else:
            tok_scores = row.toks
            orig_tokens = factory_max.tokenizer.tokenize(row.text)
            id = row.docno
        def toks2span(toks):
            return '<kbd> </kbd>'.join(f'<kbd style="background-color: rgba(66, 135, 245, {tok_scores.get(t, 0)/max_score});">{t}</kbd>' for t in toks)
        orig_tokens_set = set(orig_tokens)
        exp_tokens = [t for t, v in sorted(tok_scores.items(), key=lambda x: (-x[1], x[0])) if t not in orig_tokens_set]
        result.append(f'''
<div style="font-size: 1.2em;">{mode}: <strong>{id}</strong></div>
<div style="margin: 4px 0 16px; padding: 4px; border: 1px solid black;">
  <div>
    {toks2span(orig_tokens)}
  </div>
  <div><strong>Expansion Tokens:</strong> {toks2span(exp_tokens)}</div>
</div>
''')
    return '\n'.join(result)
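# Gradio handler for the query demo. The input is a DataFrame with 'qid' and
# 'query' columns (e.g. pd.DataFrame([{'qid': 'q1', 'query': 'chemical reactions'}]);
# example values are illustrative only). Returns the encoded frame (token weights
# serialised to JSON), a reproducible code snippet, and the HTML visualisation.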
def predict_query(input, agg):
    code = f'''import pyt_splade
splade = pyt_splade.Splade(agg={agg!r})
query_pipeline = splade.query_encoder()
query_pipeline({df2list(input)})
'''
    pipeline = {
        'max': factory_max,
        'sum': factory_sum
    }[agg].query_encoder()
    res = pipeline(input)
    vis = generate_vis(res, mode='Query')
    res['query_toks'] = [json.dumps({k: round(v, 4) for k, v in t.items()}) for t in res['query_toks']]
    return (res, code2md(code, COLAB_INSTALL, COLAB_NAME), vis)
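# Gradio handler for the document demo: same shape as predict_query, but the
# input carries 'docno' and 'text' columns and the document encoder populates
# a 'toks' column instead of 'query_toks'.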
def predict_doc(input, agg):
    code = f'''import pyt_splade
splade = pyt_splade.Splade(agg={agg!r})
doc_pipeline = splade.doc_encoder()
doc_pipeline({df2list(input)})
'''
    pipeline = {
        'max': factory_max,
        'sum': factory_sum
    }[agg].doc_encoder()
    res = pipeline(input)
    vis = generate_vis(res, mode='Document')
    res['toks'] = [json.dumps({k: round(v, 4) for k, v in t.items()}) for t in res['toks']]
    return (res, code2md(code, COLAB_INSTALL, COLAB_NAME), vis)
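# Assemble the demo page: README intro, query-encoding demo, document-encoding
# demo, and wrap-up, with each demo exposing a max/sum aggregation dropdown.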
interface(
    MarkdownFile('README.md'),
    MarkdownFile('query.md'),
    Demo(
        predict_query,
        EX_Q,
        [
            gr.Dropdown(choices=['max', 'sum'], value='max', label='Aggregation'),
        ],
        scale=2/3
    ),
    MarkdownFile('doc.md'),
    Demo(
        predict_doc,
        EX_D,
        [
            gr.Dropdown(choices=['max', 'sum'], value='max', label='Aggregation'),
        ],
        scale=2/3
    ),
    MarkdownFile('wrapup.md'),
).launch(share=False)