Upload 2 files
color issues fixed
- app.py +8 -7
- requirements.txt +0 -1
    	
app.py CHANGED

@@ -122,12 +122,12 @@ def xai_attributions_html(input_text: str):
     html = html.replace("#/s", "")
     html = sub("<th.*?/th>", "", html, 4, DOTALL)
     html = sub("<td.*?/td>", "", html, 4, DOTALL)
-    return word_attributions, html
+    return word_attributions, html+"<br>"
 
 
 def explanation_intro(prediction_label: str):
     """
-    generates model explanaiton markdown from prediction label of the model.
+    generates model explanaiton html markdown from prediction label of the model.
 
     Args:
       prediction_label (str): The label that the model predicted.
@@ -135,10 +135,11 @@ def explanation_intro(prediction_label: str):
     Returns:
       A string
     """
-    return f"""…
+    return f"""<div style="background-color: lightblue;
+ color: rgb(0, 66, 128);">The model predicted the given sentence as <span style="color: black"><strong>'{prediction_label}'</strong></span>.
     The figure below shows the contribution of each token to this decision.
-    …
-    The …
+    <span style="color: darkgreen"><strong> Green </strong></span> tokens indicate a <strong>positive </strong> contribution, while <span style="color: red"><strong> red </strong></span> tokens indicate a <strong>negative</strong> contribution.
+    The <strong>bolder</strong> the color, the greater the value.</div><br>"""
 
 
 def explanation_viz(prediction_label: str, word_attributions):
@@ -154,7 +155,7 @@ def explanation_viz(prediction_label: str, word_attributions):
       A string
     """
     top_attention_word = max(word_attributions, key=itemgetter(1))[0]
-    return f"""The token **_'{top_attention_word}'_** is the biggest driver for the decision of the model as …
+    return f"""The token **_'{top_attention_word}'_** is the biggest driver for the decision of the model as **'{prediction_label}'**"""
 
 
 def word_attributions_dict_creater(word_attributions):
@@ -237,7 +238,7 @@ if submit:
     label_probs_figure, prediction_label = label_probs_figure_creater(input_text)
     st.plotly_chart(label_probs_figure, config=hide_plotly_bar)
     explanation_general = explanation_intro(prediction_label)
-    st.…
+    st.markdown(explanation_general, unsafe_allow_html=True)
     with st.spinner():
         word_attributions, html = xai_attributions_html(input_text)
         st.markdown(html, unsafe_allow_html=True)
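For readability, here is a consolidated sketch of the explanation and rendering path after this commit. The two function bodies are taken from the new side of the diff; the imports, the placeholder prediction_label, and the sample word_attributions values are assumptions added only to make the sketch self-contained, since the diff does not show them.

from operator import itemgetter

import streamlit as st  # assumption: the app imports Streamlit as st


def explanation_intro(prediction_label: str) -> str:
    """Builds the styled HTML intro shown above the attribution figure."""
    return f"""<div style="background-color: lightblue;
 color: rgb(0, 66, 128);">The model predicted the given sentence as <span style="color: black"><strong>'{prediction_label}'</strong></span>.
    The figure below shows the contribution of each token to this decision.
    <span style="color: darkgreen"><strong> Green </strong></span> tokens indicate a <strong>positive </strong> contribution, while <span style="color: red"><strong> red </strong></span> tokens indicate a <strong>negative</strong> contribution.
    The <strong>bolder</strong> the color, the greater the value.</div><br>"""


def explanation_viz(prediction_label: str, word_attributions) -> str:
    """Names the token with the largest attribution score."""
    top_attention_word = max(word_attributions, key=itemgetter(1))[0]
    return f"""The token **_'{top_attention_word}'_** is the biggest driver for the decision of the model as **'{prediction_label}'**"""


# Hypothetical inputs, only to make the sketch runnable on its own.
prediction_label = "positive"
word_attributions = [("great", 0.82), ("movie", 0.10), ("boring", -0.45)]

st.markdown(explanation_intro(prediction_label), unsafe_allow_html=True)
st.markdown(explanation_viz(prediction_label, word_attributions))

The intro text is now an HTML block rather than plain markdown, so it is passed through st.markdown with unsafe_allow_html=True, matching how the attribution HTML was already being displayed.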
    	
requirements.txt CHANGED

@@ -1,6 +1,5 @@
 --find-links https://download.pytorch.org/whl/torch_stable.html
 torch==1.13.1+cpu
-streamlit==1.16.0
 accelerate
 plotly
 transformers
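The only change here is dropping the streamlit pin; on a Streamlit Space the runtime presumably provides Streamlit itself, so only torch stays pinned, to the CPU wheel resolved through the --find-links index. A minimal post-install sanity check, as a sketch with expected output noted in the comments:

# Sketch: verify the environment produced by the trimmed requirements.txt.
import streamlit
import torch

print(torch.__version__)          # expected: 1.13.1+cpu per the pin above
print(torch.cuda.is_available())  # False, the +cpu wheel ships without CUDA
print(streamlit.__version__)      # whichever version the runtime supplies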