Update app.py
app.py
CHANGED
@@ -1,155 +1,79 @@
import gradio as gr
-from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
[old lines 3-79 were also removed; their content is not preserved in this view]
-    text-align: center;
-    margin-bottom: 20px;
-}
-
-/* Description Styling */
-.gradio-description {
-    font-size: 18px;
-    color: #eeeeee; /* Light gray text for description */
-    text-align: center;
-    margin-bottom: 30px;
-}
-
-/* Chat History Box Styling */
-#chat-history {
-    font-size: 16px;
-    border-radius: 12px;
-    border: none;
-    padding: 14px;
-    background: rgba(255, 255, 255, 0.9); /* Slight transparent white background */
-    color: #444;
-    margin-bottom: 20px;
-    height: 300px;
-    overflow-y: auto;
-    font-family: 'Roboto', sans-serif;
-    box-shadow: 0px 4px 12px rgba(0, 0, 0, 0.1); /* Soft shadow for chat history */
-}
-
-/* Input Textbox Styling */
-#user-input-textbox {
-    font-size: 16px;
-    border-radius: 10px;
-    border: none;
-    padding: 12px;
-    width: 100%;
-    margin-bottom: 15px;
-    background-color: #FFFFFF;
-    font-family: 'Roboto', sans-serif;
-    transition: background-color 0.3s ease; /* Smooth transition for hover effect */
-}
-
-#user-input-textbox:focus {
-    background-color: #FFFFFF; /* Light gray background on focus */
-    border-color: #FF6A00; /* Orange border on focus */
-}
-
-/* Submit Button Styling */
-.gradio-button {
-    background-color: #3A9FD6; /* Blue color for submit button */
-    color: white;
-    padding: 14px 28px;
-    border-radius: 8px;
-    box-shadow: 0px 6px 30px #000000; /* Soft shadow for depth */
-    border: none;
-    font-size: 18px;
-    font-weight: 600;
-    cursor: pointer;
-    transition: all 0.3s ease-in-out;
-}
-
-.gradio-button:hover {
-    background-color: #FF6A00; /* Orange color on hover */
-    transform: scale(1.05); /* Slight button scaling on hover */
-}
-
-/* Adjust padding and margins */
-.gradio-block {
-    padding: 0;
-}
-.gradio-row {
-    margin-bottom: 25px;
-}
-"""
-
-demo.launch()
-
-launch_gradio_interface()
import gradio as gr
+from transformers import pipeline, AutoTokenizer, AutoModelForSeq2SeqLM
+import PyPDF2
+
+# Load Models
+model1_name = "t5-small"  # Model 1
+model2_name = "codeparrot/codeparrot-small"  # Model 2
+model3_name = "Salesforce/blip-image-captioning"  # Model 3
+
+# Load Pipelines
+model1 = pipeline("text2text-generation", model=model1_name, tokenizer=model1_name)
+model2 = pipeline("text-generation", model=model2_name, tokenizer=model2_name)
+model3 = pipeline("image-to-text", model=model3_name)  # We'll adapt this for PDF processing
+
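Note: all three checkpoints are downloaded and instantiated at import time, so the app serves nothing until every model has loaded. Also, the BLIP captioning checkpoints on the Hugging Face Hub appear to be published with a size suffix ("Salesforce/blip-image-captioning-base" / "-large"), so the unsuffixed id above may fail to resolve. A minimal lazy-loading sketch, shown as an assumption rather than part of this commit:

    from functools import lru_cache

    @lru_cache(maxsize=None)
    def get_pipe(task, model_id):
        # Build each pipeline on first use, so startup stays fast and a bad
        # model id only breaks the tab that actually needs it.
        return pipeline(task, model=model_id)

With this, model1_func would call get_pipe("text2text-generation", model1_name) instead of relying on the module-level model1.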
+# Helper: Extract text from PDF
+def extract_text_from_pdf(pdf_file):
+    pdf_reader = PyPDF2.PdfReader(pdf_file)
+    text = ""
+    for page in pdf_reader.pages:
+        text += page.extract_text()
+    return text
+
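Note: extract_text_from_pdf concatenates whatever page.extract_text() returns; for scanned or image-only pages that is typically an empty string (and, in some PyPDF2 versions, may be None), so the result can be blank or the concatenation can fail. A slightly more defensive variant, offered only as a sketch:

    def extract_text_from_pdf_safe(pdf_file):
        # Join per-page text with newlines and tolerate pages that
        # yield no extractable text (e.g. scanned images).
        reader = PyPDF2.PdfReader(pdf_file)
        return "\n".join((page.extract_text() or "") for page in reader.pages)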
+# Function for Model 1
+def model1_func(input_text):
+    try:
+        result = model1(input_text, max_length=50, num_return_sequences=1)
+        answer = result[0]["generated_text"]
+        return f"Model 1 Output: {answer}"
+    except Exception as e:
+        return f"Error: {str(e)}"
+
+# Function for Model 2
+def model2_func(input_text):
+    try:
+        result = model2(input_text, max_length=50, num_return_sequences=1)
+        answer = result[0]["generated_text"]
+        return f"Model 2 Output: {answer}"
+    except Exception as e:
+        return f"Error: {str(e)}"
+
+# Function for Model 3
+def model3_func(pdf_file):
+    try:
+        extracted_text = extract_text_from_pdf(pdf_file)
+        if not extracted_text.strip():
+            return "No text found in the PDF. Please upload a valid file."
+        result = model3(extracted_text)
+        answer = result[0]["generated_text"]
+        return f"Model 3 Output: {answer}"
+    except Exception as e:
+        return f"Error: {str(e)}"
+
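Note: model3 was created for the "image-to-text" task, so model3(extracted_text) will raise as soon as it receives a plain string instead of an image, and the except branch will surface that error in the UI. One possible drop-in adaptation, shown purely as a sketch and not as the author's stated intent, is to route the extracted PDF text through the t5-small text2text pipeline already loaded as model1:

    def model3_func(pdf_file):
        try:
            extracted_text = extract_text_from_pdf(pdf_file)
            if not extracted_text.strip():
                return "No text found in the PDF. Please upload a valid file."
            # t5-small expects a task prefix; slice the raw text so the
            # input stays well inside the model's 512-token limit.
            result = model1("summarize: " + extracted_text[:1500],
                            max_length=150, num_return_sequences=1)
            return f"Model 3 Output: {result[0]['generated_text']}"
        except Exception as e:
            return f"Error: {str(e)}"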
+# Gradio Interface
+with gr.Blocks() as demo:
+    gr.Markdown("<h1>Multi-Model NLP Tool</h1>")
+
+    with gr.Tab("Model 1"):
+        gr.Markdown("**Model 1: Text-to-Text (e.g., Summarization)**")
+        model1_input = gr.Textbox(label="Enter Text", placeholder="Type here...")
+        model1_output = gr.Textbox(label="Output")
+        model1_button = gr.Button("Generate")
+        model1_button.click(model1_func, inputs=model1_input, outputs=model1_output)
+
+    with gr.Tab("Model 2"):
+        gr.Markdown("**Model 2: Text Generation (e.g., Code Generation)**")
+        model2_input = gr.Textbox(label="Enter Text", placeholder="Type here...")
+        model2_output = gr.Textbox(label="Output")
+        model2_button = gr.Button("Generate")
+        model2_button.click(model2_func, inputs=model2_input, outputs=model2_output)
+
+    with gr.Tab("Model 3"):
+        gr.Markdown("**Model 3: Document Reader (PDF Input)**")
+        pdf_input = gr.File(label="Upload PDF")
+        model3_output = gr.Textbox(label="Output")
+        model3_button = gr.Button("Process PDF")
+        model3_button.click(model3_func, inputs=pdf_input, outputs=model3_output)
+
+# Launch the app
+demo.launch()
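Two deployment notes. Depending on the Gradio version, gr.File passes the handler a temporary-file wrapper or a plain path; PyPDF2.PdfReader accepts either, but pinning the upload to PDFs and requesting a path makes the handoff explicit (the file_types and type arguments below are assumed from recent Gradio releases, not taken from this commit):

    pdf_input = gr.File(label="Upload PDF", file_types=[".pdf"], type="filepath")

Running this as a Space would also need PyPDF2 (and torch for the transformers pipelines) listed in requirements.txt alongside gradio and transformers.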