```python
import os

import streamlit as st
from PIL import Image

from helper import (
    resize_image, convert_image_to_base64, post_request_and_parse_response,
    draw_bounding_boxes_for_textract, extract_text_from_textract_blocks, ChatGPTClient
)

# Load the OpenAI API key from an environment variable
OPENAI_API_KEY = os.environ["OPENAI_API_KEY"]

TEXTRACT_API_URL = "https://2tsig211e0.execute-api.us-east-1.amazonaws.com/my_textract"
```
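`helper.py` is not shown in this listing, so here is a minimal sketch of the two image utilities, inferred from how they are called below. The `max_size` default and the PNG encoding are assumptions, not the Space's actual implementation.

```python
import base64
import io

from PIL import Image


def resize_image(image: Image.Image, max_size: int = 1024) -> Image.Image:
    """Shrink the image so its longest side is at most max_size pixels.

    max_size is an assumed default; the real helper may use another limit.
    """
    resized = image.copy()
    resized.thumbnail((max_size, max_size))  # In-place, keeps the aspect ratio
    return resized


def convert_image_to_base64(image: Image.Image) -> str:
    """Serialize a PIL image to a base64-encoded PNG string."""
    buffer = io.BytesIO()
    image.save(buffer, format="PNG")
    return base64.b64encode(buffer.getvalue()).decode("utf-8")
```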
```python
st.set_page_config(page_title="💬 Chat with OCR 🔍", layout="wide")

# Initialize chat history if it is not in session state yet
if "messages" not in st.session_state:
    st.session_state.messages = []

# Sidebar for image upload
with st.sidebar:
    st.title("🖼️ Upload and Display Images")
    uploaded_image = st.file_uploader("Upload an Image", type=["png", "jpg", "jpeg"])
    if uploaded_image:
        pil_image = Image.open(uploaded_image)
        resized_image = resize_image(pil_image)
        with st.expander("Original Image", expanded=False):
            st.image(pil_image, caption="Uploaded Image", use_column_width=True)

        # Convert the image to base64 and send it to the Textract API
        image_base64 = convert_image_to_base64(resized_image)
        payload = {"image": image_base64}
        result_dict = post_request_and_parse_response(TEXTRACT_API_URL, payload)
```
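`post_request_and_parse_response` presumably wraps a plain `requests` POST. One detail worth noting: API Gateway endpoints often return the Lambda payload as a JSON string under `"body"`, which would explain why the code reads `result_dict['body']` later. A sketch under that assumption:

```python
import json

import requests


def post_request_and_parse_response(url: str, payload: dict) -> dict:
    """POST a JSON payload to the endpoint and return the parsed response."""
    response = requests.post(url, json=payload, timeout=30)
    response.raise_for_status()
    result = response.json()
    # If the body arrived as a JSON-encoded string (common with API
    # Gateway + Lambda), decode it once more so callers get a dict.
    if isinstance(result.get("body"), str):
        result["body"] = json.loads(result["body"])
    return result
```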
```python
        # Draw the Textract bounding boxes on a copy of the resized image
        image_with_boxes = draw_bounding_boxes_for_textract(resized_image.copy(), result_dict)
        with st.expander("Image with Bounding Boxes", expanded=True):
            st.image(image_with_boxes, caption="Image with Bounding Boxes", use_column_width=True)
```
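Textract reports geometry as fractions of the image size (`Geometry.BoundingBox` with `Left`, `Top`, `Width`, `Height` in `[0, 1]`), so drawing the boxes is mostly a matter of scaling those fractions to pixels. A plausible version of the helper, assuming the parsed body contains standard Textract `Blocks`:

```python
from PIL import Image, ImageDraw


def draw_bounding_boxes_for_textract(image: Image.Image, result_dict: dict) -> Image.Image:
    """Draw a red rectangle around every LINE block Textract detected.

    Textract bounding boxes are normalized to [0, 1], so they are scaled
    by the image width and height before drawing.
    """
    draw = ImageDraw.Draw(image)
    width, height = image.size
    for block in result_dict["body"].get("Blocks", []):
        if block.get("BlockType") != "LINE":
            continue
        box = block["Geometry"]["BoundingBox"]
        left = box["Left"] * width
        top = box["Top"] * height
        draw.rectangle(
            [left, top, left + box["Width"] * width, top + box["Height"] * height],
            outline="red",
            width=2,
        )
    return image
```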
```python
        # Extract the recognized text from the Textract blocks
        cleaned_up_body = extract_text_from_textract_blocks(result_dict['body'])
```
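`extract_text_from_textract_blocks` receives `result_dict['body']` directly and needs to return something chat-friendly; joining the `Text` of the `LINE` blocks is the simplest plausible reading:

```python
def extract_text_from_textract_blocks(body: dict) -> str:
    """Join the text of all LINE blocks into one newline-separated string."""
    lines = [
        block["Text"]
        for block in body.get("Blocks", [])
        if block.get("BlockType") == "LINE" and "Text" in block
    ]
    return "\n".join(lines)
```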
```python
# Add some space at the bottom of the sidebar
st.sidebar.markdown("<br><br><br><br>", unsafe_allow_html=True)

# Clear-session button at the bottom of the sidebar
if st.sidebar.button("Clear Session"):
    st.session_state.messages = []

# Main chat interface
st.title("Chat with OCR Output")

# Display previous messages from session state
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

# Initialize ChatGPTClient with the session-state history
if uploaded_image:
    history_copy = st.session_state.messages.copy()
    if cleaned_up_body:
        history_copy.append({"role": "system", "content": cleaned_up_body})
    bot = ChatGPTClient(
        api_key=OPENAI_API_KEY,
        protocol=(
            "You are given the text portion of a JSON file produced by OCR "
            "on a scanned image. The user will ask you questions about this text."
        ),
        body=cleaned_up_body,
    )
    bot.history = history_copy  # Seed the client's history with the session messages
```
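`ChatGPTClient` is the only stateful helper. The sketch below targets the official `openai` Python client; the model name and the exact message layout are assumptions. The app only relies on the constructor arguments shown above, the `history` attribute, and `generate_response()`.

```python
from openai import OpenAI


class ChatGPTClient:
    """Thin wrapper around the OpenAI chat API that keeps its own history."""

    def __init__(self, api_key: str, protocol: str, body: str, model: str = "gpt-3.5-turbo"):
        self.client = OpenAI(api_key=api_key)
        self.protocol = protocol  # System prompt describing the task
        self.body = body          # OCR text the user will ask about
        self.model = model        # Assumed default; the real helper may differ
        self.history: list[dict] = []

    def generate_response(self, prompt: str) -> str:
        # System prompt first, then prior turns, then the new user message
        messages = [
            {"role": "system", "content": f"{self.protocol}\n\n{self.body}"},
            *self.history,
            {"role": "user", "content": prompt},
        ]
        completion = self.client.chat.completions.create(model=self.model, messages=messages)
        reply = completion.choices[0].message.content
        self.history += [
            {"role": "user", "content": prompt},
            {"role": "assistant", "content": reply},
        ]
        return reply
```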
```python
# React to user input
if prompt := st.chat_input("Ask me about the image"):
    # Display the user message in the chat container
    st.chat_message("user").markdown(prompt)
    st.session_state.messages.append({"role": "user", "content": prompt})

    # Generate a response with ChatGPTClient
    if uploaded_image:
        response = bot.generate_response(prompt)
    else:
        response = "Please upload an image before asking questions."

    # Display the assistant message in the chat container
    st.chat_message("assistant").markdown(response)
    st.session_state.messages.append({"role": "assistant", "content": response})
```
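Note that the script reads the key with `os.environ["OPENAI_API_KEY"]`, so it raises a `KeyError` at startup if the variable is unset; define it first (for example as a Spaces secret, or exported in your shell) before launching the app with `streamlit run app.py`, assuming the Hugging Face Spaces convention of naming the entry point `app.py`.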