from transformers import InstructBlipProcessor, InstructBlipForConditionalGeneration
from PIL import Image
import torch
import gradio as gr

device = "cuda" if torch.cuda.is_available() else "cpu"
print(f"Using device: {device}")

# Load model (fits in 12GB with fp16)
model_name = "Salesforce/instructblip-flan-t5-xl"
processor = InstructBlipProcessor.from_pretrained(model_name)
model = InstructBlipForConditionalGeneration.from_pretrained(
    model_name,
    torch_dtype=torch.float16,
    device_map="auto",
)

# Prompt to force paragraph-level description
prompt = (
    "Describe this image in a detailed paragraph of 5-7 sentences. "
    "Mention setting, objects, colors, actions, background details, and possible context."
)
def caption_image(image):
    inputs = processor(images=image, text=prompt, return_tensors="pt").to(device)
    # Cast pixel values to match the fp16 weights; leaving them in fp32
    # triggers a dtype-mismatch error in generate() on GPU
    inputs["pixel_values"] = inputs["pixel_values"].to(model.dtype)
    out = model.generate(
        **inputs,
        max_new_tokens=250,
        do_sample=True,
        temperature=0.7,
        top_p=0.9,
        repetition_penalty=1.2,
    )
    output = processor.batch_decode(out, skip_special_tokens=True)[0]
    print(output)
    return output
demo = gr.Interface(
    fn=caption_image,
    inputs=gr.Image(type="pil", label="Upload an Image", height=400),
    outputs="text",
    title="Image to Paragraph Captioning",
)
demo.queue(api_open=False, max_size=10).launch()
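
# Optional usage sketch (not part of the app itself): once the app is running,
# it can be queried programmatically from another process. This assumes the
# `gradio_client` package (>= 1.0, which provides `handle_file`), the default
# local URL, the auto-generated `/predict` endpoint of gr.Interface, and a
# hypothetical image file "example.jpg" -- all assumptions, not from the app:
#
#   from gradio_client import Client, handle_file
#
#   client = Client("http://127.0.0.1:7860/")
#   caption = client.predict(handle_file("example.jpg"), api_name="/predict")
#   print(caption)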