import gradio as gr
import torch
from transformers import Qwen2_5_VLForConditionalGeneration, AutoProcessor
from peft import PeftModel
from PIL import Image
import requests
from io import BytesIO

model = None
processor = None


def load_model():
    """Lazily load the base model, LoRA adapter, and processor on first use."""
    global model, processor
    if model is None:
        print("Loading model...")
        base_model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
            "Qwen/Qwen2.5-VL-7B-Instruct",
            torch_dtype=torch.float16,
            device_map="auto",
            trust_remote_code=True,
        )
        print("Applying LoRA...")
        model = PeftModel.from_pretrained(
            base_model,
            "Stepan222/oem-fake-classifier-qwen2vl",
        )
        model.eval()
        processor = AutoProcessor.from_pretrained(
            "Qwen/Qwen2.5-VL-7B-Instruct",
            trust_remote_code=True,
        )
        print("Model ready!")
    return model, processor


def classify(image_url: str, title: str, description: str = ""):
    """Classify an eBay auto-part listing as OEM or FAKE from its image and text."""
    try:
        model, processor = load_model()
    except Exception as e:
        return f"Model error: {e}"

    # Download the listing image.
    try:
        response = requests.get(image_url, timeout=10)
        response.raise_for_status()
        image = Image.open(BytesIO(response.content)).convert("RGB")
    except Exception:
        return "Failed to load the image"

    # Build the text part of the prompt from the listing fields.
    text = f"Title: {title}"
    if description:
        text += f"\nDescription: {description}"

    prompt = f"""Analyze this eBay auto part listing. Is it OEM (original) or FAKE (aftermarket)?
{text}
Reply: OEM or FAKE with confidence % and reason."""

    messages = [
        {
            "role": "user",
            "content": [
                {"type": "image", "image": image},
                {"type": "text", "text": prompt},
            ],
        }
    ]

    # Render the chat template, then tokenize the text and image together.
    text_input = processor.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
    inputs = processor(
        text=[text_input], images=[image], return_tensors="pt", padding=True
    ).to(model.device)

    with torch.no_grad():
        out = model.generate(**inputs, max_new_tokens=80, do_sample=False)

    resp = processor.batch_decode(out, skip_special_tokens=True)[0]
    # Keep only the assistant's reply, dropping the echoed prompt.
    return resp.split("assistant")[-1].strip() if "assistant" in resp.lower() else resp


demo = gr.Interface(
    fn=classify,
    inputs=[
        gr.Textbox(label="Image URL"),
        gr.Textbox(label="Title"),
        gr.Textbox(label="Description"),
    ],
    outputs=gr.Textbox(label="Result"),
    title="OEM/Fake Classifier",
)

demo.launch()
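
# A minimal sketch of calling the classifier directly, without the Gradio UI.
# The URL, title, and description below are placeholders for illustration only,
# not real listing data:
#
# result = classify(
#     image_url="https://example.com/brake-pad.jpg",  # placeholder image URL
#     title="Front brake pad set",
#     description="New in box, original packaging",
# )
# print(result)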