Update README.md
README.md CHANGED
# Model Card for CLIC-ViT-B-32-224-CogVLM

## Model Details

<!-- Provide the basic links for the model. -->

- **Model details:** Fine-tuned with CLIC on a 1M LAION dataset relabelled with CogVLM

## Model Usage

### With OpenCLIP
```python
import torch
from urllib.request import urlopen
from PIL import Image
import open_clip

# Load the fine-tuned model and its preprocessing transforms from the Hugging Face Hub
model, _, image_processor = open_clip.create_model_and_transforms('hf-hub:nmndeep/CLIC-ViT-B-32-224-CogVLM')

# Download and preprocess an example image
image = image_processor(Image.open(urlopen(
    'https://images.pexels.com/photos/869258/pexels-photo-869258.jpeg?auto=compress&cs=tinysrgb&w=1260&h=750&dpr=1'))).unsqueeze(0)

model.eval()

tokenizer = open_clip.get_tokenizer('hf-hub:nmndeep/CLIC-ViT-B-32-224-CogVLM')

# Candidate labels for zero-shot classification
texts = ["a diagram", "a dog", "a cat", "snow"]
text = tokenizer(texts)

with torch.no_grad(), torch.autocast("cuda"):
    image_features = model.encode_image(image)
    text_features = model.encode_text(text)
    image_features /= image_features.norm(dim=-1, keepdim=True)
    text_features /= text_features.norm(dim=-1, keepdim=True)

    # Softmax over scaled cosine similarities gives per-label probabilities
    text_probs = (100.0 * image_features @ text_features.T).softmax(dim=-1)
    idx = torch.argmax(text_probs)
    print("Output label:", texts[idx])
```
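If you want per-label probabilities rather than just the top label, or you prefer to move the model to a GPU explicitly, the snippet below is a minimal sketch of how that might look. It reuses the `model`, `tokenizer`, and `image` loaded above; the `"a photo of a {label}"` prompt template and the explicit device handling are illustrative choices, not part of the original example.

```python
# Minimal sketch: assumes model, tokenizer, and image from the example above are already defined.
# The prompt template and device handling are illustrative, not prescribed by the model card.
import torch

device = "cuda" if torch.cuda.is_available() else "cpu"
model = model.to(device)

labels = ["a diagram", "a dog", "a cat", "snow"]
prompts = [f"a photo of a {label}" for label in labels]  # common CLIP-style prompt template
text = tokenizer(prompts).to(device)

with torch.no_grad():
    image_features = model.encode_image(image.to(device))
    text_features = model.encode_text(text)
    image_features /= image_features.norm(dim=-1, keepdim=True)
    text_features /= text_features.norm(dim=-1, keepdim=True)
    probs = (100.0 * image_features @ text_features.T).softmax(dim=-1)

# Print the probability assigned to each candidate label
for label, p in zip(labels, probs[0].tolist()):
    print(f"{label}: {p:.3f}")
```

Prompt templates of this form often improve zero-shot accuracy over bare class names, though the best wording can vary by task.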