nmndeep committed
Commit 7b6ca58 · verified · 1 Parent(s): a942666

Update README.md

Files changed (1):
  1. README.md +42 -8
README.md CHANGED
@@ -1,8 +1,42 @@
- ---
- tags:
- - clip
- library_name: open_clip
- pipeline_tag: zero-shot-image-classification
- license: mit
- ---
- # Model card for CLIC-ViT-B-32-224-CogVLM
+
+ # Model Card for CLIC-ViT-B-32-224-CogVLM
+
+ ## Model Details
+
+ - **Model details:** Fine-tuned with CLIC on a 1M-sample LAION dataset relabelled with CogVLM.
+
+ ## Model Usage
+ ### With OpenCLIP
+
+ ```python
+ import torch
+ from urllib.request import urlopen
+ from PIL import Image
+ import open_clip
+
+ # Load the model and its inference preprocessing transform from the Hugging Face Hub.
+ model, _, image_processor = open_clip.create_model_and_transforms('hf-hub:nmndeep/CLIC-ViT-B-32-224-CogVLM')
+ model.eval()
+
+ tokenizer = open_clip.get_tokenizer('hf-hub:nmndeep/CLIC-ViT-B-32-224-CogVLM')
+
+ # Fetch a test image and preprocess it into a batch of one.
+ image = image_processor(Image.open(urlopen(
+     'https://images.pexels.com/photos/869258/pexels-photo-869258.jpeg?auto=compress&cs=tinysrgb&w=1260&h=750&dpr=1'))).unsqueeze(0)
+
+ # Tokenize the candidate labels for zero-shot classification.
+ texts = ["a diagram", "a dog", "a cat", "snow"]
+ text = tokenizer(texts)
+
+ with torch.no_grad(), torch.autocast("cuda"):
+     # Embed image and text, then L2-normalize so dot products are cosine similarities.
+     image_features = model.encode_image(image)
+     text_features = model.encode_text(text)
+     image_features /= image_features.norm(dim=-1, keepdim=True)
+     text_features /= text_features.norm(dim=-1, keepdim=True)
+
+     # Softmax over scaled similarities yields per-label probabilities.
+     text_probs = (100.0 * image_features @ text_features.T).softmax(dim=-1)
+     idx = torch.argmax(text_probs)
+     print("Output label:", texts[idx])
+ ```
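+
+ ### On a GPU
+
+ The snippet above runs on CPU. Below is a minimal sketch of the same zero-shot classification on a GPU, continuing from the variables defined above; it assumes a CUDA-capable device may be available and falls back to CPU otherwise.
+
+ ```python
+ # Continuation of the snippet above: model, image, text, and texts are already defined.
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+ model = model.to(device)
+
+ with torch.no_grad(), torch.autocast(device_type=device):
+     image_features = model.encode_image(image.to(device))
+     text_features = model.encode_text(text.to(device))
+     image_features /= image_features.norm(dim=-1, keepdim=True)
+     text_features /= text_features.norm(dim=-1, keepdim=True)
+     text_probs = (100.0 * image_features @ text_features.T).softmax(dim=-1)
+     print("Output label:", texts[text_probs.argmax()])
+ ```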