Upload model
- config.json +6 -1
- model.py +5 -6
config.json CHANGED
@@ -1,13 +1,18 @@
 {
+  "architectures": [
+    "SpeechLLMModel"
+  ],
   "audio_enc_dim": 1024,
   "audio_encoder_name": "microsoft/wavlm-large",
   "audio_processor_name": "microsoft/wavlm-base",
   "auto_map": {
-    "AutoConfig": "config.SpeechLLMModelConfig"
+    "AutoConfig": "config.SpeechLLMModelConfig",
+    "AutoModel": "model.SpeechLLMModel"
   },
   "llm_dim": 2048,
   "llm_model_checkpoint": "hf_repo/llm_model_checkpoint",
   "llm_model_name": "TinyLlama/TinyLlama-1.1B-Chat-v1.0",
   "model_type": "custom_model",
+  "torch_dtype": "float32",
   "transformers_version": "4.41.2"
 }
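With the added "architectures" entry and the second "auto_map" mapping, the custom model class can be resolved through the Transformers Auto classes. A minimal loading sketch, assuming a hypothetical repo id and that running the repository's custom code is acceptable (trust_remote_code=True):

from transformers import AutoConfig, AutoModel

repo_id = "hf_repo/speechllm"  # hypothetical repo id, not taken from the diff

# auto_map routes AutoConfig to config.SpeechLLMModelConfig and
# AutoModel to model.SpeechLLMModel inside this repository.
config = AutoConfig.from_pretrained(repo_id, trust_remote_code=True)
model = AutoModel.from_pretrained(repo_id, trust_remote_code=True)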
model.py CHANGED
@@ -1,14 +1,15 @@
 import torch
 from torch import nn
 import torchaudio
-from transformers import PreTrainedModel, AutoModelForCausalLM, AutoTokenizer, HubertModel,
+from transformers import PreTrainedModel, AutoModelForCausalLM, AutoTokenizer, HubertModel, AutoProcessor, AutoConfig, AutoModel, AutoFeatureExtractor
 from .config import SpeechLLMModelConfig
 from peft import LoraConfig, get_peft_model
 
 class TransformerAudioEnoder(nn.Module):
     def __init__(self, model_name='microsoft/wavlm-large', finetune=False):
         super().__init__()
-
+        config = AutoConfig.from_pretrained(model_name)
+        self.encoder = AutoModel.from_config(config)
 
     def forward(self, x):
         return self.encoder(x).last_hidden_state
@@ -44,11 +45,9 @@ class SpeechLLMModel(PreTrainedModel):
         self.audio_processor = AutoFeatureExtractor.from_pretrained(config.audio_processor_name)
         self.audio_encoder = TransformerAudioEnoder(config.audio_encoder_name)
         self.connector = CNNConnector(config.audio_enc_dim, config.llm_dim)
-
-        # self.llm_model = AutoModelForCausalLM.from_pretrained(config.llm_model_checkpoint)
-        # self.llm_tokenizer = AutoTokenizer.from_pretrained(config.llm_model_name)
 
-
+        llm_config = AutoConfig.from_pretrained(config.llm_model_name)
+        self.llm_model = AutoModelForCausalLM.from_config(llm_config)
         self.llm_tokenizer = AutoTokenizer.from_pretrained(config.llm_model_name)
 
         peft_config = LoraConfig(
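Both the audio encoder and the LLM are now instantiated with from_config rather than from_pretrained, so constructing the class only builds the architectures with randomly initialized weights; the actual parameters are expected to come from the checkpoint stored in this repository when the wrapper model is loaded from the Hub. A minimal sketch of the difference between the two construction paths, using the encoder name from the config above:

from transformers import AutoConfig, AutoModel

# from_pretrained downloads and loads the published WavLM weights.
encoder_pretrained = AutoModel.from_pretrained("microsoft/wavlm-large")

# from_config builds the same architecture with randomly initialized
# weights; no checkpoint is downloaded for the submodule itself.
wavlm_config = AutoConfig.from_pretrained("microsoft/wavlm-large")
encoder_scratch = AutoModel.from_config(wavlm_config)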