How to use davidrrobinson/BioLingual with Transformers:

# Use a pipeline as a high-level helper
from transformers import pipeline
pipe = pipeline("feature-extraction", model="davidrrobinson/BioLingual")

# Load model directly
from transformers import AutoProcessor, AutoModel
processor = AutoProcessor.from_pretrained("davidrrobinson/BioLingual")
model = AutoModel.from_pretrained("davidrrobinson/BioLingual")

Model card for BioLingual: Transferable Models for Bioacoustics with Human Language Supervision
An audio-text model for bioacoustics based on contrastive language-audio pretraining.
You can use this model for bioacoustic zero-shot audio classification or for fine-tuning on bioacoustic tasks.
Perform zero-shot audio classification with the pipeline:
from datasets import load_dataset
from transformers import pipeline

# Grab an example clip from the ESC-50 environmental sound dataset
dataset = load_dataset("ashraq/esc50")
audio = dataset["train"]["audio"][-1]["array"]

audio_classifier = pipeline(task="zero-shot-audio-classification", model="davidrrobinson/BioLingual")
output = audio_classifier(audio, candidate_labels=["Sound of a sperm whale", "Sound of a sea lion"])
print(output)
>>> [{"score": 0.999, "label": "Sound of a sperm whale"}, {"score": 0.001, "label": "Sound of a sea lion"}]
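The pipeline can also take a path to an audio file instead of an in-memory array. A minimal sketch, where field_recording.wav is a hypothetical local file (decoding a file path requires ffmpeg to be installed):

from transformers import pipeline

audio_classifier = pipeline(task="zero-shot-audio-classification", model="davidrrobinson/BioLingual")
# "field_recording.wav" is a placeholder; substitute your own recording
output = audio_classifier("field_recording.wav", candidate_labels=["Sound of a sperm whale", "Sound of a sea lion"])
print(output)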
You can also get the audio and text embeddings using ClapModel:
from datasets import load_dataset
from transformers import ClapModel, ClapProcessor

# Dummy audio for demonstration; substitute your own recordings
librispeech_dummy = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
audio_sample = librispeech_dummy[0]

model = ClapModel.from_pretrained("davidrrobinson/BioLingual")
processor = ClapProcessor.from_pretrained("davidrrobinson/BioLingual")

inputs = processor(audios=audio_sample["audio"]["array"], return_tensors="pt")
audio_embed = model.get_audio_features(**inputs)
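The text tower works the same way through get_text_features. A minimal sketch; the caption strings below are illustrative, not prescribed prompts:

from transformers import ClapModel, ClapProcessor

model = ClapModel.from_pretrained("davidrrobinson/BioLingual")
processor = ClapProcessor.from_pretrained("davidrrobinson/BioLingual")

# Illustrative captions; any bioacoustic text descriptions work
inputs = processor(text=["Sound of a sperm whale", "Sound of a sea lion"], return_tensors="pt", padding=True)
text_embed = model.get_text_features(**inputs)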
To run the model on GPU, move both the model and the processed inputs onto the device:

from datasets import load_dataset
from transformers import ClapModel, ClapProcessor

librispeech_dummy = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
audio_sample = librispeech_dummy[0]

model = ClapModel.from_pretrained("davidrrobinson/BioLingual").to(0)
processor = ClapProcessor.from_pretrained("davidrrobinson/BioLingual")

inputs = processor(audios=audio_sample["audio"]["array"], return_tensors="pt").to(0)
audio_embed = model.get_audio_features(**inputs)
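To score a clip against candidate captions without the pipeline, you can pass text and audio through the model together and read the audio-text similarity logits. A minimal sketch along the lines of the examples above; the candidate captions are illustrative:

from datasets import load_dataset
from transformers import ClapModel, ClapProcessor

librispeech_dummy = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
audio_sample = librispeech_dummy[0]

model = ClapModel.from_pretrained("davidrrobinson/BioLingual")
processor = ClapProcessor.from_pretrained("davidrrobinson/BioLingual")

# Illustrative candidate captions
texts = ["Sound of a sperm whale", "Sound of a sea lion"]
inputs = processor(text=texts, audios=audio_sample["audio"]["array"], return_tensors="pt", padding=True)

outputs = model(**inputs)
probs = outputs.logits_per_audio.softmax(dim=-1)  # one score per caption for the clip
print(probs)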