tiny random models
Collection · 105 items
How to use tiny-random/gemma-3 with Transformers:
# Use a pipeline as a high-level helper
from transformers import pipeline
pipe = pipeline("image-text-to-text", model="tiny-random/gemma-3")
messages = [
{
"role": "user",
"content": [
{"type": "image", "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG"},
{"type": "text", "text": "What animal is on the candy?"}
]
},
]
pipe(text=messages)

# Load model directly
from transformers import AutoProcessor, AutoModelForImageTextToText
processor = AutoProcessor.from_pretrained("tiny-random/gemma-3")
model = AutoModelForImageTextToText.from_pretrained("tiny-random/gemma-3")
messages = [
{
"role": "user",
"content": [
{"type": "image", "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG"},
{"type": "text", "text": "What animal is on the candy?"}
]
},
]
inputs = processor.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=True,
return_dict=True,
return_tensors="pt",
).to(model.device)
outputs = model.generate(**inputs, max_new_tokens=40)
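# Decode only the newly generated tokens, skipping the prompt portion of the output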
print(processor.decode(outputs[0][inputs["input_ids"].shape[-1]:]))

How to use tiny-random/gemma-3 with vLLM:
# Install vLLM from pip:
pip install vllm
# Start the vLLM server:
vllm serve "tiny-random/gemma-3"
# Call the server using curl (OpenAI-compatible API):
curl -X POST "http://localhost:8000/v1/chat/completions" \
-H "Content-Type: application/json" \
--data '{
"model": "tiny-random/gemma-3",
"messages": [
{
"role": "user",
"content": [
{
"type": "text",
"text": "Describe this image in one sentence."
},
{
"type": "image_url",
"image_url": {
"url": "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
}
}
]
}
]
}'
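The vLLM endpoint is OpenAI-compatible, so the official openai Python client can be used instead of curl. A minimal sketch, assuming the server started above is reachable on localhost:8000 (the api_key value is arbitrary for a local server):

from openai import OpenAI

# Point the client at the local vLLM server rather than api.openai.com
client = OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")
response = client.chat.completions.create(
    model="tiny-random/gemma-3",
    messages=[
        {
            "role": "user",
            "content": [
                {"type": "text", "text": "Describe this image in one sentence."},
                {
                    "type": "image_url",
                    "image_url": {"url": "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"},
                },
            ],
        }
    ],
)
print(response.choices[0].message.content)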
How to use tiny-random/gemma-3 with SGLang:
# Install SGLang from pip:
pip install sglang
# Start the SGLang server:
python3 -m sglang.launch_server \
--model-path "tiny-random/gemma-3" \
--host 0.0.0.0 \
--port 30000
# Call the server using curl (OpenAI-compatible API):
curl -X POST "http://localhost:30000/v1/chat/completions" \
-H "Content-Type: application/json" \
--data '{
"model": "tiny-random/gemma-3",
"messages": [
{
"role": "user",
"content": [
{
"type": "text",
"text": "Describe this image in one sentence."
},
{
"type": "image_url",
"image_url": {
"url": "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
}
}
]
}
]
}'

# Alternatively, start the SGLang server with Docker:
docker run --gpus all \
--shm-size 32g \
-p 30000:30000 \
-v ~/.cache/huggingface:/root/.cache/huggingface \
--env "HF_TOKEN=<secret>" \
--ipc=host \
lmsysorg/sglang:latest \
python3 -m sglang.launch_server \
--model-path "tiny-random/gemma-3" \
--host 0.0.0.0 \
--port 30000
# The container exposes the same OpenAI-compatible API on port 30000,
# so it can be called with the same curl command shown above.

How to use tiny-random/gemma-3 with Docker Model Runner:
docker model run hf.co/tiny-random/gemma-3
This tiny model is for debugging. It is randomly initialized, with the config adapted from google/gemma-3-27b-it.

Example usage:
from transformers import pipeline
model_id = "tiny-random/gemma-3"
pipe = pipeline(
"image-text-to-text", model=model_id, device="cuda",
trust_remote_code=True, max_new_tokens=3,
)
messages = [
{
"role": "system",
"content": [{"type": "text", "text": "You are a helpful assistant."}]
},
{
"role": "user",
"content": [
{"type": "image", "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG"},
{"type": "text", "text": "What animal is on the candy?"}
]
}
]
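# The weights are random, so the reply is gibberish; this only verifies the pipeline runs end to end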
output = pipe(text=messages, max_new_tokens=5)
print(output)

Codes to create this repo:

import torch
from transformers import (
    AutoConfig,
    AutoProcessor,
    Gemma3ForConditionalGeneration,
    GenerationConfig,
    set_seed,
)
source_model_id = "google/gemma-3-27b-it"
save_folder = "/tmp/tiny-random/gemma-3"
processor = AutoProcessor.from_pretrained(
source_model_id, trust_remote_code=True,
)
processor.save_pretrained(save_folder)
config = AutoConfig.from_pretrained(
source_model_id, trust_remote_code=True,
)
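# Shrink both the text and vision towers so the checkpoint stays tiny and fast to load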
config.text_config.hidden_size = 32
config.text_config.intermediate_size = 128
config.text_config.head_dim = 32
config.text_config.num_attention_heads = 1
config.text_config.num_key_value_heads = 1
config.text_config.num_hidden_layers = 2
config.text_config.sliding_window_pattern = 2
config.vision_config.hidden_size = 32
config.vision_config.num_hidden_layers = 2
config.vision_config.num_attention_heads = 1
config.vision_config.intermediate_size = 128
model = Gemma3ForConditionalGeneration(
config,
).to(torch.bfloat16)
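# Sanity check: Gemma-3 alternates sliding-window and full-attention layers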
for layer in model.language_model.model.layers:
print(layer.is_sliding)
model.generation_config = GenerationConfig.from_pretrained(
source_model_id, trust_remote_code=True,
)
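# Re-initialize every parameter with random values so no pretrained weights remain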
set_seed(42)
with torch.no_grad():
for name, p in sorted(model.named_parameters()):
torch.nn.init.normal_(p, 0, 0.5)
print(name, p.shape)
model.save_pretrained(save_folder)
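As a quick sanity check (not part of the original script), the saved checkpoint can be reloaded and its parameter count printed; by construction the model should be tiny:

from transformers import AutoModelForImageTextToText

reloaded = AutoModelForImageTextToText.from_pretrained(save_folder)
# Count all parameters; the shrunken config keeps this very small
print(sum(p.numel() for p in reloaded.parameters()), "parameters")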