How to use with SGLang

Install from pip and serve the model
# Install SGLang from pip:
pip install sglang
# Start the SGLang server (OpenAI-compatible); it listens on all
# interfaces at port 30000 and downloads the model weights on first run:
python3 -m sglang.launch_server \
    --model-path "kdf/python-docstring-generation" \
    --host 0.0.0.0 \
    --port 30000
# Call the server using curl (OpenAI-compatible API):
# "max_tokens" caps the completion length; "temperature" controls sampling
# randomness (0 = deterministic, higher = more varied).
curl -X POST "http://localhost:30000/v1/completions" \
	-H "Content-Type: application/json" \
	--data '{
		"model": "kdf/python-docstring-generation",
		"prompt": "Once upon a time,",
		"max_tokens": 512,
		"temperature": 0.5
	}'
Use Docker images
# Serve the model via the official SGLang Docker image:
#   --gpus all  : expose all host GPUs to the container
#   --shm-size  : enlarge shared memory (needed for tensor IPC)
#   -v ~/.cache : reuse the host Hugging Face cache so weights are not re-downloaded
#   HF_TOKEN    : Hugging Face access token (replace <secret>; needed for gated models)
docker run --gpus all \
    --shm-size 32g \
    -p 30000:30000 \
    -v ~/.cache/huggingface:/root/.cache/huggingface \
    --env "HF_TOKEN=<secret>" \
    --ipc=host \
    lmsysorg/sglang:latest \
    python3 -m sglang.launch_server \
        --model-path "kdf/python-docstring-generation" \
        --host 0.0.0.0 \
        --port 30000
# Call the server using curl (OpenAI-compatible API):
curl -X POST "http://localhost:30000/v1/completions" \
	-H "Content-Type: application/json" \
	--data '{
		"model": "kdf/python-docstring-generation",
		"prompt": "Once upon a time,",
		"max_tokens": 512,
		"temperature": 0.5
	}'
Quick Links

Basic info

Base model: Salesforce/codegen-350M-mono

Fine-tuned on the codeparrot/github-code-clean dataset

Training data filtered to Python code only

Usage

# Generate a docstring for a Python function with kdf/python-docstring-generation.
# The model is fine-tuned from Salesforce/codegen-350M-mono, which uses the GPT-2
# BPE vocabulary where token id 50256 is <|endoftext|>.
from transformers import AutoTokenizer, AutoModelForCausalLM

model_type = 'kdf/python-docstring-generation'
tokenizer = AutoTokenizer.from_pretrained(model_type)
model = AutoModelForCausalLM.from_pretrained(model_type)

# Prompt format: <|endoftext|>, the function source, then "# docstring" and an
# opening triple quote; the model completes the docstring body.
inputs = tokenizer('''<|endoftext|>
def load_excel(path):
    return pd.read_excel(path)

# docstring
"""''', return_tensors='pt')

# Upper bound on the number of tokens generated for the docstring itself.
doc_max_length = 128

generated_ids = model.generate(
    **inputs,
    # max_new_tokens counts only newly generated tokens, so the prompt length
    # need not be added by hand; equivalent to the older
    # max_length=inputs.input_ids.shape[1] + doc_max_length.
    max_new_tokens=doc_max_length,
    do_sample=False,               # greedy decoding: deterministic output
    return_dict_in_generate=True,  # return an object exposing .sequences
    num_return_sequences=1,
    output_scores=True,
    pad_token_id=50256,
    eos_token_id=50256  # stop at <|endoftext|>
)

# .sequences[0] holds prompt + completion token ids; decode the whole thing.
ret = tokenizer.decode(generated_ids.sequences[0], skip_special_tokens=False)
print(ret)

Prompt

You can steer the model toward a particular docstring style or language by including a worked example in the prompt, for example:

# Few-shot prompting: a worked example (function plus a finished docstring)
# steers the model toward that docstring style for the target function.
# Reuses `tokenizer` and `model` loaded above.
inputs = tokenizer('''<|endoftext|>
def add(a, b):
    return a + b

# docstring
"""
    Calculate numbers add.

    Args:
        a: the first number to add
        b: the second number to add

    Return:
        The result of a + b
"""
<|endoftext|>
def load_excel(path):
    return pd.read_excel(path)

# docstring
"""''', return_tensors='pt')

# Token budget for the generated docstring, beyond the prompt length.
doc_max_length = 128

prompt_len = inputs.input_ids.shape[1]
outputs = model.generate(
    **inputs,
    max_length=prompt_len + doc_max_length,
    do_sample=False,
    num_return_sequences=1,
    return_dict_in_generate=True,
    output_scores=True,
    pad_token_id=50256,  # GPT-2 <|endoftext|> id
    eos_token_id=50256,
)

# Decode prompt + completion; keep special tokens so the delimiters are visible.
ret = tokenizer.decode(outputs.sequences[0], skip_special_tokens=False)
print(ret)

# Cross-lingual styling: because the one-shot example's docstring is written in
# Chinese, the model continues in Chinese for the target function as well.
# Reuses `tokenizer` and `model` loaded above.
inputs = tokenizer('''<|endoftext|>
def add(a, b):
    return a + b

# docstring
"""
    计算数字相加

    Args:
        a: 第一个加数
        b: 第二个加数

    Return:
        相加的结果
"""
<|endoftext|>
def load_excel(path):
    return pd.read_excel(path)

# docstring
"""''', return_tensors='pt')

doc_max_length = 128  # tokens allowed for the generated docstring

generation = model.generate(
    **inputs,
    max_length=inputs.input_ids.shape[1] + doc_max_length,
    do_sample=False,
    return_dict_in_generate=True,
    output_scores=True,
    num_return_sequences=1,
    eos_token_id=50256,  # stop at <|endoftext|>
    pad_token_id=50256,
)

# Print the decoded prompt + completion, special tokens included.
print(tokenizer.decode(generation.sequences[0], skip_special_tokens=False))
Downloads last month
21
Inference Providers NEW
This model isn't deployed by any Inference Provider. 🙋 Ask for provider support