import json
import re
from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from typing import Any, Dict, List, Literal, Optional, Sequence, Set, Tuple, Union


def calculate_gpa(grades: Sequence[str], hours: Sequence[int]) -> float:
    """Return the credit-hour-weighted GPA, rounded to two decimals.

    Args:
        grades: Letter grades, e.g. ``["A", "B", "C"]``.
        hours: Credit hours for each grade, parallel to ``grades``.

    Returns:
        Weighted grade-point average rounded to 2 decimal places.

    Raises:
        ValueError: If the total credit hours is zero (e.g. empty input).
        KeyError: If a grade is not in the supported grade table.
    """
    # Extended beyond the original {A, B, C} so D/F grades are representable;
    # existing inputs produce identical results.
    grade_to_score = {"A": 4, "B": 3, "C": 2, "D": 1, "F": 0}
    total_score, total_hours = 0, 0
    for grade, hour in zip(grades, hours):
        total_score += grade_to_score[grade] * hour
        total_hours += hour
    if total_hours == 0:
        # Guard against ZeroDivisionError on empty input.
        raise ValueError("total credit hours must be positive")
    return round(total_score / total_hours, 2)


# Registry mapping tool names to local callables, used to dispatch tool calls
# extracted from the model's output.
tool_map = {"calculate_gpa": calculate_gpa}
# Third-party: Hugging Face Transformers for the base model, PEFT for the
# LoRA adapter. NOTE(review): load_in_8bit requires bitsandbytes and a CUDA
# device — confirm the runtime environment provides both.
from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer
from peft import PeftModel
# Tokenizer and 8-bit quantized base chat model, auto-placed across devices.
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2-7B-Instruct")
model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2-7B-Instruct",
torch_dtype="auto", device_map="auto", load_in_8bit = True)
# Attach the function-calling LoRA adapter on top of the base model.
model = PeftModel.from_pretrained(model, "svjack/Qwen2-7B_Function_Call_tiny_lora")
# Streams generated tokens to stdout as they are produced, hiding the prompt.
streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
# Slot type used by LLaMA-Factory-style formatters (kept for compatibility).
SLOTS = Sequence[Union[str, Set[str], Dict[str, str]]]

# System-prompt template describing the available tools and the expected
# "Action / Action Input" reply format. Reconstructed from the mangled
# adjacent-string-literal concatenation in the original source.
DEFAULT_TOOL_PROMPT = (
    "You have access to the following tools:\n{tool_text}"
    "Use the following format if using a tool:\n"
    "```\n"
    "Action: tool name (one of [{tool_names}]).\n"
    "Action Input: the input to the tool, in a JSON format representing the kwargs "
    '(e.g. ```{{"input": "hello world", "num_beams": 5}}```).\n'
    "```\n"
)


def default_tool_formatter(tools: List[Dict[str, Any]]) -> str:
    """Render a list of JSON-schema tool specs into a system-prompt string.

    Args:
        tools: Bare function specs, each with ``name``, optional
            ``description``, and a JSON-schema ``parameters`` object.

    Returns:
        ``DEFAULT_TOOL_PROMPT`` filled in with one "> Tool Name: ..." section
        per tool and the comma-joined tool names.
    """
    tool_text = ""
    tool_names = []
    for tool in tools:
        param_text = ""
        for name, param in tool["parameters"]["properties"].items():
            required = ", required" if name in tool["parameters"].get("required", []) else ""
            enum = ", should be one of [{}]".format(", ".join(param["enum"])) if param.get("enum", None) else ""
            # For array parameters, describe the element type as well.
            items = (
                ", where each item should be {}".format(param["items"].get("type", "")) if param.get("items") else ""
            )
            param_text += " - {name} ({type}{required}): {desc}{enum}{items}\n".format(
                name=name,
                type=param.get("type", ""),
                required=required,
                desc=param.get("description", ""),
                enum=enum,
                items=items,
            )
        tool_text += "> Tool Name: {name}\nTool Description: {desc}\nTool Args:\n{args}\n".format(
            name=tool["name"], desc=tool.get("description", ""), args=param_text
        )
        tool_names.append(tool["name"])
    return DEFAULT_TOOL_PROMPT.format(tool_text=tool_text, tool_names=", ".join(tool_names))
def default_tool_extractor(content: str) -> Union[str, List[Tuple[str, str]]]:
    """Extract ``(tool_name, json_arguments)`` pairs from a model response.

    Args:
        content: Raw assistant text, expected to contain zero or more
            "Action: <name> / Action Input: <json>" segments.

    Returns:
        A list of ``(tool_name, canonical_json_args)`` tuples, or the original
        ``content`` unchanged when no tool call is found or the arguments are
        not valid JSON.
    """
    regex = re.compile(r"Action:\s*([a-zA-Z0-9_]+)\s*Action Input:\s*(.+?)(?=\s*Action:|\s*$)", re.DOTALL)
    action_match: List[Tuple[str, str]] = re.findall(regex, content)
    if not action_match:
        return content
    results = []
    for match in action_match:
        tool_name = match[0].strip()
        # Drop surrounding quotes and markdown code fences around the JSON blob.
        tool_input = match[1].strip().strip('"').strip("```")
        try:
            arguments = json.loads(tool_input)
            # Re-serialize so downstream consumers receive canonical JSON.
            results.append((tool_name, json.dumps(arguments, ensure_ascii=False)))
        except json.JSONDecodeError:
            return content
    return results
# ---- Function tool definition (OpenAI-style JSON schema) ----
tools = [
    {
        "type": "function",
        "function": {
            "name": "calculate_gpa",
            "description": "Calculate the Grade Point Average (GPA) based on grades and credit hours",
            "parameters": {
                "type": "object",
                "properties": {
                    "grades": {"type": "array", "items": {"type": "string"}, "description": "The grades"},
                    "hours": {"type": "array", "items": {"type": "integer"}, "description": "The credit hours"},
                },
                "required": ["grades", "hours"],
            },
        },
    }
]
# The formatter expects bare function specs, so strip the OpenAI-style wrapper.
tools_input = [entry["function"] for entry in tools]
system_tool_prompt = default_tool_formatter(tools_input)
def qwen_hf_predict(messages, qw_model=model, tokenizer=tokenizer, streamer=streamer,
                    do_sample=True, top_p=0.95, top_k=40, max_new_tokens=512,
                    max_input_length=3500, temperature=0.9, repetition_penalty=1.0,
                    device="cuda"):
    """Generate one assistant turn for ``messages`` with the Qwen2 chat model.

    Args:
        messages: Chat history as a list of ``{"role", "content"}`` dicts.
        qw_model / tokenizer / streamer: Default to the module-level objects.
        do_sample, top_p, top_k, temperature, repetition_penalty,
            max_new_tokens: Standard HF generation knobs, forwarded as-is.
        max_input_length: Currently unused; kept for interface compatibility.
        device: Device the input ids are moved to before generation.

    Returns:
        The decoded assistant reply with chat-template markers stripped.
    """
    encodeds = tokenizer.apply_chat_template(
        messages, return_tensors="pt", add_generation_prompt=True
    )
    model_inputs = encodeds.to(device)
    generated_ids = qw_model.generate(
        model_inputs,
        max_new_tokens=max_new_tokens,
        do_sample=do_sample,
        streamer=streamer,
        top_p=top_p,
        top_k=top_k,
        temperature=temperature,
        repetition_penalty=repetition_penalty,
    )
    # Keep only the final assistant turn and drop the end-of-message marker.
    out = tokenizer.batch_decode(generated_ids)[0].split("<|im_start|>assistant")[-1].replace("<|im_end|>", "").strip()
    return out
# Seed the conversation: tool-describing system prompt plus the user question.
messages = [
    {"role": "system", "content": system_tool_prompt},
    {"role": "user", "content": "My grades are A, A, B, and C. The credit hours are 3, 4, 3, and 2."},
]

# First pass: let the model decide whether (and how) to call a tool.
out = qwen_hf_predict(messages)
tool_out = default_tool_extractor(out)
print(tool_out)

# Execute the extracted tool call locally via the registry.
name, arguments = tool_out[0][0], json.loads(tool_out[0][1])
tool_result = tool_map[name](**arguments)
print(tool_result)

# Feed the assistant turn and the tool result back for a final answer.
messages.append({"role": "assistant", "content": out})
messages.append({"role": "tool", "content": json.dumps({"gpa": tool_result}, ensure_ascii=False)})
final_out = qwen_hf_predict(messages)
print(final_out)
Example output:
Action: calculate_gpa
Action Input: {"grades": ["A", "A", "B", "C"], "hours": [3, 4, 3, 2]}
[('calculate_gpa', '{"grades": ["A", "A", "B", "C"], "hours": [3, 4, 3, 2]}')]
3.42
Based on the grades and credit hours you provided, your Grade Point Average (GPA) is 3.42.
Qwen2-7B_Function_Call_tiny_lora is an AI model hosted on huggingface.co. It showcases the behavior of svjack's Qwen2-7B_Function_Call_tiny_lora model, which can be used instantly. huggingface.co offers a free trial of the Qwen2-7B_Function_Call_tiny_lora model as well as paid usage, and the model can be called through an API from Node.js, Python, or plain HTTP.
huggingface.co is an online trial and API platform that integrates Qwen2-7B_Function_Call_tiny_lora's modeling capabilities, including API services, and provides a free online trial of Qwen2-7B_Function_Call_tiny_lora; you can try it for free by clicking the link below.
svjack Qwen2-7B_Function_Call_tiny_lora online free url in huggingface.co:
Qwen2-7B_Function_Call_tiny_lora is an open-source model that any user can find and install from GitHub free of charge. At the same time, huggingface.co hosts the installed Qwen2-7B_Function_Call_tiny_lora, so users can debug and try the model directly on huggingface.co; API access for the installed model is also supported for free.
Qwen2-7B_Function_Call_tiny_lora install url in huggingface.co: