add
0
models/LLaVA/llava/serve/__init__.py
Normal file
153
models/LLaVA/llava/serve/cli.py
Normal file
@@ -0,0 +1,153 @@
"""
Usage:
python3 -m fastchat.serve.cli --model ~/model_weights/llama-7b
"""
import argparse
import time

import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

from llava.conversation import conv_templates, SeparatorStyle


@torch.inference_mode()
def generate_stream(tokenizer, model, params, device,
                    context_len=2048, stream_interval=2):
    """Adapted from fastchat/serve/model_worker.py::generate_stream"""

    prompt = params["prompt"]
    l_prompt = len(prompt)
    temperature = float(params.get("temperature", 1.0))
    max_new_tokens = int(params.get("max_new_tokens", 256))
    stop_str = params.get("stop", None)

    input_ids = tokenizer(prompt).input_ids
    output_ids = list(input_ids)

    max_src_len = context_len - max_new_tokens - 8
    input_ids = input_ids[-max_src_len:]

    for i in range(max_new_tokens):
        if i == 0:
            out = model(
                torch.as_tensor([input_ids], device=device), use_cache=True)
            logits = out.logits
            past_key_values = out.past_key_values
        else:
            attention_mask = torch.ones(
                1, past_key_values[0][0].shape[-2] + 1, device=device)
            out = model(input_ids=torch.as_tensor([[token]], device=device),
                        use_cache=True,
                        attention_mask=attention_mask,
                        past_key_values=past_key_values)
            logits = out.logits
            past_key_values = out.past_key_values

        last_token_logits = logits[0][-1]
        if temperature < 1e-4:
            token = int(torch.argmax(last_token_logits))
        else:
            probs = torch.softmax(last_token_logits / temperature, dim=-1)
            token = int(torch.multinomial(probs, num_samples=1))

        output_ids.append(token)

        if token == tokenizer.eos_token_id:
            stopped = True
        else:
            stopped = False

        if i % stream_interval == 0 or i == max_new_tokens - 1 or stopped:
            output = tokenizer.decode(output_ids, skip_special_tokens=True)
            pos = output.rfind(stop_str, l_prompt)
            if pos != -1:
                output = output[:pos]
                stopped = True
            yield output

        if stopped:
            break

    del past_key_values


def main(args):
    model_name = args.model_name
    num_gpus = args.num_gpus

    # Model
    if args.device == "cuda":
        kwargs = {"torch_dtype": torch.float16}
        if num_gpus == "auto":
            kwargs["device_map"] = "auto"
        else:
            num_gpus = int(num_gpus)
            if num_gpus != 1:
                kwargs.update({
                    "device_map": "auto",
                    "max_memory": {i: "13GiB" for i in range(num_gpus)},
                })
    elif args.device == "cpu":
        kwargs = {}
    else:
        raise ValueError(f"Invalid device: {args.device}")

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(model_name,
        low_cpu_mem_usage=True, **kwargs)

    if args.device == "cuda" and num_gpus == 1:
        model.cuda()

    # Chat
    conv = conv_templates[args.conv_template].copy()
    while True:
        try:
            inp = input(f"{conv.roles[0]}: ")
        except EOFError:
            inp = ""
        if not inp:
            print("exit...")
            break

        conv.append_message(conv.roles[0], inp)
        conv.append_message(conv.roles[1], None)
        prompt = conv.get_prompt()

        params = {
            "model": model_name,
            "prompt": prompt,
            "temperature": args.temperature,
            "max_new_tokens": args.max_new_tokens,
            "stop": conv.sep if conv.sep_style == SeparatorStyle.SINGLE else conv.sep2,
        }

        print(f"{conv.roles[1]}: ", end="", flush=True)
        pre = 0
        for outputs in generate_stream(tokenizer, model, params, args.device):
            outputs = outputs[len(prompt) + 1:].strip()
            outputs = outputs.split(" ")
            now = len(outputs)
            if now - 1 > pre:
                print(" ".join(outputs[pre:now-1]), end=" ", flush=True)
                pre = now - 1
        print(" ".join(outputs[pre:]), flush=True)

        conv.messages[-1][-1] = " ".join(outputs)

        if args.debug:
            print("\n", {"prompt": prompt, "outputs": outputs}, "\n")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--model-name", type=str, default="facebook/opt-350m")
    parser.add_argument("--num-gpus", type=str, default="1")
    parser.add_argument("--device", type=str, choices=["cuda", "cpu"], default="cuda")
    parser.add_argument("--conv-template", type=str, default="v1")
    parser.add_argument("--temperature", type=float, default=0.7)
    parser.add_argument("--max-new-tokens", type=int, default=512)
    parser.add_argument("--debug", action="store_true")
    args = parser.parse_args()
    main(args)
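The `generate_stream` generator above can also be driven outside of `main()`. A minimal sketch, assuming the `llava` package is importable and using `facebook/opt-125m` purely as a small stand-in checkpoint:

```python
# Minimal sketch of calling generate_stream directly.
# Assumptions: the llava package is on PYTHONPATH and "facebook/opt-125m"
# is only a small stand-in model; any causal LM with a KV cache behaves the same.
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

from llava.serve.cli import generate_stream

tokenizer = AutoTokenizer.from_pretrained("facebook/opt-125m")
model = AutoModelForCausalLM.from_pretrained("facebook/opt-125m")

params = {
    "prompt": "A chat between a human and an assistant.\nHuman: Hello!\nAssistant:",
    "temperature": 0.7,
    "max_new_tokens": 32,
    "stop": "\nHuman:",
}
# Each yielded string is the full decoded text so far, trimmed at the stop string.
for partial in generate_stream(tokenizer, model, params, device="cpu"):
    print(partial, end="\r", flush=True)
print()
```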
298
models/LLaVA/llava/serve/controller.py
Normal file
@@ -0,0 +1,298 @@
"""
A controller manages distributed workers.
It sends worker addresses to clients.
"""
import argparse
import asyncio
import dataclasses
from enum import Enum, auto
import json
import logging
import time
from typing import List, Union
import threading

from fastapi import FastAPI, Request
from fastapi.responses import StreamingResponse
import numpy as np
import requests
import uvicorn

from llava.constants import CONTROLLER_HEART_BEAT_EXPIRATION
from llava.utils import build_logger, server_error_msg


logger = build_logger("controller", "controller.log")


class DispatchMethod(Enum):
    LOTTERY = auto()
    SHORTEST_QUEUE = auto()

    @classmethod
    def from_str(cls, name):
        if name == "lottery":
            return cls.LOTTERY
        elif name == "shortest_queue":
            return cls.SHORTEST_QUEUE
        else:
            raise ValueError(f"Invalid dispatch method")


@dataclasses.dataclass
class WorkerInfo:
    model_names: List[str]
    speed: int
    queue_length: int
    check_heart_beat: bool
    last_heart_beat: str


def heart_beat_controller(controller):
    while True:
        time.sleep(CONTROLLER_HEART_BEAT_EXPIRATION)
        controller.remove_stable_workers_by_expiration()


class Controller:
    def __init__(self, dispatch_method: str):
        # Dict[str -> WorkerInfo]
        self.worker_info = {}
        self.dispatch_method = DispatchMethod.from_str(dispatch_method)

        self.heart_beat_thread = threading.Thread(
            target=heart_beat_controller, args=(self,))
        self.heart_beat_thread.start()

        logger.info("Init controller")

    def register_worker(self, worker_name: str, check_heart_beat: bool,
                        worker_status: dict):
        if worker_name not in self.worker_info:
            logger.info(f"Register a new worker: {worker_name}")
        else:
            logger.info(f"Register an existing worker: {worker_name}")

        if not worker_status:
            worker_status = self.get_worker_status(worker_name)
        if not worker_status:
            return False

        self.worker_info[worker_name] = WorkerInfo(
            worker_status["model_names"], worker_status["speed"], worker_status["queue_length"],
            check_heart_beat, time.time())

        logger.info(f"Register done: {worker_name}, {worker_status}")
        return True

    def get_worker_status(self, worker_name: str):
        try:
            r = requests.post(worker_name + "/worker_get_status", timeout=5)
        except requests.exceptions.RequestException as e:
            logger.error(f"Get status fails: {worker_name}, {e}")
            return None

        if r.status_code != 200:
            logger.error(f"Get status fails: {worker_name}, {r}")
            return None

        return r.json()

    def remove_worker(self, worker_name: str):
        del self.worker_info[worker_name]

    def refresh_all_workers(self):
        old_info = dict(self.worker_info)
        self.worker_info = {}

        for w_name, w_info in old_info.items():
            if not self.register_worker(w_name, w_info.check_heart_beat, None):
                logger.info(f"Remove stale worker: {w_name}")

    def list_models(self):
        model_names = set()

        for w_name, w_info in self.worker_info.items():
            model_names.update(w_info.model_names)

        return list(model_names)

    def get_worker_address(self, model_name: str):
        if self.dispatch_method == DispatchMethod.LOTTERY:
            worker_names = []
            worker_speeds = []
            for w_name, w_info in self.worker_info.items():
                if model_name in w_info.model_names:
                    worker_names.append(w_name)
                    worker_speeds.append(w_info.speed)
            worker_speeds = np.array(worker_speeds, dtype=np.float32)
            norm = np.sum(worker_speeds)
            if norm < 1e-4:
                return ""
            worker_speeds = worker_speeds / norm
            if True:  # Directly return address
                pt = np.random.choice(np.arange(len(worker_names)),
                                      p=worker_speeds)
                worker_name = worker_names[pt]
                return worker_name

            # Check status before returning
            while True:
                pt = np.random.choice(np.arange(len(worker_names)),
                                      p=worker_speeds)
                worker_name = worker_names[pt]

                if self.get_worker_status(worker_name):
                    break
                else:
                    self.remove_worker(worker_name)
                    worker_speeds[pt] = 0
                    norm = np.sum(worker_speeds)
                    if norm < 1e-4:
                        return ""
                    worker_speeds = worker_speeds / norm
                    continue
            return worker_name
        elif self.dispatch_method == DispatchMethod.SHORTEST_QUEUE:
            worker_names = []
            worker_qlen = []
            for w_name, w_info in self.worker_info.items():
                if model_name in w_info.model_names:
                    worker_names.append(w_name)
                    worker_qlen.append(w_info.queue_length / w_info.speed)
            if len(worker_names) == 0:
                return ""
            min_index = np.argmin(worker_qlen)
            w_name = worker_names[min_index]
            self.worker_info[w_name].queue_length += 1
            logger.info(f"names: {worker_names}, queue_lens: {worker_qlen}, ret: {w_name}")
            return w_name
        else:
            raise ValueError(f"Invalid dispatch method: {self.dispatch_method}")

    def receive_heart_beat(self, worker_name: str, queue_length: int):
        if worker_name not in self.worker_info:
            logger.info(f"Receive unknown heart beat. {worker_name}")
            return False

        self.worker_info[worker_name].queue_length = queue_length
        self.worker_info[worker_name].last_heart_beat = time.time()
        logger.info(f"Receive heart beat. {worker_name}")
        return True

    def remove_stable_workers_by_expiration(self):
        expire = time.time() - CONTROLLER_HEART_BEAT_EXPIRATION
        to_delete = []
        for worker_name, w_info in self.worker_info.items():
            if w_info.check_heart_beat and w_info.last_heart_beat < expire:
                to_delete.append(worker_name)

        for worker_name in to_delete:
            self.remove_worker(worker_name)

    def worker_api_generate_stream(self, params):
        worker_addr = self.get_worker_address(params["model"])
        if not worker_addr:
            logger.info(f"no worker: {params['model']}")
            ret = {
                "text": server_error_msg,
                "error_code": 2,
            }
            yield json.dumps(ret).encode() + b"\0"

        try:
            response = requests.post(worker_addr + "/worker_generate_stream",
                                     json=params, stream=True, timeout=5)
            for chunk in response.iter_lines(decode_unicode=False, delimiter=b"\0"):
                if chunk:
                    yield chunk + b"\0"
        except requests.exceptions.RequestException as e:
            logger.info(f"worker timeout: {worker_addr}")
            ret = {
                "text": server_error_msg,
                "error_code": 3,
            }
            yield json.dumps(ret).encode() + b"\0"


    # Let the controller act as a worker to achieve hierarchical
    # management. This can be used to connect isolated sub networks.
    def worker_api_get_status(self):
        model_names = set()
        speed = 0
        queue_length = 0

        for w_name in self.worker_info:
            worker_status = self.get_worker_status(w_name)
            if worker_status is not None:
                model_names.update(worker_status["model_names"])
                speed += worker_status["speed"]
                queue_length += worker_status["queue_length"]

        return {
            "model_names": list(model_names),
            "speed": speed,
            "queue_length": queue_length,
        }


app = FastAPI()


@app.post("/register_worker")
async def register_worker(request: Request):
    data = await request.json()
    controller.register_worker(
        data["worker_name"], data["check_heart_beat"],
        data.get("worker_status", None))


@app.post("/refresh_all_workers")
async def refresh_all_workers():
    models = controller.refresh_all_workers()


@app.post("/list_models")
async def list_models():
    models = controller.list_models()
    return {"models": models}


@app.post("/get_worker_address")
async def get_worker_address(request: Request):
    data = await request.json()
    addr = controller.get_worker_address(data["model"])
    return {"address": addr}


@app.post("/receive_heart_beat")
async def receive_heart_beat(request: Request):
    data = await request.json()
    exist = controller.receive_heart_beat(
        data["worker_name"], data["queue_length"])
    return {"exist": exist}


@app.post("/worker_generate_stream")
async def worker_api_generate_stream(request: Request):
    params = await request.json()
    generator = controller.worker_api_generate_stream(params)
    return StreamingResponse(generator)


@app.post("/worker_get_status")
async def worker_api_get_status(request: Request):
    return controller.worker_api_get_status()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--host", type=str, default="localhost")
    parser.add_argument("--port", type=int, default=21001)
    parser.add_argument("--dispatch-method", type=str, choices=[
        "lottery", "shortest_queue"], default="shortest_queue")
    args = parser.parse_args()
    logger.info(f"args: {args}")

    controller = Controller(args.dispatch_method)
    uvicorn.run(app, host=args.host, port=args.port, log_level="info")
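For reference, the controller's HTTP endpoints defined above can be exercised with plain `requests`. A hedged sketch, assuming a controller on the default `localhost:21001` with at least one worker already registered:

```python
# Sketch of querying the controller API defined above.
# Assumptions: the controller runs on its default host/port and a model
# worker has already registered itself, so the model list is non-empty.
import requests

controller_url = "http://localhost:21001"

# Re-check every registered worker, then list the models they serve.
requests.post(controller_url + "/refresh_all_workers")
models = requests.post(controller_url + "/list_models").json()["models"]

# Resolve a worker address for the first model; the configured dispatch
# method (lottery or shortest_queue) decides which worker is returned.
addr = requests.post(controller_url + "/get_worker_address",
                     json={"model": models[0]}).json()["address"]
print(models, addr)
```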
BIN
models/LLaVA/llava/serve/examples/extreme_ironing.jpg
Normal file
Binary file not shown. (After: 61 KiB)
BIN
models/LLaVA/llava/serve/examples/waterview.jpg
Normal file
Binary file not shown. (After: 93 KiB)
57
models/LLaVA/llava/serve/gateway/README.md
Normal file
@@ -0,0 +1,57 @@
# fastchat Nginx Gateway

## Purpose of the Gateway

The Nginx gateway serves the following purposes:

1. Protects Gradio servers by acting as a firewall.
2. Facilitates dynamic mounting and unmounting of Gradio servers.
3. Provides load balancing for Gradio servers.
4. Offers additional security features, such as a total connection limit.
5. Reduces the attack surface by requiring only a single public port to be exposed for serving.

## Deployment and Updating of the Gateway

### Installing Nginx

On Debian-based distributions (e.g., Ubuntu):

```bash
sudo apt update
sudo apt install nginx
```
On Red Hat-based distributions (e.g., CentOS, Fedora):

```bash
sudo yum install epel-release
sudo yum install nginx
```

### Deployment

Copy `nginx.conf` to `/etc/nginx/nginx.conf` (requires sudo permission).

Replace the port number 7860 in `server localhost:7860` with the port where you deploy the Gradio web server.

Modify `upstream websocket` to configure the Gradio servers behind the gateway.

Lastly, update Nginx.

### HTTPS Deployment with a Public Domain URL

Make sure you obtain the HTTPS certificate and the private key used to generate the certificate.

Fill in the paths to your certificate and private key in the `[PATH_TO_SSL_CERT]` and `[PATH_TO_PRIVATE_KEY]` fields.

If you have your own domain URL to serve the chatbot, replace the chat.lmsys.org URL with your own domain URL.

### Updating

Every time `/etc/nginx/nginx.conf` is modified, you need to update the Nginx service:

```bash
sudo nginx -t                 # check `/etc/nginx/nginx.conf`
sudo systemctl reload nginx   # restart Nginx service to load the new config
sudo systemctl status nginx   # check the status of the Nginx service. It should be active (running).
```
97
models/LLaVA/llava/serve/gateway/nginx.conf
Normal file
@@ -0,0 +1,97 @@
user www-data;
worker_processes auto;
pid /run/nginx.pid;
include /etc/nginx/modules-enabled/*.conf;

events {
    worker_connections 1024;  # maximum number of connections that a worker process can handle concurrently
    # multi_accept on;        # enabling multi_accept can help improve performance under high load, but may increase the number of simultaneous connections that a worker process can handle

}

http {
    ##
    # Basic Settings
    ##

    sendfile on;               # enable sendfile for performance optimization
    tcp_nopush on;             # enable TCP no-pushing
    tcp_nodelay on;            # enable TCP no-delay
    keepalive_timeout 65;      # sets the timeout for keep-alive connections
    types_hash_max_size 2048;  # maximum size of the types hash table
    # server_tokens off;       # disable server token (i.e., server signature) in response headers to improve security

    # server_names_hash_bucket_size 64;
    # server_name_in_redirect off;

    include /etc/nginx/mime.types;           # include MIME types file
    default_type application/octet-stream;   # default MIME type for unknown file types

    ##
    # SSL Settings
    ##

    ssl_protocols TLSv1.2;          # specify SSL/TLS protocols to use
    ssl_prefer_server_ciphers on;   # prefer server ciphers over client ciphers

    ##
    # Logging Settings
    ##

    access_log /var/log/nginx/access.log;  # path to access log file
    error_log /var/log/nginx/error.log;    # path to error log file

    ##
    # Gzip Settings
    ##
    gzip on;  # enable Gzip compression

    ##
    # Virtual Host Configs
    ##

    include /etc/nginx/conf.d/*.conf;     # include all configuration files in conf.d directory
    include /etc/nginx/sites-enabled/*;   # include all enabled sites configuration files

    # WebSocket Proxy: https://www.nginx.com/blog/websocket-nginx/
    map $http_upgrade $connection_upgrade {
        default upgrade;
        ''      close;
    }

    upstream websocket {
        ip_hash;                  # load balancing by IP to guarantee session persistence
        server localhost:7860;    # The port should be the gradio web server port
        # server localhost:7861;  # extra gradio server if more than one
    }

    limit_conn_status 429;
    limit_conn_zone $binary_remote_addr zone=perip:10m;  # limit number of connections per IP
    limit_conn_zone $server_name zone=perserver:10m;     # limit number of connections per server

    server {
        listen 443 ssl;  # the listening port of our server
        ssl_certificate [PATH_TO_SSL_CERT];
        ssl_certificate_key [PATH_TO_PRIVATE_KEY];
        server_name chat.lmsys.org;  # replace the url with your own domain url
        limit_conn perserver 1024;   # connections per server
        location / {
            proxy_pass http://websocket;  # proxy all requests to the defined upstream server
            limit_conn perip 5;           # connections per IP
            proxy_set_header Host $host;  # set the Host header for the upstream server
            proxy_set_header X-Real-IP $remote_addr;  # set the client IP address as the real IP for the upstream server
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;  # set the client IP addresses in the X-Forwarded-For header
            proxy_http_version 1.1;  # use HTTP version 1.1 for upstream communication
            proxy_set_header Upgrade $http_upgrade;
            proxy_set_header Connection "Upgrade";  # set the Connection header to Upgrade to enable WebSocket communication
        }
    }

    # the following block routes all HTTP traffic to HTTPS via nginx
    server {
        listen 80;
        server_name chat.lmsys.org;
        return 301 https://chat.lmsys.org$request_uri;
    }

}
73
models/LLaVA/llava/serve/gradio_css.py
Normal file
@@ -0,0 +1,73 @@
code_highlight_css = (
"""
#chatbot .hll { background-color: #ffffcc }
#chatbot .c { color: #408080; font-style: italic }
#chatbot .err { border: 1px solid #FF0000 }
#chatbot .k { color: #008000; font-weight: bold }
#chatbot .o { color: #666666 }
#chatbot .ch { color: #408080; font-style: italic }
#chatbot .cm { color: #408080; font-style: italic }
#chatbot .cp { color: #BC7A00 }
#chatbot .cpf { color: #408080; font-style: italic }
#chatbot .c1 { color: #408080; font-style: italic }
#chatbot .cs { color: #408080; font-style: italic }
#chatbot .gd { color: #A00000 }
#chatbot .ge { font-style: italic }
#chatbot .gr { color: #FF0000 }
#chatbot .gh { color: #000080; font-weight: bold }
#chatbot .gi { color: #00A000 }
#chatbot .go { color: #888888 }
#chatbot .gp { color: #000080; font-weight: bold }
#chatbot .gs { font-weight: bold }
#chatbot .gu { color: #800080; font-weight: bold }
#chatbot .gt { color: #0044DD }
#chatbot .kc { color: #008000; font-weight: bold }
#chatbot .kd { color: #008000; font-weight: bold }
#chatbot .kn { color: #008000; font-weight: bold }
#chatbot .kp { color: #008000 }
#chatbot .kr { color: #008000; font-weight: bold }
#chatbot .kt { color: #B00040 }
#chatbot .m { color: #666666 }
#chatbot .s { color: #BA2121 }
#chatbot .na { color: #7D9029 }
#chatbot .nb { color: #008000 }
#chatbot .nc { color: #0000FF; font-weight: bold }
#chatbot .no { color: #880000 }
#chatbot .nd { color: #AA22FF }
#chatbot .ni { color: #999999; font-weight: bold }
#chatbot .ne { color: #D2413A; font-weight: bold }
#chatbot .nf { color: #0000FF }
#chatbot .nl { color: #A0A000 }
#chatbot .nn { color: #0000FF; font-weight: bold }
#chatbot .nt { color: #008000; font-weight: bold }
#chatbot .nv { color: #19177C }
#chatbot .ow { color: #AA22FF; font-weight: bold }
#chatbot .w { color: #bbbbbb }
#chatbot .mb { color: #666666 }
#chatbot .mf { color: #666666 }
#chatbot .mh { color: #666666 }
#chatbot .mi { color: #666666 }
#chatbot .mo { color: #666666 }
#chatbot .sa { color: #BA2121 }
#chatbot .sb { color: #BA2121 }
#chatbot .sc { color: #BA2121 }
#chatbot .dl { color: #BA2121 }
#chatbot .sd { color: #BA2121; font-style: italic }
#chatbot .s2 { color: #BA2121 }
#chatbot .se { color: #BB6622; font-weight: bold }
#chatbot .sh { color: #BA2121 }
#chatbot .si { color: #BB6688; font-weight: bold }
#chatbot .sx { color: #008000 }
#chatbot .sr { color: #BB6688 }
#chatbot .s1 { color: #BA2121 }
#chatbot .ss { color: #19177C }
#chatbot .bp { color: #008000 }
#chatbot .fm { color: #0000FF }
#chatbot .vc { color: #19177C }
#chatbot .vg { color: #19177C }
#chatbot .vi { color: #19177C }
#chatbot .vm { color: #19177C }
#chatbot .il { color: #666666 }
""")
#.highlight  { background: #f8f8f8; }
168
models/LLaVA/llava/serve/gradio_patch.py
Normal file
@@ -0,0 +1,168 @@
"""
Adapted from https://github.com/gradio-app/gradio/blob/main/gradio/components.py
Fix a markdown render problem.
"""
from __future__ import annotations

from gradio.components import *
from markdown2 import Markdown


class _Keywords(Enum):
    NO_VALUE = "NO_VALUE"  # Used as a sentinel to determine if nothing is provided as an argument for `value` in `Component.update()`
    FINISHED_ITERATING = "FINISHED_ITERATING"  # Used to skip processing of a component's value (needed for generators + state)


@document("style")
class Chatbot(Changeable, Selectable, IOComponent, JSONSerializable):
    """
    Displays a chatbot output showing both user submitted messages and responses. Supports a subset of Markdown including bold, italics, code, and images.
    Preprocessing: this component does *not* accept input.
    Postprocessing: expects function to return a {List[Tuple[str | None | Tuple, str | None | Tuple]]}, a list of tuples with user message and response messages. Messages should be strings, tuples, or Nones. If the message is a string, it can include Markdown. If it is a tuple, it should consist of (string filepath to image/video/audio, [optional string alt text]). Messages that are `None` are not displayed.

    Demos: chatbot_simple, chatbot_multimodal
    """

    def __init__(
        self,
        value: List[Tuple[str | None, str | None]] | Callable | None = None,
        color_map: Dict[str, str] | None = None,  # Parameter moved to Chatbot.style()
        *,
        label: str | None = None,
        every: float | None = None,
        show_label: bool = True,
        visible: bool = True,
        elem_id: str | None = None,
        elem_classes: List[str] | str | None = None,
        **kwargs,
    ):
        """
        Parameters:
            value: Default value to show in chatbot. If callable, the function will be called whenever the app loads to set the initial value of the component.
            label: component name in interface.
            every: If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.
            show_label: if True, will display label.
            visible: If False, component will be hidden.
            elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.
            elem_classes: An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.
        """
        if color_map is not None:
            warnings.warn(
                "The 'color_map' parameter has been deprecated.",
            )
        #self.md = utils.get_markdown_parser()
        self.md = Markdown(extras=["fenced-code-blocks", "tables", "break-on-newline"])
        self.select: EventListenerMethod
        """
        Event listener for when the user selects message from Chatbot.
        Uses event data gradio.SelectData to carry `value` referring to text of selected message, and `index` tuple to refer to [message, participant] index.
        See EventData documentation on how to use this event data.
        """

        IOComponent.__init__(
            self,
            label=label,
            every=every,
            show_label=show_label,
            visible=visible,
            elem_id=elem_id,
            elem_classes=elem_classes,
            value=value,
            **kwargs,
        )

    def get_config(self):
        return {
            "value": self.value,
            "selectable": self.selectable,
            **IOComponent.get_config(self),
        }

    @staticmethod
    def update(
        value: Any | Literal[_Keywords.NO_VALUE] | None = _Keywords.NO_VALUE,
        label: str | None = None,
        show_label: bool | None = None,
        visible: bool | None = None,
    ):
        updated_config = {
            "label": label,
            "show_label": show_label,
            "visible": visible,
            "value": value,
            "__type__": "update",
        }
        return updated_config

    def _process_chat_messages(
        self, chat_message: str | Tuple | List | Dict | None
    ) -> str | Dict | None:
        if chat_message is None:
            return None
        elif isinstance(chat_message, (tuple, list)):
            mime_type = processing_utils.get_mimetype(chat_message[0])
            return {
                "name": chat_message[0],
                "mime_type": mime_type,
                "alt_text": chat_message[1] if len(chat_message) > 1 else None,
                "data": None,  # These last two fields are filled in by the frontend
                "is_file": True,
            }
        elif isinstance(
            chat_message, dict
        ):  # This happens for previously processed messages
            return chat_message
        elif isinstance(chat_message, str):
            #return self.md.render(chat_message)
            return str(self.md.convert(chat_message))
        else:
            raise ValueError(f"Invalid message for Chatbot component: {chat_message}")

    def postprocess(
        self,
        y: List[
            Tuple[str | Tuple | List | Dict | None, str | Tuple | List | Dict | None]
        ],
    ) -> List[Tuple[str | Dict | None, str | Dict | None]]:
        """
        Parameters:
            y: List of tuples representing the message and response pairs. Each message and response should be a string, which may be in Markdown format. It can also be a tuple whose first element is a string filepath or URL to an image/video/audio, and second (optional) element is the alt text, in which case the media file is displayed. It can also be None, in which case that message is not displayed.
        Returns:
            List of tuples representing the message and response. Each message and response will be a string of HTML, or a dictionary with media information.
        """
        if y is None:
            return []
        processed_messages = []
        for message_pair in y:
            assert isinstance(
                message_pair, (tuple, list)
            ), f"Expected a list of lists or list of tuples. Received: {message_pair}"
            assert (
                len(message_pair) == 2
            ), f"Expected a list of lists of length 2 or list of tuples of length 2. Received: {message_pair}"
            processed_messages.append(
                (
                    #self._process_chat_messages(message_pair[0]),
                    '<pre style="font-family: var(--font)">' +
                    message_pair[0] + "</pre>",
                    self._process_chat_messages(message_pair[1]),
                )
            )
        return processed_messages

    def style(self, height: int | None = None, **kwargs):
        """
        This method can be used to change the appearance of the Chatbot component.
        """
        if height is not None:
            self._style["height"] = height
        if kwargs.get("color_map") is not None:
            warnings.warn("The 'color_map' parameter has been deprecated.")

        Component.style(
            self,
            **kwargs,
        )
        return self
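The patched `Chatbot.postprocess` wraps the user turn in a `<pre>` block and renders the model reply with `markdown2`. A small sketch of the resulting output, assuming a Gradio version compatible with this patch:

```python
# Sketch of what the patched Chatbot.postprocess emits for a text-only turn.
# Assumption: a Gradio release that this component patch targets is installed.
from llava.serve.gradio_patch import Chatbot as grChatbot

chatbot = grChatbot(elem_id="chatbot", label="LLaVA Chatbot")
pairs = [("What is unusual about this image?\n<image>",
          "The man is **ironing** on the back of a moving taxi.")]
processed = chatbot.postprocess(pairs)
# processed[0][0] -> '<pre style="font-family: var(--font)">...</pre>'
# processed[0][1] -> markdown2-rendered HTML, e.g. with <strong>ironing</strong>
print(processed[0][0])
print(processed[0][1])
```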
431
models/LLaVA/llava/serve/gradio_web_server.py
Normal file
@@ -0,0 +1,431 @@
import argparse
from collections import defaultdict
import datetime
import json
import os
import time

import gradio as gr
import requests

from llava.conversation import (default_conversation, conv_templates,
                                SeparatorStyle)
from llava.constants import LOGDIR
from llava.utils import (build_logger, server_error_msg,
                         violates_moderation, moderation_msg)
from llava.serve.gradio_patch import Chatbot as grChatbot
from llava.serve.gradio_css import code_highlight_css
import hashlib


logger = build_logger("gradio_web_server", "gradio_web_server.log")

headers = {"User-Agent": "LLaVA Client"}

no_change_btn = gr.Button.update()
enable_btn = gr.Button.update(interactive=True)
disable_btn = gr.Button.update(interactive=False)

priority = {
    "vicuna-13b": "aaaaaaa",
    "koala-13b": "aaaaaab",
}


def get_conv_log_filename():
    t = datetime.datetime.now()
    name = os.path.join(LOGDIR, f"{t.year}-{t.month:02d}-{t.day:02d}-conv.json")
    return name


def get_model_list():
    ret = requests.post(args.controller_url + "/refresh_all_workers")
    assert ret.status_code == 200
    ret = requests.post(args.controller_url + "/list_models")
    models = ret.json()["models"]
    models.sort(key=lambda x: priority.get(x, x))
    logger.info(f"Models: {models}")
    return models


get_window_url_params = """
function() {
    const params = new URLSearchParams(window.location.search);
    url_params = Object.fromEntries(params);
    console.log(url_params);
    return url_params;
    }
"""


def load_demo(url_params, request: gr.Request):
    logger.info(f"load_demo. ip: {request.client.host}. params: {url_params}")

    dropdown_update = gr.Dropdown.update(visible=True)
    if "model" in url_params:
        model = url_params["model"]
        if model in models:
            dropdown_update = gr.Dropdown.update(
                value=model, visible=True)

    state = default_conversation.copy()
    return (state,
            dropdown_update,
            gr.Chatbot.update(visible=True),
            gr.Textbox.update(visible=True),
            gr.Button.update(visible=True),
            gr.Row.update(visible=True),
            gr.Accordion.update(visible=True))


def load_demo_refresh_model_list(request: gr.Request):
    logger.info(f"load_demo. ip: {request.client.host}")
    models = get_model_list()
    state = default_conversation.copy()
    return (state, gr.Dropdown.update(
                choices=models,
                value=models[0] if len(models) > 0 else ""),
            gr.Chatbot.update(visible=True),
            gr.Textbox.update(visible=True),
            gr.Button.update(visible=True),
            gr.Row.update(visible=True),
            gr.Accordion.update(visible=True))


def vote_last_response(state, vote_type, model_selector, request: gr.Request):
    with open(get_conv_log_filename(), "a") as fout:
        data = {
            "tstamp": round(time.time(), 4),
            "type": vote_type,
            "model": model_selector,
            "state": state.dict(),
            "ip": request.client.host,
        }
        fout.write(json.dumps(data) + "\n")


def upvote_last_response(state, model_selector, request: gr.Request):
    logger.info(f"upvote. ip: {request.client.host}")
    vote_last_response(state, "upvote", model_selector, request)
    return ("",) + (disable_btn,) * 3


def downvote_last_response(state, model_selector, request: gr.Request):
    logger.info(f"downvote. ip: {request.client.host}")
    vote_last_response(state, "downvote", model_selector, request)
    return ("",) + (disable_btn,) * 3


def flag_last_response(state, model_selector, request: gr.Request):
    logger.info(f"flag. ip: {request.client.host}")
    vote_last_response(state, "flag", model_selector, request)
    return ("",) + (disable_btn,) * 3


def regenerate(state, image_process_mode, request: gr.Request):
    logger.info(f"regenerate. ip: {request.client.host}")
    state.messages[-1][-1] = None
    prev_human_msg = state.messages[-2]
    if type(prev_human_msg[1]) in (tuple, list):
        prev_human_msg[1] = (*prev_human_msg[1][:2], image_process_mode)
    state.skip_next = False
    return (state, state.to_gradio_chatbot(), "", None) + (disable_btn,) * 5


def clear_history(request: gr.Request):
    logger.info(f"clear_history. ip: {request.client.host}")
    state = default_conversation.copy()
    return (state, state.to_gradio_chatbot(), "", None) + (disable_btn,) * 5


def add_text(state, text, image, image_process_mode, request: gr.Request):
    logger.info(f"add_text. ip: {request.client.host}. len: {len(text)}")
    if len(text) <= 0 and image is None:
        state.skip_next = True
        return (state, state.to_gradio_chatbot(), "", None) + (no_change_btn,) * 5
    if args.moderate:
        flagged = violates_moderation(text)
        if flagged:
            state.skip_next = True
            return (state, state.to_gradio_chatbot(), moderation_msg, None) + (
                no_change_btn,) * 5

    text = text[:1536]  # Hard cut-off
    if image is not None:
        text = text[:1200]  # Hard cut-off for images
        if '<image>' not in text:
            text = text + '\n<image>'
        text = (text, image, image_process_mode)
        state = default_conversation.copy()
    state.append_message(state.roles[0], text)
    state.append_message(state.roles[1], None)
    state.skip_next = False
    return (state, state.to_gradio_chatbot(), "", None) + (disable_btn,) * 5


def post_process_code(code):
    sep = "\n```"
    if sep in code:
        blocks = code.split(sep)
        if len(blocks) % 2 == 1:
            for i in range(1, len(blocks), 2):
                blocks[i] = blocks[i].replace("\\_", "_")
        code = sep.join(blocks)
    return code


def http_bot(state, model_selector, temperature, max_new_tokens, request: gr.Request):
    logger.info(f"http_bot. ip: {request.client.host}")
    start_tstamp = time.time()
    model_name = model_selector

    if state.skip_next:
        # This generate call is skipped due to invalid inputs
        yield (state, state.to_gradio_chatbot()) + (no_change_btn,) * 5
        return

    if len(state.messages) == state.offset + 2:
        # First round of conversation
        if "llava" in model_name.lower():
            if "v1" in model_name.lower():
                template_name = "llava_v1"
            elif "mpt" in model_name.lower():
                template_name = "mpt_multimodal"
            else:
                template_name = "multimodal"
        elif "mpt" in model_name:
            template_name = "mpt_text"
        elif "koala" in model_name: # Hardcode the condition
            template_name = "bair_v1"
        elif "v1" in model_name:    # vicuna v1_1/v1_2
            template_name = "vicuna_v1_1"
        else:
            template_name = "v1"
        new_state = conv_templates[template_name].copy()
        new_state.append_message(new_state.roles[0], state.messages[-2][1])
        new_state.append_message(new_state.roles[1], None)
        state = new_state

    # Query worker address
    controller_url = args.controller_url
    ret = requests.post(controller_url + "/get_worker_address",
            json={"model": model_name})
    worker_addr = ret.json()["address"]
    logger.info(f"model_name: {model_name}, worker_addr: {worker_addr}")

    # No available worker
    if worker_addr == "":
        state.messages[-1][-1] = server_error_msg
        yield (state, state.to_gradio_chatbot(), disable_btn, disable_btn, disable_btn, enable_btn, enable_btn)
        return

    # Construct prompt
    prompt = state.get_prompt()

    all_images = state.get_images(return_pil=True)
    all_image_hash = [hashlib.md5(image.tobytes()).hexdigest() for image in all_images]
    for image, hash in zip(all_images, all_image_hash):
        t = datetime.datetime.now()
        filename = os.path.join(LOGDIR, "serve_images", f"{t.year}-{t.month:02d}-{t.day:02d}", f"{hash}.jpg")
        if not os.path.isfile(filename):
            os.makedirs(os.path.dirname(filename), exist_ok=True)
            image.save(filename)

    # Make requests
    pload = {
        "model": model_name,
        "prompt": prompt,
        "temperature": float(temperature),
        "max_new_tokens": min(int(max_new_tokens), 1536),
        "stop": state.sep if state.sep_style in [SeparatorStyle.SINGLE, SeparatorStyle.MPT] else state.sep2,
        "images": f'List of {len(state.get_images())} images: {all_image_hash}',
    }
    logger.info(f"==== request ====\n{pload}")

    pload['images'] = state.get_images()

    state.messages[-1][-1] = "▌"
    yield (state, state.to_gradio_chatbot()) + (disable_btn,) * 5

    try:
        # Stream output
        response = requests.post(worker_addr + "/worker_generate_stream",
            headers=headers, json=pload, stream=True, timeout=10)
        for chunk in response.iter_lines(decode_unicode=False, delimiter=b"\0"):
            if chunk:
                data = json.loads(chunk.decode())
                if data["error_code"] == 0:
                    output = data["text"][len(prompt):].strip()
                    output = post_process_code(output)
                    state.messages[-1][-1] = output + "▌"
                    yield (state, state.to_gradio_chatbot()) + (disable_btn,) * 5
                else:
                    output = data["text"] + f" (error_code: {data['error_code']})"
                    state.messages[-1][-1] = output
                    yield (state, state.to_gradio_chatbot()) + (disable_btn, disable_btn, disable_btn, enable_btn, enable_btn)
                    return
                time.sleep(0.03)
    except requests.exceptions.RequestException as e:
        state.messages[-1][-1] = server_error_msg
        yield (state, state.to_gradio_chatbot()) + (disable_btn, disable_btn, disable_btn, enable_btn, enable_btn)
        return

    state.messages[-1][-1] = state.messages[-1][-1][:-1]
    yield (state, state.to_gradio_chatbot()) + (enable_btn,) * 5

    finish_tstamp = time.time()
    logger.info(f"{output}")

    with open(get_conv_log_filename(), "a") as fout:
        data = {
            "tstamp": round(finish_tstamp, 4),
            "type": "chat",
            "model": model_name,
            "start": round(start_tstamp, 4),
"finish": round(start_tstamp, 4),
|
||||
"state": state.dict(),
|
||||
"images": all_image_hash,
|
||||
"ip": request.client.host,
|
||||
}
|
||||
fout.write(json.dumps(data) + "\n")
|
||||
|
||||
title_markdown = ("""
|
||||
# 🌋 LLaVA: Large Language and Vision Assistant
|
||||
[[Project Page]](https://llava-vl.github.io) [[Paper]](https://arxiv.org/abs/2304.08485) [[Code]](https://github.com/haotian-liu/LLaVA) [[Model]](https://huggingface.co/liuhaotian/LLaVA-13b-delta-v0)
|
||||
""")
|
||||
|
||||
tos_markdown = ("""
|
||||
### Terms of use
|
||||
By using this service, users are required to agree to the following terms:
|
||||
The service is a research preview intended for non-commercial use only. It only provides limited safety measures and may generate offensive content. It must not be used for any illegal, harmful, violent, racist, or sexual purposes. The service may collect user dialogue data for future research.
|
||||
Please click the "Flag" button if you get any inappropriate answer! We will collect those to keep improving our moderator.
|
||||
For an optimal experience, please use desktop computers for this demo, as mobile devices may compromise its quality.
|
||||
""")
|
||||
|
||||
|
||||
learn_more_markdown = ("""
|
||||
### License
|
||||
The service is a research preview intended for non-commercial use only, subject to the model [License](https://github.com/facebookresearch/llama/blob/main/MODEL_CARD.md) of LLaMA, [Terms of Use](https://openai.com/policies/terms-of-use) of the data generated by OpenAI, and [Privacy Practices](https://chrome.google.com/webstore/detail/sharegpt-share-your-chatg/daiacboceoaocpibfodeljbdfacokfjb) of ShareGPT. Please contact us if you find any potential violation.
|
||||
""")
|
||||
|
||||
|
||||
css = code_highlight_css + """
|
||||
pre {
|
||||
white-space: pre-wrap; /* Since CSS 2.1 */
|
||||
white-space: -moz-pre-wrap; /* Mozilla, since 1999 */
|
||||
white-space: -pre-wrap; /* Opera 4-6 */
|
||||
white-space: -o-pre-wrap; /* Opera 7 */
|
||||
word-wrap: break-word; /* Internet Explorer 5.5+ */
|
||||
}
|
||||
"""
|
||||
|
||||
|
||||
def build_demo(embed_mode):
|
||||
textbox = gr.Textbox(show_label=False,
|
||||
placeholder="Enter text and press ENTER", visible=False).style(container=False)
|
||||
with gr.Blocks(title="LLaVA", theme=gr.themes.Base(), css=css) as demo:
|
||||
state = gr.State()
|
||||
|
||||
if not embed_mode:
|
||||
gr.Markdown(title_markdown)
|
||||
|
||||
with gr.Row():
|
||||
with gr.Column(scale=3):
|
||||
with gr.Row(elem_id="model_selector_row"):
|
||||
model_selector = gr.Dropdown(
|
||||
choices=models,
|
||||
value=models[0] if len(models) > 0 else "",
|
||||
interactive=True,
|
||||
show_label=False).style(container=False)
|
||||
|
||||
imagebox = gr.Image(type="pil")
|
||||
image_process_mode = gr.Radio(
|
||||
["Crop", "Resize", "Pad"],
|
||||
value="Crop",
|
||||
label="Preprocess for non-square image")
|
||||
|
||||
cur_dir = os.path.dirname(os.path.abspath(__file__))
|
||||
gr.Examples(examples=[
|
||||
[f"{cur_dir}/examples/extreme_ironing.jpg", "What is unusual about this image?"],
|
||||
[f"{cur_dir}/examples/waterview.jpg", "What are the things I should be cautious about when I visit here?"],
|
||||
], inputs=[imagebox, textbox])
|
||||
|
||||
with gr.Accordion("Parameters", open=False, visible=False) as parameter_row:
|
||||
temperature = gr.Slider(minimum=0.0, maximum=1.0, value=0.2, step=0.1, interactive=True, label="Temperature",)
|
||||
max_output_tokens = gr.Slider(minimum=0, maximum=1024, value=512, step=64, interactive=True, label="Max output tokens",)
|
||||
|
||||
with gr.Column(scale=6):
|
||||
chatbot = grChatbot(elem_id="chatbot", label="LLaVA Chatbot", visible=False).style(height=550)
|
||||
with gr.Row():
|
||||
with gr.Column(scale=8):
|
||||
textbox.render()
|
||||
with gr.Column(scale=1, min_width=60):
|
||||
submit_btn = gr.Button(value="Submit", visible=False)
|
||||
with gr.Row(visible=False) as button_row:
|
||||
upvote_btn = gr.Button(value="👍 Upvote", interactive=False)
|
||||
downvote_btn = gr.Button(value="👎 Downvote", interactive=False)
|
||||
flag_btn = gr.Button(value="⚠️ Flag", interactive=False)
|
||||
#stop_btn = gr.Button(value="⏹️ Stop Generation", interactive=False)
|
||||
regenerate_btn = gr.Button(value="🔄 Regenerate", interactive=False)
|
||||
clear_btn = gr.Button(value="🗑️ Clear history", interactive=False)
|
||||
|
||||
if not embed_mode:
|
||||
gr.Markdown(tos_markdown)
|
||||
gr.Markdown(learn_more_markdown)
|
||||
url_params = gr.JSON(visible=False)
|
||||
|
||||
# Register listeners
|
||||
btn_list = [upvote_btn, downvote_btn, flag_btn, regenerate_btn, clear_btn]
|
||||
upvote_btn.click(upvote_last_response,
|
||||
[state, model_selector], [textbox, upvote_btn, downvote_btn, flag_btn])
|
||||
downvote_btn.click(downvote_last_response,
|
||||
[state, model_selector], [textbox, upvote_btn, downvote_btn, flag_btn])
|
||||
flag_btn.click(flag_last_response,
|
||||
[state, model_selector], [textbox, upvote_btn, downvote_btn, flag_btn])
|
||||
regenerate_btn.click(regenerate, [state, image_process_mode],
|
||||
[state, chatbot, textbox, imagebox] + btn_list).then(
|
||||
http_bot, [state, model_selector, temperature, max_output_tokens],
|
||||
[state, chatbot] + btn_list)
|
||||
clear_btn.click(clear_history, None, [state, chatbot, textbox, imagebox] + btn_list)
|
||||
|
||||
textbox.submit(add_text, [state, textbox, imagebox, image_process_mode], [state, chatbot, textbox, imagebox] + btn_list
|
||||
).then(http_bot, [state, model_selector, temperature, max_output_tokens],
|
||||
[state, chatbot] + btn_list)
|
||||
submit_btn.click(add_text, [state, textbox, imagebox, image_process_mode], [state, chatbot, textbox, imagebox] + btn_list
|
||||
).then(http_bot, [state, model_selector, temperature, max_output_tokens],
|
||||
[state, chatbot] + btn_list)
|
||||
|
||||
if args.model_list_mode == "once":
|
||||
demo.load(load_demo, [url_params], [state, model_selector,
|
||||
chatbot, textbox, submit_btn, button_row, parameter_row],
|
||||
_js=get_window_url_params)
|
||||
elif args.model_list_mode == "reload":
|
||||
demo.load(load_demo_refresh_model_list, None, [state, model_selector,
|
||||
chatbot, textbox, submit_btn, button_row, parameter_row])
|
||||
else:
|
||||
raise ValueError(f"Unknown model list mode: {args.model_list_mode}")
|
||||
|
||||
return demo
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument("--host", type=str, default="0.0.0.0")
|
||||
parser.add_argument("--port", type=int)
|
||||
parser.add_argument("--controller-url", type=str, default="http://localhost:21001")
|
||||
parser.add_argument("--concurrency-count", type=int, default=8)
|
||||
parser.add_argument("--model-list-mode", type=str, default="once",
|
||||
choices=["once", "reload"])
|
||||
parser.add_argument("--share", action="store_true")
|
||||
parser.add_argument("--moderate", action="store_true")
|
||||
parser.add_argument("--embed", action="store_true")
|
||||
args = parser.parse_args()
|
||||
logger.info(f"args: {args}")
|
||||
|
||||
models = get_model_list()
|
||||
|
||||
logger.info(args)
|
||||
demo = build_demo(args.embed)
|
||||
demo.queue(concurrency_count=args.concurrency_count, status_update_rate=10,
|
||||
api_open=False).launch(
|
||||
server_name=args.host, server_port=args.port, share=args.share)
|
||||
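`http_bot` above consumes null-delimited JSON chunks from a worker's `/worker_generate_stream` endpoint. The same protocol can be read directly, as in this hedged sketch where the worker address and model name are placeholders:

```python
# Sketch of reading the worker streaming protocol used by http_bot.
# Assumptions: a model worker is reachable at worker_addr and serves
# model_name; both values below are placeholders, not fixed defaults.
import json
import requests

worker_addr = "http://localhost:40000"   # placeholder worker address
model_name = "llava-13b"                 # placeholder model name

pload = {
    "model": model_name,
    "prompt": "A chat between a curious human and an AI assistant.###Human: Hi!###Assistant:",
    "temperature": 0.2,
    "max_new_tokens": 256,
    "stop": "###",
    "images": [],
}
response = requests.post(worker_addr + "/worker_generate_stream",
                         json=pload, stream=True, timeout=10)
for chunk in response.iter_lines(decode_unicode=False, delimiter=b"\0"):
    if chunk:
        data = json.loads(chunk.decode())
        if data["error_code"] == 0:
            # Each chunk carries the full text so far; show only the reply part.
            print(data["text"][len(pload["prompt"]):].strip(), end="\r", flush=True)
print()
```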
384
models/LLaVA/llava/serve/model_worker.py
Normal file
@@ -0,0 +1,384 @@
|
||||
"""
|
||||
A model worker executes the model.
|
||||
"""
|
||||
import argparse
|
||||
import asyncio
|
||||
import dataclasses
|
||||
import logging
|
||||
import json
|
||||
import time
|
||||
from typing import List, Union
|
||||
import threading
|
||||
import uuid
|
||||
|
||||
from fastapi import FastAPI, Request, BackgroundTasks
|
||||
from fastapi.responses import StreamingResponse
|
||||
import requests
|
||||
from transformers import AutoTokenizer, AutoModelForCausalLM
|
||||
import torch
|
||||
import uvicorn
|
||||
from functools import partial
|
||||
|
||||
from llava.constants import WORKER_HEART_BEAT_INTERVAL
|
||||
from llava.utils import (build_logger, server_error_msg,
|
||||
pretty_print_semaphore)
|
||||
from llava.model import *
|
||||
|
||||
GB = 1 << 30
|
||||
|
||||
worker_id = str(uuid.uuid4())[:6]
|
||||
logger = build_logger("model_worker", f"model_worker_{worker_id}.log")
|
||||
global_counter = 0
|
||||
|
||||
model_semaphore = None
|
||||
|
||||
|
||||
DEFAULT_IMAGE_TOKEN = "<image>"
|
||||
DEFAULT_IMAGE_PATCH_TOKEN = "<im_patch>"
|
||||
DEFAULT_IM_START_TOKEN = "<im_start>"
|
||||
DEFAULT_IM_END_TOKEN = "<im_end>"
|
||||
|
||||
|
||||
def heart_beat_worker(controller):
|
||||
|
||||
while True:
|
||||
time.sleep(WORKER_HEART_BEAT_INTERVAL)
|
||||
controller.send_heart_beat()
|
||||
|
||||
|
||||
def load_model(model_path, model_name, num_gpus):
|
||||
if num_gpus == 1:
|
||||
kwargs = {}
|
||||
else:
|
||||
kwargs = {
|
||||
"device_map": "auto",
|
||||
"max_memory": {i: "13GiB" for i in range(num_gpus)},
|
||||
}
|
||||
|
||||
tokenizer = AutoTokenizer.from_pretrained(model_path)
|
||||
if 'llava' in model_name.lower():
|
||||
if 'mpt' in model_name.lower():
|
||||
model = LlavaMPTForCausalLM.from_pretrained(model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True, **kwargs)
|
||||
else:
|
||||
model = LlavaLlamaForCausalLM.from_pretrained(model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True, **kwargs)
|
||||
elif 'mpt' in model_name.lower():
|
||||
model = AutoModelForCausalLM.from_pretrained(model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True, trust_remote_code=True, **kwargs)
|
||||
else:
|
||||
model = AutoModelForCausalLM.from_pretrained(model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True, **kwargs)
|
||||
|
||||
image_processor = None
|
||||
|
||||
if 'llava' in model_name.lower():
|
||||
from transformers import CLIPImageProcessor, CLIPVisionModel
|
||||
image_processor = CLIPImageProcessor.from_pretrained(model.config.mm_vision_tower, torch_dtype=torch.float16)
|
||||
|
||||
mm_use_im_start_end = getattr(model.config, "mm_use_im_start_end", False)
|
||||
tokenizer.add_tokens([DEFAULT_IMAGE_PATCH_TOKEN], special_tokens=True)
|
||||
if mm_use_im_start_end:
|
||||
tokenizer.add_tokens([DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN], special_tokens=True)
|
||||
|
||||
vision_tower = model.get_model().vision_tower[0]
|
||||
if vision_tower.device.type == 'meta':
|
||||
vision_tower = CLIPVisionModel.from_pretrained(vision_tower.config._name_or_path, torch_dtype=torch.float16, low_cpu_mem_usage=True).cuda()
|
||||
model.get_model().vision_tower[0] = vision_tower
|
||||
else:
|
||||
vision_tower.to(device='cuda', dtype=torch.float16)
|
||||
vision_config = vision_tower.config
|
||||
vision_config.im_patch_token = tokenizer.convert_tokens_to_ids([DEFAULT_IMAGE_PATCH_TOKEN])[0]
|
||||
vision_config.use_im_start_end = mm_use_im_start_end
|
||||
if mm_use_im_start_end:
|
||||
vision_config.im_start_token, vision_config.im_end_token = tokenizer.convert_tokens_to_ids([DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN])
|
||||
|
||||
if num_gpus == 1:
|
||||
model.cuda()
|
||||
|
||||
if hasattr(model.config, "max_sequence_length"):
|
||||
context_len = model.config.max_sequence_length
|
||||
else:
|
||||
context_len = 2048
|
||||
|
||||
return tokenizer, model, image_processor, context_len
|
||||
|
||||
|
||||
class ModelWorker:
    def __init__(self, controller_addr, worker_addr,
                 worker_id, no_register,
                 model_path, model_name,
                 keep_aspect_ratio,
                 num_gpus):
        self.controller_addr = controller_addr
        self.worker_addr = worker_addr
        self.worker_id = worker_id
        if model_path.endswith("/"):
            model_path = model_path[:-1]
        if model_name is None:
            model_paths = model_path.split("/")
            if model_paths[-1].startswith('checkpoint-'):
                self.model_name = model_paths[-2] + "_" + model_paths[-1]
            else:
                self.model_name = model_paths[-1]
        else:
            self.model_name = model_name

        logger.info(f"Loading the model {self.model_name} on worker {worker_id} ...")
        self.keep_aspect_ratio = keep_aspect_ratio
        self.tokenizer, self.model, self.image_processor, self.context_len = load_model(
            model_path, self.model_name, num_gpus)
        self.is_multimodal = 'llava' in model_path.lower()

        if not no_register:
            self.register_to_controller()
            self.heart_beat_thread = threading.Thread(
                target=heart_beat_worker, args=(self,))
            self.heart_beat_thread.start()

    def register_to_controller(self):
        logger.info("Register to controller")

        url = self.controller_addr + "/register_worker"
        data = {
            "worker_name": self.worker_addr,
            "check_heart_beat": True,
            "worker_status": self.get_status()
        }
        r = requests.post(url, json=data)
        assert r.status_code == 200

    def send_heart_beat(self):
        logger.info(f"Send heart beat. Models: {[self.model_name]}. "
                    f"Semaphore: {pretty_print_semaphore(model_semaphore)}. "
                    f"global_counter: {global_counter}")

        url = self.controller_addr + "/receive_heart_beat"

        while True:
            try:
                ret = requests.post(url, json={
                    "worker_name": self.worker_addr,
                    "queue_length": self.get_queue_length()}, timeout=5)
                exist = ret.json()["exist"]
                break
            except requests.exceptions.RequestException as e:
                logger.error(f"heart beat error: {e}")
            time.sleep(5)

        if not exist:
            self.register_to_controller()

    def get_queue_length(self):
        if model_semaphore is None:
            return 0
        else:
            return args.limit_model_concurrency - model_semaphore._value + (len(
                model_semaphore._waiters) if model_semaphore._waiters is not None else 0)

    def get_status(self):
        return {
            "model_names": [self.model_name],
            "speed": 1,
            "queue_length": self.get_queue_length(),
        }

    @torch.inference_mode()
    def generate_stream(self, params):
        tokenizer, model, image_processor = self.tokenizer, self.model, self.image_processor

        prompt = params["prompt"]
        ori_prompt = prompt
        images = params.get("images", None)
        if images is not None and len(images) > 0 and self.is_multimodal:
            from PIL import Image
            from io import BytesIO
            import base64
            assert type(images) is list
            if len(images) > 0:
                # assert len(images) == 1, "Only support one image for now"
                images = [Image.open(BytesIO(base64.b64decode(image))) for image in images]
                assert len(images) == prompt.count(DEFAULT_IMAGE_TOKEN), "Number of images does not match number of <image> tokens in prompt"

                if self.keep_aspect_ratio:
                    new_images = []
                    for image_idx, image in enumerate(images):
                        max_hw, min_hw = max(image.size), min(image.size)
                        aspect_ratio = max_hw / min_hw
                        max_len, min_len = 448, 224
                        shortest_edge = int(min(max_len / aspect_ratio, min_len))
                        image = image_processor.preprocess(image, return_tensors='pt', do_center_crop=False, size={"shortest_edge": shortest_edge})['pixel_values'][0]
                        new_images.append(image.to(self.model.device, dtype=torch.float16))
                        # replace the image token with the image patch token in the prompt (each occurrence)
                        cur_token_len = (image.shape[1]//14) * (image.shape[2]//14)
                        replace_token = DEFAULT_IMAGE_PATCH_TOKEN * cur_token_len
                        if getattr(self.model.config, 'mm_use_im_start_end', False):
                            replace_token = DEFAULT_IM_START_TOKEN + replace_token + DEFAULT_IM_END_TOKEN
                        prompt = prompt.replace(DEFAULT_IMAGE_TOKEN, replace_token, 1)
                    images = new_images
                else:
                    images = image_processor(images, return_tensors='pt')['pixel_values']
                    images = images.to(self.model.device, dtype=torch.float16)
                    replace_token = DEFAULT_IMAGE_PATCH_TOKEN * 256    # HACK: 256 is the max image token length hacked
                    if getattr(self.model.config, 'mm_use_im_start_end', False):
                        replace_token = DEFAULT_IM_START_TOKEN + replace_token + DEFAULT_IM_END_TOKEN
                    prompt = prompt.replace(DEFAULT_IMAGE_TOKEN, replace_token)
            else:
                images = None
            image_args = {"images": images}
        else:
            images = None
            image_args = {}

        l_prompt = len(prompt)
        temperature = float(params.get("temperature", 1.0))
        max_new_tokens = min(int(params.get("max_new_tokens", 256)), 1024)
        stop_str = params.get("stop", None)
        stop_idx = None
        if stop_str is not None:
            stop_idx = tokenizer(stop_str).input_ids
            if len(stop_idx) == 1:
                stop_idx = stop_idx[0]
            else:
                stop_idx = None

        input_ids = tokenizer(prompt).input_ids
        output_ids = list(input_ids)
        pred_ids = []

        max_src_len = self.context_len - max_new_tokens - 8
        input_ids = input_ids[-max_src_len:]

        past_key_values = None
        for i in range(max_new_tokens):
            if i == 0:
                out = model(
                    torch.as_tensor([input_ids]).cuda(),
                    use_cache=True,
                    **image_args)
                logits = out.logits
                past_key_values = out.past_key_values
            else:
                attention_mask = torch.ones(
                    1, past_key_values[0][0].shape[-2] + 1, device="cuda")
                out = model(input_ids=torch.as_tensor([[token]], device="cuda"),
                            use_cache=True,
                            attention_mask=attention_mask,
                            past_key_values=past_key_values)
                logits = out.logits
                past_key_values = out.past_key_values

            last_token_logits = logits[0][-1]
            if temperature < 1e-4:
                token = int(torch.argmax(last_token_logits))
            else:
                probs = torch.softmax(last_token_logits / temperature, dim=-1)
                token = int(torch.multinomial(probs, num_samples=1))

            output_ids.append(token)
            pred_ids.append(token)

            if stop_idx is not None and token == stop_idx:
                stopped = True
            elif token == tokenizer.eos_token_id:
                stopped = True
            else:
                stopped = False

            if i % args.stream_interval == 0 or i == max_new_tokens - 1 or stopped:
                cur_out = tokenizer.decode(pred_ids, skip_special_tokens=True)
                pos = cur_out.rfind(stop_str)
                if pos != -1:
                    cur_out = cur_out[:pos]
                    stopped = True
                output = ori_prompt + cur_out

                ret = {
                    "text": output,
                    "error_code": 0,
                }
                yield json.dumps(ret).encode() + b"\0"

            if stopped:
                break

        if past_key_values is not None:
            del past_key_values

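    # Note on the streaming format: each chunk yielded above is a JSON object
    # ({"text": ..., "error_code": ...}) encoded to bytes and terminated with a
    # null byte (b"\0"); clients such as test_message.py below split the HTTP
    # stream on that delimiter to recover one JSON payload per chunk.
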
    def generate_stream_gate(self, params):
        try:
            for x in self.generate_stream(params):
                yield x
        except ValueError as e:
            print("Caught ValueError:", e)
            ret = {
                "text": server_error_msg,
                "error_code": 1,
            }
            yield json.dumps(ret).encode() + b"\0"
        except torch.cuda.CudaError as e:
            print("Caught torch.cuda.CudaError:", e)
            ret = {
                "text": server_error_msg,
                "error_code": 1,
            }
            yield json.dumps(ret).encode() + b"\0"

app = FastAPI()


def release_model_semaphore(fn=None):
    model_semaphore.release()
    if fn is not None:
        fn()


@app.post("/worker_generate_stream")
async def generate_stream(request: Request):
    global model_semaphore, global_counter
    global_counter += 1
    params = await request.json()

    if model_semaphore is None:
        model_semaphore = asyncio.Semaphore(args.limit_model_concurrency)
    await model_semaphore.acquire()
    worker.send_heart_beat()
    generator = worker.generate_stream_gate(params)
    background_tasks = BackgroundTasks()
    background_tasks.add_task(partial(release_model_semaphore, fn=worker.send_heart_beat))
    return StreamingResponse(generator, background=background_tasks)


@app.post("/worker_get_status")
async def get_status(request: Request):
    return worker.get_status()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--host", type=str, default="localhost")
    parser.add_argument("--port", type=int, default=21002)
    parser.add_argument("--worker-address", type=str,
        default="http://localhost:21002")
    parser.add_argument("--controller-address", type=str,
        default="http://localhost:21001")
    parser.add_argument("--model-path", type=str, default="facebook/opt-350m")
    parser.add_argument("--model-name", type=str)
    parser.add_argument("--multi-modal", action="store_true", help="Multimodal mode is automatically detected with model name, please make sure `llava` is included in the model path.")
    parser.add_argument("--keep-aspect-ratio", action="store_true")
    parser.add_argument("--num-gpus", type=int, default=1)
    parser.add_argument("--limit-model-concurrency", type=int, default=5)
    parser.add_argument("--stream-interval", type=int, default=2)
    parser.add_argument("--no-register", action="store_true")
    args = parser.parse_args()
    logger.info(f"args: {args}")

    if args.multi_modal:
        logger.warning("Multimodal mode is automatically detected with model name, please make sure `llava` is included in the model path.")

    worker = ModelWorker(args.controller_address,
                         args.worker_address,
                         worker_id,
                         args.no_register,
                         args.model_path,
                         args.model_name,
                         args.keep_aspect_ratio,
                         args.num_gpus)
    uvicorn.run(app, host=args.host, port=args.port, log_level="info")
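# Example launch (sketch; the module path assumes this file lives at
# llava/serve/model_worker.py and that a controller is already running at the
# default address; the model path is a placeholder):
#
#   python3 -m llava.serve.model_worker --model-path /path/to/llava-13b-checkpoint \
#       --controller-address http://localhost:21001 --port 21002 \
#       --worker-address http://localhost:21002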
26
models/LLaVA/llava/serve/register_worker.py
Normal file
@@ -0,0 +1,26 @@
"""
|
||||
Manually register workers.
|
||||
|
||||
Usage:
|
||||
python3 -m fastchat.serve.register_worker --controller http://localhost:21001 --worker-name http://localhost:21002
|
||||
"""
|
||||
|
||||
import argparse
|
||||
|
||||
import requests
|
||||
|
||||
if __name__ == "__main__":
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument("--controller-address", type=str)
|
||||
parser.add_argument("--worker-name", type=str)
|
||||
parser.add_argument("--check-heart-beat", action="store_true")
|
||||
args = parser.parse_args()
|
||||
|
||||
url = args.controller_address + "/register_worker"
|
||||
data = {
|
||||
"worker_name": args.worker_name,
|
||||
"check_heart_beat": args.check_heart_beat,
|
||||
"worker_status": None,
|
||||
}
|
||||
r = requests.post(url, json=data)
|
||||
assert r.status_code == 200
|
||||
62
models/LLaVA/llava/serve/test_message.py
Normal file
@@ -0,0 +1,62 @@
import argparse
import json

import requests

from llava.conversation import default_conversation


def main():
    if args.worker_address:
        worker_addr = args.worker_address
    else:
        controller_addr = args.controller_address
        ret = requests.post(controller_addr + "/refresh_all_workers")
        ret = requests.post(controller_addr + "/list_models")
        models = ret.json()["models"]
        models.sort()
        print(f"Models: {models}")

        ret = requests.post(controller_addr + "/get_worker_address",
            json={"model": args.model_name})
        worker_addr = ret.json()["address"]
        print(f"worker_addr: {worker_addr}")

    if worker_addr == "":
        return

    conv = default_conversation.copy()
    conv.append_message(conv.roles[0], args.message)
    prompt = conv.get_prompt()

    headers = {"User-Agent": "LLaVA Client"}
    pload = {
        "model": args.model_name,
        "prompt": prompt,
        "max_new_tokens": args.max_new_tokens,
        "temperature": 0.7,
        "stop": conv.sep,
    }
    response = requests.post(worker_addr + "/worker_generate_stream", headers=headers,
            json=pload, stream=True)

    print(prompt.replace(conv.sep, "\n"), end="")
    for chunk in response.iter_lines(chunk_size=8192, decode_unicode=False, delimiter=b"\0"):
        if chunk:
            data = json.loads(chunk.decode("utf-8"))
            output = data["text"].split(conv.sep)[-1]
            print(output, end="\r")
    print("")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--controller-address", type=str, default="http://localhost:21001")
    parser.add_argument("--worker-address", type=str)
    parser.add_argument("--model-name", type=str, default="facebook/opt-350m")
    parser.add_argument("--max-new-tokens", type=int, default=32)
    parser.add_argument("--message", type=str, default=
        "Tell me a story with more than 1000 words.")
    args = parser.parse_args()

    main()
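# Example usage (sketch; assumes a controller and at least one registered worker
# are already running, that this file is importable as llava.serve.test_message,
# and that the model name matches whatever the worker registered under):
#
#   python3 -m llava.serve.test_message --model-name llava-13b \
#       --message "Tell me a story with more than 1000 words."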