From 389b2682914a0da6496af066fe82c98eb63557db Mon Sep 17 00:00:00 2001
From: CaptainB
Date: Mon, 16 Dec 2024 16:10:43 +0800
Subject: [PATCH] feat: BaiLian Image Model

---
 .../aliyun_bai_lian_model_provider.py         | 31 +++++-
 .../credential/image.py                       | 71 ++++++++++++++
 .../credential/tti.py                         | 94 +++++++++++++++++++
 .../model/image.py                            | 23 +++++
 .../model/tti.py                              | 58 ++++++++++++
 5 files changed, 274 insertions(+), 3 deletions(-)
 create mode 100644 apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/image.py
 create mode 100644 apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py
 create mode 100644 apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/image.py
 create mode 100644 apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/tti.py

diff --git a/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/aliyun_bai_lian_model_provider.py b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/aliyun_bai_lian_model_provider.py
index 2333505b620..c48052251bd 100644
--- a/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/aliyun_bai_lian_model_provider.py
+++ b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/aliyun_bai_lian_model_provider.py
@@ -13,15 +13,19 @@
     ModelInfoManage
 from setting.models_provider.impl.aliyun_bai_lian_model_provider.credential.embedding import \
     AliyunBaiLianEmbeddingCredential
+from setting.models_provider.impl.aliyun_bai_lian_model_provider.credential.image import QwenVLModelCredential
 from setting.models_provider.impl.aliyun_bai_lian_model_provider.credential.llm import BaiLianLLMModelCredential
 from setting.models_provider.impl.aliyun_bai_lian_model_provider.credential.reranker import \
     AliyunBaiLianRerankerCredential
 from setting.models_provider.impl.aliyun_bai_lian_model_provider.credential.stt import AliyunBaiLianSTTModelCredential
+from setting.models_provider.impl.aliyun_bai_lian_model_provider.credential.tti import QwenTextToImageModelCredential
 from setting.models_provider.impl.aliyun_bai_lian_model_provider.credential.tts import AliyunBaiLianTTSModelCredential
 from setting.models_provider.impl.aliyun_bai_lian_model_provider.model.embedding import AliyunBaiLianEmbedding
+from setting.models_provider.impl.aliyun_bai_lian_model_provider.model.image import QwenVLChatModel
 from setting.models_provider.impl.aliyun_bai_lian_model_provider.model.llm import BaiLianChatModel
 from setting.models_provider.impl.aliyun_bai_lian_model_provider.model.reranker import AliyunBaiLianReranker
 from setting.models_provider.impl.aliyun_bai_lian_model_provider.model.stt import AliyunBaiLianSpeechToText
+from setting.models_provider.impl.aliyun_bai_lian_model_provider.model.tti import QwenTextToImageModel
 from setting.models_provider.impl.aliyun_bai_lian_model_provider.model.tts import AliyunBaiLianTextToSpeech
 from smartdoc.conf import PROJECT_DIR
 
@@ -30,6 +34,8 @@
 aliyun_bai_lian_stt_model_credential = AliyunBaiLianSTTModelCredential()
 aliyun_bai_lian_embedding_model_credential = AliyunBaiLianEmbeddingCredential()
 aliyun_bai_lian_llm_model_credential = BaiLianLLMModelCredential()
+qwenvl_model_credential = QwenVLModelCredential()
+qwentti_model_credential = QwenTextToImageModelCredential()
 
 model_info_list = [ModelInfo('gte-rerank',
                              '阿里巴巴通义实验室开发的GTE-Rerank文本排序系列模型,开发者可以通过LlamaIndex框架进行集成高质量文本检索、排序。',
@@ -52,9 +58,28 @@
                              BaiLianChatModel)
 ]
 
-model_info_manage = ModelInfoManage.builder().append_model_info_list(model_info_list).append_default_model_info(
-    model_info_list[1]).append_default_model_info(model_info_list[2]).append_default_model_info(
-    model_info_list[3]).append_default_model_info(model_info_list[4]).build()
+module_info_vl_list = [
+    ModelInfo('qwen-vl-max', '', ModelTypeConst.IMAGE, qwenvl_model_credential, QwenVLChatModel),
+    ModelInfo('qwen-vl-max-0809', '', ModelTypeConst.IMAGE, qwenvl_model_credential, QwenVLChatModel),
+    ModelInfo('qwen-vl-plus-0809', '', ModelTypeConst.IMAGE, qwenvl_model_credential, QwenVLChatModel),
+]
+module_info_tti_list = [
+    ModelInfo('wanx-v1',
+              '通义万相-文本生成图像大模型,支持中英文双语输入,支持输入参考图片进行参考内容或者参考风格迁移,重点风格包括但不限于水彩、油画、中国画、素描、扁平插画、二次元、3D卡通。',
+              ModelTypeConst.TTI, qwentti_model_credential, QwenTextToImageModel),
+]
+
+model_info_manage = (
+    ModelInfoManage.builder()
+    .append_model_info_list(model_info_list)
+    .append_model_info_list(module_info_vl_list)
+    .append_model_info_list(module_info_tti_list)
+    .append_default_model_info(model_info_list[1])
+    .append_default_model_info(model_info_list[2])
+    .append_default_model_info(model_info_list[3])
+    .append_default_model_info(model_info_list[4])
+    .build()
+)
 
 
 class AliyunBaiLianModelProvider(IModelProvider):
diff --git a/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/image.py b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/image.py
new file mode 100644
index 00000000000..e77d4dfdf7e
--- /dev/null
+++ b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/image.py
@@ -0,0 +1,71 @@
+# coding=utf-8
+"""
+    @project: MaxKB
+    @Author:虎
+    @file: image.py
+    @date:2024/7/11 18:41
+    @desc:
+"""
+import base64
+import os
+from typing import Dict
+
+from langchain_core.messages import HumanMessage
+
+from common import forms
+from common.exception.app_exception import AppApiException
+from common.forms import BaseForm, TooltipLabel
+from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode
+
+
+class QwenModelParams(BaseForm):
+    temperature = forms.SliderField(TooltipLabel('温度', '较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定'),
+                                    required=True, default_value=1.0,
+                                    _min=0.1,
+                                    _max=1.9,
+                                    _step=0.01,
+                                    precision=2)
+
+    max_tokens = forms.SliderField(
+        TooltipLabel('输出最大Tokens', '指定模型可生成的最大token个数'),
+        required=True, default_value=800,
+        _min=1,
+        _max=100000,
+        _step=1,
+        precision=0)
+
+
+class QwenVLModelCredential(BaseForm, BaseModelCredential):
+
+    def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], provider,
+                 raise_exception=False):
+        model_type_list = provider.get_model_type_list()
+        if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
+            raise AppApiException(ValidCode.valid_error.value, f'{model_type} 模型类型不支持')
+        for key in ['api_key']:
+            if key not in model_credential:
+                if raise_exception:
+                    raise AppApiException(ValidCode.valid_error.value, f'{key} 字段为必填字段')
+                else:
+                    return False
+        try:
+            model = provider.get_model(model_type, model_name, model_credential)
+            res = model.stream([HumanMessage(content=[{"type": "text", "text": "你好"}])])
+            for chunk in res:
+                print(chunk)
+        except Exception as e:
+            if isinstance(e, AppApiException):
+                raise e
+            if raise_exception:
+                raise AppApiException(ValidCode.valid_error.value, f'校验失败,请检查参数是否正确: {str(e)}')
+            else:
+                return False
+        return True
+
+    def encryption_dict(self, model: Dict[str, object]):
+        return {**model, 'api_key': super().encryption(model.get('api_key', ''))}
+
+    api_key = forms.PasswordInputField('API Key', required=True)
+
+    def get_model_params_setting_form(self, model_name):
+        return QwenModelParams()
diff --git a/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py
new file mode 100644
index 00000000000..395d94db9e1
--- /dev/null
+++ b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py
@@ -0,0 +1,94 @@
+# coding=utf-8
+"""
+    @project: MaxKB
+    @Author:虎
+    @file: tti.py
+    @date:2024/7/11 18:41
+    @desc:
+"""
+import base64
+import os
+from typing import Dict
+
+from langchain_core.messages import HumanMessage
+
+from common import forms
+from common.exception.app_exception import AppApiException
+from common.forms import BaseForm, TooltipLabel
+from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode
+
+
+class QwenModelParams(BaseForm):
+    size = forms.SingleSelect(
+        TooltipLabel('图片尺寸', '指定生成图片的尺寸, 如: 1024*1024'),
+        required=True,
+        default_value='1024*1024',
+        option_list=[
+            {'value': '1024*1024', 'label': '1024*1024'},
+            {'value': '720*1280', 'label': '720*1280'},
+            {'value': '768*1152', 'label': '768*1152'},
+            {'value': '1280*720', 'label': '1280*720'},
+        ],
+        text_field='label',
+        value_field='value')
+    n = forms.SliderField(
+        TooltipLabel('图片数量', '指定生成图片的数量'),
+        required=True, default_value=1,
+        _min=1,
+        _max=4,
+        _step=1,
+        precision=0)
+    style = forms.SingleSelect(
+        TooltipLabel('风格', '指定生成图片的风格'),
+        required=True,
+        default_value='<auto>',
+        option_list=[
+            {'value': '<auto>', 'label': '默认值,由模型随机输出图像风格'},
+            {'value': '<photography>', 'label': '摄影'},
+            {'value': '<portrait>', 'label': '人像写真'},
+            {'value': '<3d cartoon>', 'label': '3D卡通'},
+            {'value': '<anime>', 'label': '动画'},
+            {'value': '<oil painting>', 'label': '油画'},
+            {'value': '<watercolor>', 'label': '水彩'},
+            {'value': '<sketch>', 'label': '素描'},
+            {'value': '<chinese painting>', 'label': '中国画'},
+            {'value': '<flat illustration>', 'label': '扁平插画'},
+        ],
+        text_field='label',
+        value_field='value'
+    )
+
+
+class QwenTextToImageModelCredential(BaseForm, BaseModelCredential):
+
+    def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], provider,
+                 raise_exception=False):
+        model_type_list = provider.get_model_type_list()
+        if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
+            raise AppApiException(ValidCode.valid_error.value, f'{model_type} 模型类型不支持')
+        for key in ['api_key']:
+            if key not in model_credential:
+                if raise_exception:
+                    raise AppApiException(ValidCode.valid_error.value, f'{key} 字段为必填字段')
+                else:
+                    return False
+        try:
+            model = provider.get_model(model_type, model_name, model_credential)
+            res = model.check_auth()
+            print(res)
+        except Exception as e:
+            if isinstance(e, AppApiException):
+                raise e
+            if raise_exception:
+                raise AppApiException(ValidCode.valid_error.value, f'校验失败,请检查参数是否正确: {str(e)}')
+            else:
+                return False
+        return True
+
+    def encryption_dict(self, model: Dict[str, object]):
+        return {**model, 'api_key': super().encryption(model.get('api_key', ''))}
+
+    api_key = forms.PasswordInputField('API Key', required=True)
+
+    def get_model_params_setting_form(self, model_name):
+        return QwenModelParams()
diff --git a/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/image.py b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/image.py
new file mode 100644
index 00000000000..57598fe9761
--- /dev/null
+++ b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/image.py
@@ -0,0 +1,23 @@
+# coding=utf-8
+
+from typing import Dict
+
+from langchain_community.chat_models import ChatOpenAI
+
+from setting.models_provider.base_model_provider import MaxKBBaseModel
+
+
+class QwenVLChatModel(MaxKBBaseModel, ChatOpenAI):
+
+    @staticmethod
+    def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
+        optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs)
+        chat_tong_yi = QwenVLChatModel(
+            model_name=model_name,
+            openai_api_key=model_credential.get('api_key'),
+            openai_api_base='https://dashscope.aliyuncs.com/compatible-mode/v1',
+            # stream_options={"include_usage": True},
+            streaming=True,
+            model_kwargs=optional_params,
+        )
+        return chat_tong_yi
diff --git a/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/tti.py b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/tti.py
new file mode 100644
index 00000000000..c2fd32877e4
--- /dev/null
+++ b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/tti.py
@@ -0,0 +1,58 @@
+# coding=utf-8
+from http import HTTPStatus
+from typing import Dict
+
+from dashscope import ImageSynthesis
+from langchain_community.chat_models import ChatTongyi
+from langchain_core.messages import HumanMessage
+
+from setting.models_provider.base_model_provider import MaxKBBaseModel
+from setting.models_provider.impl.base_tti import BaseTextToImage
+
+
+class QwenTextToImageModel(MaxKBBaseModel, BaseTextToImage):
+    api_key: str
+    model_name: str
+    params: dict
+
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+        self.api_key = kwargs.get('api_key')
+        self.model_name = kwargs.get('model_name')
+        self.params = kwargs.get('params')
+
+    @staticmethod
+    def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
+        optional_params = {'params': {'size': '1024*1024', 'style': '<auto>', 'n': 1}}
+        for key, value in model_kwargs.items():
+            if key not in ['model_id', 'use_local', 'streaming']:
+                optional_params['params'][key] = value
+        chat_tong_yi = QwenTextToImageModel(
+            model_name=model_name,
+            api_key=model_credential.get('api_key'),
+            **optional_params,
+        )
+        return chat_tong_yi
+
+    def is_cache_model(self):
+        return False
+
+    def check_auth(self):
+        chat = ChatTongyi(api_key=self.api_key, model_name='qwen-max')
+        chat.invoke([HumanMessage([{"type": "text", "text": "你好"}])])
+
+    def generate_image(self, prompt: str, negative_prompt: str = None):
+        # api_base='https://dashscope.aliyuncs.com/compatible-mode/v1',
+        rsp = ImageSynthesis.call(api_key=self.api_key,
+                                  model=self.model_name,
+                                  prompt=prompt,
+                                  negative_prompt=negative_prompt,
+                                  **self.params)
+        file_urls = []
+        if rsp.status_code == HTTPStatus.OK:
+            for result in rsp.output.results:
+                file_urls.append(result.url)
+        else:
+            print('sync_call Failed, status_code: %s, code: %s, message: %s' %
+                  (rsp.status_code, rsp.code, rsp.message))
+        return file_urls
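
Reviewer note (not part of the patch): a minimal usage sketch of the new QwenVLChatModel wiring above. The API key and image URL below are placeholders, and it assumes the MaxKB apps package is importable and the DashScope OpenAI-compatible endpoint is reachable.

    # Hypothetical usage sketch for QwenVLChatModel; 'sk-...' and the image URL are placeholders.
    from langchain_core.messages import HumanMessage
    from setting.models_provider.impl.aliyun_bai_lian_model_provider.model.image import QwenVLChatModel

    model = QwenVLChatModel.new_instance(
        model_type='IMAGE',                       # new_instance ignores model_type, value is illustrative
        model_name='qwen-vl-max',
        model_credential={'api_key': 'sk-...'},   # placeholder DashScope API key
    )
    message = HumanMessage(content=[
        {'type': 'text', 'text': 'Describe this picture.'},
        {'type': 'image_url', 'image_url': {'url': 'https://example.com/cat.png'}},  # hypothetical URL
    ])
    for chunk in model.stream([message]):         # streaming=True is set inside new_instance
        print(chunk.content, end='', flush=True)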
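
Similarly, a minimal sketch of driving the new QwenTextToImageModel end to end, assuming a valid DashScope API key and the dashscope package installed; the key and prompt are placeholders.

    # Hypothetical usage sketch for QwenTextToImageModel; the API key is a placeholder.
    from setting.models_provider.impl.aliyun_bai_lian_model_provider.model.tti import QwenTextToImageModel

    model = QwenTextToImageModel.new_instance(
        model_type='TTI',                         # ignored by new_instance, illustrative only
        model_name='wanx-v1',
        model_credential={'api_key': 'sk-...'},   # placeholder DashScope API key
        size='1024*1024',                         # extra kwargs are copied into self.params
        n=1,
        style='<watercolor>',
    )
    model.check_auth()                            # raises if the credential is rejected
    urls = model.generate_image('a cat running under the moonlight')
    print(urls)                                   # list of generated image URLs (empty on failure)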