Security News
Research
Data Theft Repackaged: A Case Study in Malicious Wrapper Packages on npm
The Socket Research Team breaks down a malicious wrapper package that uses obfuscation to harvest credentials and exfiltrate sensitive data.
AIBridge is a Python package with support for multiple LLMs. Users can utilise the formatters, prompts, and variables to get the most out of LLMs through AIBridge.
pip install aibridge-test
Example in bash we set:
export AIBRIDGE_CONFIG=C:/Users/Admin/aibridge/aibridge_config.yaml
variable name for env:AIBRIDGE_CONFIG
starter file.
&id001
data: *id001
group_name: my_consumer_group
message_queue: redis
no_of_threads: 1
open_ai:
- key: API KEY HERE
priority: equal
palm_api:
- key: API KEY HERE
priority: equal
stable_diffusion:
- key: API KEY HERE
priority: equal
redis_host: localhost
redis_port: 6379
stream_name: my_stream
database: nosql
database_name: aibridge
database_uri: mongodb://localhost:27017
per_page: 10
from AIBridge import SetConfig
# call the config method
SetConfig.set_db_config(database="sql", database_name=None, database_uri=None)
# parameters:
# database: sql/nosql
# database_uri: URL of the database of your choice (all SQL supported; for NoSQL, Mongo)
from AIBridge import SetConfig
SetConfig.set_api_key(ai_service="open_ai",key="YOUR_API_KEY",priority="high")
#priority:high/medium/low/equal
from AIBridge import PromptInsertion
# save prompt
data = PromptInsertion.save_prompt(
prompt="your prompt:{{data}},context:{{context}}",
name="first_prompt",
prompt_data={"data": "what is purpose of the ozone here"},
variables={"context": "environment_context"},
)
print(data)
# parameters: prompt_data: is used to manipulate the same prompt with different context at realtime
# variables: is used to manipulate the prompt with fixed context, as variables is specific data
# update prompt: can set the prompt_data and variables; used to get different output from the same prompt
data = PromptInsertion.update_prompt(
id="prompt_id",
name="updated_prompt",
prompt_data={"data": "write abouts the plastic pollution"},
variables={"context": "ocean_pollution"},
)
print(data)
#Get prompt from id
data = PromptInsertion.get_prompt(id="prompt_id")
print(data)
# pagination support for getting the all prompt
data = PromptInsertion.get_all_prompt(page=1)
print(data)
from AIBridge import VariableInsertion
# save variables
# parameters: var_key: key for the variables
# var_value: list of strings for the context
data = VariableInsertion.save_variables(
var_key="ochean_context",
var_value=[
"Ocean pollution is a significant environmental issue that poses a threat to marine life and ecosystems"
],
)
print(data)
# update the variables
data = VariableInsertion.update_variables(
id="variable_id",
var_key="updated_string",
var_value=["updated sentence about topics"],
)
print(data)
# get Variables from id
data = VariableInsertion.get_variable(id="variable_id")
# get all Variables pagination
data = VariableInsertion.get_all_variable(page=1)
from AIBridge import OpenAIService
import json
json_schema = json.dumps({"animal": ["list of animals"]})
xml_schema = "<animals><category>animal name</category></animals>"
csv = "name,category,species,age,weight,color,habitat"
data = OpenAIService.generate(
prompts=["name of the animals in the {{jungle}}"],
prompt_ids=None,
prompt_data=[{"jungle": "jungle"}],
variables=None,
output_format=["json"],
format_strcture=[json_schema],
model="gpt-3.5-turbo",
variation_count=1,
max_tokens=3500,
temperature=0.5,
message_queue=False,
)
print(data)
# Parameters
# prompts= list of strings to be executed in a session where each output is dependent on the previous,
# prompt_ids= list of prompt ids; at a time either ids or prompts will execute,
# prompt_data=[data for every prompt id as required],
# variables=[variable dict for the prompt],
# output_format=["xml/json/csv/sql"],
# format_strcture=[output structure of the prompt],
# model="gpt-3.5-turbo", model for the GPT completion API
# variation_count = 1, n of the outputs required
# max_tokens = 3500, maximum tokens per output
# temperature = 0.5, data consistency
# message_queue=False, scalability purpose
output = {
"items": {
"response": [
{
"data": [
'{"animal": ["lion", "tiger", "elephant", "monkey", "snake", "gorilla", "leopard", "crocodile", "jaguar", "giraffe"]}'
]
}
],
"token_used": 85,
"created_at": 1689323114.9568439,
"ai_service": "open_ai",
}
}
from AIBridge import SetConfig
# set redis configuration
SetConfig.redis_config(
redis_host="localhost",
redis_port="port for redis",
group_name="consumer group name",
stream_name="redis topic",
no_of_threads=1,  # concurrent threads you want to run for your application
)
from AIBridge import OpenAIService
import json
json_schema = json.dumps({"animal": ["list of animals"]})
data = OpenAIService.generate(
prompts=["name of the animals in the {{jungle}}"],
prompt_ids=None,
prompt_data=[{"jungle": "jungle"}],
variables=None,
output_format=["json"],
format_strcture=[json_schema],
message_queue=True# to activate message queue service
)
# to use the Queue service, set the message_queue parameter = True
print(data)
*Response for the above function is the id of the response stored in the database
{ "response_id": "eaa61944-3216-4ba1-bec5-05842fb86d86" }
from AIBridge import MessageQ
# to start the consumer in background
MessageQ.mq_deque()
from AIBridge import MessageQ
# this is for testing the redis env locally in a single-page file
data = MessageQ.local_process()
print(data)
### Dall-E image generation ###
from AIBridge.ai_services.openai_images import OpenAIImage
images = OpenAIImage.generate(
prompts=["A sunlit indoor lounge area with a pool containing a flamingo"],
image_data=["image location or image url"],
mask_image=["image location or image url"],
variation_count=1,
process_type="edit",
)
print(images)
# prompts: list of strings for how many different images we have to generate
# image_data: the location of the image in a file, or the image url
# mask_image: the mask image with a transparent patch where we want to edit the image
# variation_count: the number of images we want to generate
# process type: create, edit, variation
# create is for generating new images
# edit is for editing the image with the mask; the mask is compulsory to edit the images
# variation is for generating new images of the same type
palm_api:
- key: AIz****************************QkkA(your-api_key)
priority: equal
from AIBridge import SetConfig
SetConfig.set_api_key(ai_service="palm_api",key="YOUR_API_KEY",priority="high")
#priority:high/medium/low/equal
```python
from AIBridge import PalmText
prompt = """
write paragraph about the {{prompting}}in ai and let user know what is the{{prompting}} and how the {{prompting}} works in genrative AI
"""
json_format = """{"text": "paragraph here"}"""
data = PalmText.generate(
prompts=[prompt],
prompt_data=[{"prompting": "model training"}],
output_format=["json"],
format_strcture=[json_format],
message_queue=True,
)
print(data)
# Parameters
# prompts= list of strings to be executed in a session where each output is dependent on the previous,
# prompt_ids= list of prompt ids; at a time either ids or prompts will execute,
# prompt_data=[data for every prompt id as required],
# variables=[variable dict for the prompt],
# output_format=["xml/json/csv/sql"],
# format_strcture=[output structure of the prompt],
# model="models/text-bison-001", model for the Palm generate API
# variation_count = 1-8, n of the outputs required
# max_tokens = default 10000, maximum tokens per output, no limit for tokens
# temperature = default-0.5, data consistency
# message_queue=False, scalability purpose
from AIBridge import PalmChat
# An array of "ideal" interactions between the user and the model
examples = [
(
"What's up?",
"What isn't up?? The sun rose another day, the world is bright, anything is possible!",
),
(
"I'm kind of bored",
"How can you be bored when there are so many fun, exciting, beautiful experiences to be had in the world?",
),
]
data = PalmChat.generate(
messages="give the protype for the stack on c++",
context="career or growth advice",
variation_count=3,
message_queue=True,
)
print(data)
# messages: text provided to chat
# context: the basis on which you want to start the chat
# examples: demo for the LLM to understand the tone and what you really want (few-shot prompt type)
# variation_count: how many variations of the context should be used
# message_queue: if true, the chat will return the messages in a queue
# temperature = default-0.5, data consistency
stable_diffusion:
- key: API Key here
priority: equal
from AIBridge import StableDiffusion
data = StableDiffusion.generate(
prompts=["cat sitting on bench"],  # prompts is the list of how many images you have to generate per request
action="text2img",
)
print(data)
from AIBridge import StableDiffusion
init_image = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"
data = StableDiffusion.generate(
prompts=["cat sitting on bench"],
image_data=[init_image],  # image_data is a list following the prompts; for each prompt there should be one image data
action="img2img",
)
print(data)
from AIBridge import StableDiffusion
mask_image = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"
init_image = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"
data = StableDiffusion.generate(
prompts=["cat sitting on bench"],
image_data=[init_image],
mask_image=[mask_image],
action="inpaint",
)
print(data)
from AIBridge import StableDiffusion
data=StableDiffusion.fetch_image(id="image_id here")
print(data)
{
"status": "success",
"id": 12202888,
"output": [
"https://pub-8b49af329fae499aa563997f5d4068a4.r2.dev/generations/e5cd86d3-7305-47fc-82c1-7d1a3b130fa4-0.png"
]
}
from AIBridge import StableDiffusion
data=StableDiffusion.system_load(id="image_id here")
print(data)
{
"queue_num": 0,
"queue_time": 0,
"status": "ok"
}
cohere_api:
- key: API Key here
priority: equal
from AIBridge import SetConfig
SetConfig.set_api_key(ai_service="cohere_api",key="YOUR_API_KEY",priority="high")
#priority:high/medium/low/equal
from AIBridge import CohereApi
json_str = """{"text": "text here"}"""
csv_string = "name,animal_type,prey,predators"
xml_string = """<data><animal>animal information here</animal></data>"""
data = CohereApi.generate(
prompts=["give the me the more information about the {{animals}}"],
prompt_data=[{"animals": "animals"}],
format_strcture=[xml_string],
output_format=["xml"],
)
print(data)
# Parameters
# prompts= list of strings to be executed in a session where each output is dependent on the previous,
# prompt_ids= list of prompt ids; at a time either ids or prompts will execute,
# prompt_data=[data for every prompt id as required],
# variables=[variable dict for the prompt],
# output_format=["xml/json/csv/sql"],
# format_strcture=[output structure of the prompt],
# model="models/text-bison-001", model for the generate API
# variation_count = n of the outputs required
# max_tokens = default 10000, maximum tokens per output, no limit for tokens
# temperature = default-0.5, data consistency
# message_queue=False, scalability purpose
ai21_api:
- key: API Key here
priority: equal
from AIBridge import SetConfig
SetConfig.set_api_key(ai_service="ai21_api",key="YOUR_API_KEY",priority="high")
#priority:high/medium/low/equal
from AIBridge import AI21labsText
json_str = """{"text": "text here"}"""
csv_string = "name,animal_type,prey,predators"
xml_string = """<?xml version="1.0" encoding="UTF-8" ?><data><animal>animal information here</animal></data>"""
data = AI21labsText.generate(
prompts=["give the me the more information about the {{animals}}"],
prompt_data=[{"animals": "tigers"}],
format_strcture=[csv_string],
output_format=["csv"],
)
print(data)
# Parameters
# prompts= list of strings to be executed in a session where each output is dependent on the previous,
# prompt_ids= list of prompt ids; at a time either ids or prompts will execute,
# prompt_data=[data for every prompt id as required],
# variables=[variable dict for the prompt],
# output_format=["xml/json/csv/sql"],
# format_strcture=[output structure of the prompt],
# model="models/text-bison-001", model for the generate API
# variation_count = n of the outputs required
# max_tokens = default 10000, maximum tokens per output, no limit for tokens
# temperature = default-0.5, data consistency
# message_queue=False, scalability purpose
FAQs
Bridge for LLMs
We found that aibridge-test demonstrated a healthy version release cadence and project activity because the last version was released less than a year ago. It has 1 open source maintainer collaborating on the project.
Did you know?
Socket for GitHub automatically highlights issues in each pull request and monitors the health of all your open source dependencies. Discover the contents of your packages and block harmful activity before you install or update your dependencies.
Security News
Research
The Socket Research Team breaks down a malicious wrapper package that uses obfuscation to harvest credentials and exfiltrate sensitive data.
Research
Security News
Attackers used a malicious npm package typosquatting a popular ESLint plugin to steal sensitive data, execute commands, and exploit developer systems.
Security News
The Ultralytics' PyPI Package was compromised four times in one weekend through GitHub Actions cache poisoning and failure to rotate previously compromised API tokens.