Latest Threat Research: SANDWORM_MODE: Shai-Hulud-Style npm Worm Hijacks CI Workflows and Poisons AI Toolchains. Details
Socket
Book a DemoSign in
Socket

bafcode

Package Overview
Dependencies
Maintainers
1
Versions
17
Alerts
File Explorer

Advanced tools

Socket logo

Install Socket

Detect and block malicious and high-risk dependencies

Install

bafcode - pypi Package Compare versions

Comparing version
1.0.6
to
1.0.7
+4
cli/commands/make_context/__init__.py
from .api_context import ApiContext
from .llms_context import LlmContext
from .prompts_context import PromptsContext
from .tools_context import ToolContext
class ApiContext:
    """Template provider for scaffolding a new API wrapper module."""

    @staticmethod
    def context(api_name):
        """Return the scaffold source code for a new API module.

        api_name -- snake_case function name substituted into the template.
        Returns the template text with every ``{api_name}`` token replaced.
        """
        # NOTE: the template is a scaffold containing deliberate placeholder
        # text; the generated file is meant to be edited by the user before use.
        file_context = """
import requests
from core import BafLog

YOUR_API_ENDPOINT = "https://fakerapi.it/api/v1/texts?_quantity=1&_characters=500" # Placeholder email API endpoint

def {api_name}('Pass any required parameters here e.g., user_id=None'):
    logger = BafLog
    params = ' Pass any required parameters here e.g., {'user_id': user_id}'
    response = requests.get(YOUR_API_ENDPOINT, params=params)
    # Handle API response
    if response.status_code != 200:
        logger.error(f"Error fetching last email for user {user_id}. API response: {response.text}")
        raise Exception(f"Error fetching last email. API responded with: {response.text}")

    your_data_variable = response.json()
    return your_data_variable
"""
        # .replace (not .format) so the template's other literal braces survive.
        return file_context.replace("{api_name}", api_name)
class LlmContext:
    """Template provider for scaffolding a new LLM integration module."""

    @staticmethod
    def snake_to_camel(word):
        """Convert snake_case or a single word to CamelCase."""
        return ''.join(x.capitalize() for x in word.split('_'))

    @staticmethod
    def context(llm_name):
        """Return the scaffold source code for a new LLM module.

        llm_name -- snake_case module name; converted to CamelCase for the
        generated class name before substitution.
        """
        camel_case_name = LlmContext.snake_to_camel(llm_name)
        file_context = """
from core import BafLog
from config import Config

# Optionally, import any other required modules or packages
# E.g., from api import YourLLMAPI

class {llm_name}:
    def __init__(self):
        self.logger = BafLog
        # Initialize your LLM API config here

    def process(self,message,prompt):
        if not prompt:
            self.logger.error("No prompt provided for OpenAI LLM.")
            raise ValueError("A prompt is required for processing.")

        try:
            # use your LLM API and pass in the prompt and message to process here
            response = 'Use your LLM API here e.g., YourLLMAPI.process(prompt,message)'
            return response
            # Response should be a string e.g., "This is a response from the LLM API."
        except Exception as e:
            self.logger.error(f"Error processing with OpenAI LLM: {str(e)}")
            return {
                'message': "Error processing with OpenAI LLM.",
                'status': "error"
            }
"""
        # .replace (not .format) so the template's f-string and dict braces survive.
        return file_context.replace("{llm_name}", camel_case_name)
class PromptContext:
    """Template provider for scaffolding a new prompt module."""

    @staticmethod
    def snake_to_camel(word):
        """Convert snake_case or a single word to CamelCase."""
        return ''.join(x.capitalize() for x in word.split('_'))

    @staticmethod
    def context(prompt_name):
        """Return the scaffold source code for a new prompt module.

        prompt_name -- snake_case name; used as-is for the generated function
        and CamelCased for the generated class name.
        """
        camel_case_name = PromptContext.snake_to_camel(prompt_name)
        file_context = """
from core import BafLog
# Optionally, import any other required modules or packages

class {prompt_name}: # Replace {prompt_name} with the name of your prompt
    def {function}(data):
        prompt = {string}
        {prompt_name} Data:
        {data}
        {string}
        return prompt.format(data=data)
"""
        # BUG FIX: the template's {data} placeholder must survive verbatim so
        # the *generated* file's own prompt.format(data=data) can fill it in.
        # Without data="{data}" this .format call raised KeyError: 'data'.
        return file_context.format(
            prompt_name=camel_case_name,
            string='"""',
            function=prompt_name,
            data="{data}",
        )
class ToolContext:
    """Template provider for scaffolding a new tool module."""

    @staticmethod
    def snake_to_camel(word):
        """Convert snake_case or a single word to CamelCase."""
        return ''.join(x.capitalize() for x in word.split('_'))

    @staticmethod
    def context(tool_name):
        """Return the scaffold source code for a new tool module.

        tool_name -- snake_case name; converted to CamelCase for the generated
        class name before substitution.
        """
        camel_case_name = ToolContext.snake_to_camel(tool_name)
        file_context = """
from core import BafLog
# Optionally, import any other required modules or packages
# E.g., from api import YourAPI
# E.g., from prompts import YourPrompt

class {tool_name}: # Replace {tool_name} with the name of your tool
    def __init__(self):
        self.logger = BafLog

    def execute(self, data):
        prompt = 'Use your imported prompt here e.g., YourPrompt.your_function(data)'
        return prompt
"""
        # .replace substitutes every occurrence, including the trailing comment.
        return file_context.replace("{tool_name}", camel_case_name)
+1
-1
Metadata-Version: 2.1
Name: bafcode
Version: 1.0.6
Version: 1.0.7
Summary: BafCode Framework CLI

@@ -5,0 +5,0 @@ Home-page: https://github.com/aitelabranding/bafcode_cli

@@ -15,2 +15,7 @@ README.md

cli/commands/setup.py
cli/commands/start.py
cli/commands/start.py
cli/commands/make_context/__init__.py
cli/commands/make_context/api_context.py
cli/commands/make_context/llms_context.py
cli/commands/make_context/prompts_context.py
cli/commands/make_context/tools_context.py
import os
import sys
from .make_context import ApiContext, LlmContext, PromptContext, ToolContext
def create_file(path):
def create_file(path,context):
# Ensure the directory exists

@@ -17,3 +18,3 @@ directory = os.path.dirname(path)

with open(path, 'w') as f:
f.write("# Generated by BafCode bafcode cli y\n")
f.write(context)
print(f"File '{path}' created successfully!")

@@ -28,17 +29,21 @@

full_path = os.path.join(base_dir, name + ".py")
create_file(full_path)
file_context = ToolContext(name)
create_file(full_path,file_context)
elif type_ == "api":
base_dir = "api"
full_path = os.path.join(base_dir, name + ".py")
create_file(full_path)
file_context = ApiContext(name)
create_file(full_path,file_context)
elif type_ == "prompt":
base_dir = "prompts"
full_path = os.path.join(base_dir, name + ".py")
create_file(full_path)
file_context = PromptContext(name)
create_file(full_path,file_context)
elif type_ == "llm":
base_dir = "llms"
full_path = os.path.join(base_dir, name + ".py")
create_file(full_path)
file_context = LlmContext(name)
create_file(full_path,file_context)
else:
print(f"Error: Unknown type_ '{type_}'")
sys.exit(1)
Metadata-Version: 2.1
Name: bafcode
Version: 1.0.6
Version: 1.0.7
Summary: BafCode Framework CLI

@@ -5,0 +5,0 @@ Home-page: https://github.com/aitelabranding/bafcode_cli

@@ -5,3 +5,3 @@ from setuptools import setup, find_packages

name="bafcode",
version="1.0.6",
version="1.0.7",
packages=find_packages(),

@@ -8,0 +8,0 @@ install_requires=[