You're Invited: Meet the Socket Team at RSAC and BSidesSF 2026, March 23–26. RSVP
Socket
Book a Demo · Sign in
Socket

bafcode

Package Overview
Dependencies
Maintainers
1
Versions
17
Alerts
File Explorer

Advanced tools

Socket logo

Install Socket

Detect and block malicious and high-risk dependencies

Install

bafcode — PyPI package: compare versions

Comparing version
1.0.7
to
1.0.8
+1
-1
bafcode.egg-info/PKG-INFO
Metadata-Version: 2.1
Name: bafcode
Version: 1.0.7
Version: 1.0.8
Summary: BafCode Framework CLI

@@ -5,0 +5,0 @@ Home-page: https://github.com/aitelabranding/bafcode_cli

from .api_context import ApiContext
from .llms_context import LlmContext
from .prompts_context import PromptsContext
from .prompts_context import PromptContext
from .tools_context import ToolContext

@@ -6,27 +6,30 @@

class ApiContext:
def format_word(word):
formatted_word = word.replace('/', '_')
return formatted_word
def context(api_name):
formatted_word = ApiContext.format_word(api_name)
file_context= """
import requests
from core import BafLog
import requests
from core import BafLog
YOUR_API_ENDPOINT = "https://fakerapi.it/api/v1/texts?_quantity=1&_characters=500" # Placeholder email API endpoint
YOUR_API_ENDPOINT = "https://fakerapi.it/api/v1/texts?_quantity=1&_characters=500" # Placeholder email API endpoint
logger = BafLog
def {api_name}('Pass any required parameters here e.g., user_id=None'):
def {api_name}(your_parameters):
response = requests.get(YOUR_API_ENDPOINT, params=your_parameters)
logger = BafLog
# Handle API response
if response.status_code != 200:
logger.error(f"Error fetching last {api_name} data. API response: {response.text}")
raise Exception(f"Error fetching last {api_name} data. API response: {response.text}")
params = ' Pass any required parameters here e.g., {'user_id': user_id}'
response = requests.get(YOUR_API_ENDPOINT, params=params)
# Handle API response
if response.status_code != 200:
logger.error(f"Error fetching last email for user {user_id}. API response: {response.text}")
raise Exception(f"Error fetching last email. API responded with: {response.text}")
your_data_variable = response.json()
return your_data_variable
your_data_variable = response.json()
return your_data_variable
"""
return file_context.replace("{api_name}", api_name)
return file_context.replace("{api_name}", formatted_word)

@@ -8,41 +8,46 @@

def snake_to_camel(word):
"""
Convert snake_case or a single word to CamelCase.
"""
return ''.join(x.capitalize() for x in word.split('_'))
formatted_word = word.replace('/', '_')
return ''.join(x.capitalize() for x in formatted_word.split('_'))
def format_word(word):
formatted_word = word.replace('/', '_')
return formatted_word
def context(llm_name):
camel_case_name = LlmContext.snake_to_camel(llm_name)
formatted_word = LlmContext.format_word(llm_name)
file_context= """
from core import BafLog
from config import Config
# Optionally, import any other required modules or packages
# E.g., from api import YourLLMAPI
from core import BafLog
from config import Config
# Optionally, import any other required modules or packages
# E.g., from api import YourLLMAPI
class {llm_name}:
def __init__(self):
self.logger = BafLog
class {llm_name}LLM:
def __init__(self):
self.logger = BafLog
# Initialize your LLM API config here
# Initialize your LLM API config here
def process(self,message,prompt):
if not prompt:
self.logger.error("No prompt provided for OpenAI LLM.")
raise ValueError("A prompt is required for processing.")
def process(self,message,prompt):
if not prompt:
self.logger.error("No prompt provided for {llm_name} LLM.")
raise ValueError("A prompt is required for processing.")
try:
# use your LLM API and pass in the prompt and message to process here
response = 'Use your LLM API here e.g., YourLLMAPI.process(prompt,message)'
return response
# Response should be a string e.g., "This is a response from the LLM API."
try:
# use your LLM API and pass in the prompt and message to process here
response = 'Use your LLM API here e.g., YourLLMAPI.process(prompt,message)'
return response
# Response should be a string e.g., "This is a response from the LLM API."
except Exception as e:
self.logger.error(f"Error processing with OpenAI LLM: {str(e)}")
return {
'message': "Error processing with OpenAI LLM.",
'status': "error"
}
except Exception as e:
self.logger.error(f"Error processing with {llm_name} LLM: {str(e)}")
return {
'message': "Error processing with LLM.",
'status': "error"
}

@@ -49,0 +54,0 @@

@@ -8,7 +8,10 @@

def snake_to_camel(word):
"""
Convert snake_case or a single word to CamelCase.
"""
return ''.join(x.capitalize() for x in word.split('_'))
formatted_word = word.replace('/', '_')
return ''.join(x.capitalize() for x in formatted_word.split('_'))
def format_word(word):
formatted_word = word.replace('/', '_')
return formatted_word

@@ -19,11 +22,12 @@

camel_case_name = PromptContext.snake_to_camel(prompt_name)
formatted_word = PromptContext.format_word(prompt_name)
file_context= """
from core import BafLog
# Optionally, import any other required modules or packages
from core import BafLog
# Optionally, import any other required modules or packages
class {prompt_name}: # Replace {prompt_name} with the name of your prompt
def {function}(data):
prompt = {string}
class {prompt_name}: # Replace {prompt_name} with the name of your prompt
def {function}(data):
prompt = {string}

@@ -38,6 +42,6 @@

return prompt.format(data=data)
return prompt.format(data=data)
"""
return file_context.format(prompt_name=camel_case_name, string='"""',function=prompt_name)
return file_context.format(prompt_name=camel_case_name, string='"""',function=formatted_word)

@@ -8,23 +8,21 @@

def snake_to_camel(word):
"""
Convert snake_case or a single word to CamelCase.
"""
return ''.join(x.capitalize() for x in word.split('_'))
formatted_word = word.replace('/', '_')
return ''.join(x.capitalize() for x in formatted_word.split('_'))
def context(tool_name):
camel_case_name = ToolContext.snake_to_camel(tool_name)
file_context= """
from core import BafLog
# Optionally, import any other required modules or packages
# E.g., from api import YourAPI
# E.g., from prompts import YourPrompt
from core import BafLog
# Optionally, import any other required modules or packages
# E.g., from api import YourAPI
# E.g., from prompts import YourPrompt
class {tool_name}: # Replace {tool_name} with the name of your tool
def __init__(self):
self.logger = BafLog
class {tool_name}: # Replace {tool_name} with the name of your tool
def __init__(self):
self.logger = BafLog
def execute(self, data):
prompt = 'Use your imported prompt here e.g., YourPrompt.your_function(data)'
return prompt
def execute(self, data):
prompt = 'Use your imported prompt here e.g., YourPrompt.your_function(data)'
return prompt

@@ -31,0 +29,0 @@

@@ -28,3 +28,3 @@ import os

full_path = os.path.join(base_dir, name + ".py")
file_context = ToolContext(name)
file_context = ToolContext.context(name)
create_file(full_path,file_context)

@@ -34,3 +34,3 @@ elif type_ == "api":

full_path = os.path.join(base_dir, name + ".py")
file_context = ApiContext(name)
file_context = ApiContext.context(name)
create_file(full_path,file_context)

@@ -40,3 +40,3 @@ elif type_ == "prompt":

full_path = os.path.join(base_dir, name + ".py")
file_context = PromptContext(name)
file_context = PromptContext.context(name)
create_file(full_path,file_context)

@@ -46,3 +46,3 @@ elif type_ == "llm":

full_path = os.path.join(base_dir, name + ".py")
file_context = LlmContext(name)
file_context = LlmContext.context(name)
create_file(full_path,file_context)

@@ -49,0 +49,0 @@ else:

Metadata-Version: 2.1
Name: bafcode
Version: 1.0.7
Version: 1.0.8
Summary: BafCode Framework CLI

@@ -5,0 +5,0 @@ Home-page: https://github.com/aitelabranding/bafcode_cli

@@ -5,3 +5,3 @@ from setuptools import setup, find_packages

name="bafcode",
version="1.0.7",
version="1.0.8",
packages=find_packages(),

@@ -8,0 +8,0 @@ install_requires=[