🚀 Big News: Socket Acquires Coana to Bring Reachability Analysis to Every AppSec Team. Learn more
Socket
Book a Demo · Install · Sign in
Socket

@arizeai/openinference-semantic-conventions

Package Overview
Dependencies
Maintainers
0
Versions
32
Alerts
File Explorer

Advanced tools

Socket logo

Install Socket

Detect and block malicious and high-risk dependencies

Install

@arizeai/openinference-semantic-conventions - npm Package Compare versions

Comparing version

0.9.0

to

0.10.0

13

dist/esm/trace/SemanticConventions.d.ts

@@ -33,2 +33,3 @@ /**

readonly function_call: "function_call";
readonly tools: "tools";
};

@@ -59,2 +60,3 @@ export declare const LLMPromptTemplateAttributePostfixes: {

readonly parameters: "parameters";
readonly json_schema: "json_schema";
};

@@ -232,2 +234,6 @@ export declare const MessageAttributePostfixes: {

/**
* List of tools that are advertised to the LLM to be able to call
*/
export declare const LLM_TOOLS: "llm.tools";
/**
* The name of a tool

@@ -245,2 +251,7 @@ */

/**
* The json schema of a tool input, It is RECOMMENDED that this be in the
* OpenAI tool calling format: https://platform.openai.com/docs/assistants/tools
*/
export declare const TOOL_JSON_SCHEMA: "tool.json_schema";
/**
* The session id of a trace. Used to correlate spans in a single session.

@@ -299,2 +310,3 @@ */

readonly LLM_TOKEN_COUNT_TOTAL: "llm.token_count.total";
readonly LLM_TOOLS: "llm.tools";
readonly MESSAGE_ROLE: "message.role";

@@ -323,2 +335,3 @@ readonly MESSAGE_NAME: "message.name";

readonly TOOL_PARAMETERS: "tool.parameters";
readonly TOOL_JSON_SCHEMA: "tool.json_schema";
readonly PROMPT_TEMPLATE_VARIABLES: "llm.prompt_template.variables";

@@ -325,0 +338,0 @@ readonly PROMPT_TEMPLATE_TEMPLATE: "llm.prompt_template.template";

@@ -33,2 +33,3 @@ /**

function_call: "function_call",
tools: "tools",
};

@@ -59,2 +60,3 @@ export var LLMPromptTemplateAttributePostfixes = {

parameters: "parameters",
json_schema: "json_schema",
};

@@ -233,2 +235,6 @@ export var MessageAttributePostfixes = {

/**
* List of tools that are advertised to the LLM to be able to call
*/
export var LLM_TOOLS = "".concat(SemanticAttributePrefixes.llm, ".").concat(LLMAttributePostfixes.tools);
/**
* The name of a tool

@@ -246,2 +252,7 @@ */

/**
* The json schema of a tool input, It is RECOMMENDED that this be in the
* OpenAI tool calling format: https://platform.openai.com/docs/assistants/tools
*/
export var TOOL_JSON_SCHEMA = "".concat(SemanticAttributePrefixes.tool, ".").concat(ToolAttributePostfixes.json_schema);
/**
* The session id of a trace. Used to correlate spans in a single session.

@@ -300,2 +311,3 @@ */

LLM_TOKEN_COUNT_TOTAL: LLM_TOKEN_COUNT_TOTAL,
LLM_TOOLS: LLM_TOOLS,
MESSAGE_ROLE: MESSAGE_ROLE,

@@ -324,2 +336,3 @@ MESSAGE_NAME: MESSAGE_NAME,

TOOL_PARAMETERS: TOOL_PARAMETERS,
TOOL_JSON_SCHEMA: TOOL_JSON_SCHEMA,
PROMPT_TEMPLATE_VARIABLES: PROMPT_TEMPLATE_VARIABLES,

@@ -326,0 +339,0 @@ PROMPT_TEMPLATE_TEMPLATE: PROMPT_TEMPLATE_TEMPLATE,

@@ -33,2 +33,3 @@ /**

readonly function_call: "function_call";
readonly tools: "tools";
};

@@ -59,2 +60,3 @@ export declare const LLMPromptTemplateAttributePostfixes: {

readonly parameters: "parameters";
readonly json_schema: "json_schema";
};

@@ -232,2 +234,6 @@ export declare const MessageAttributePostfixes: {

/**
* List of tools that are advertised to the LLM to be able to call
*/
export declare const LLM_TOOLS: "llm.tools";
/**
* The name of a tool

@@ -245,2 +251,7 @@ */

/**
* The json schema of a tool input, It is RECOMMENDED that this be in the
* OpenAI tool calling format: https://platform.openai.com/docs/assistants/tools
*/
export declare const TOOL_JSON_SCHEMA: "tool.json_schema";
/**
* The session id of a trace. Used to correlate spans in a single session.

@@ -299,2 +310,3 @@ */

readonly LLM_TOKEN_COUNT_TOTAL: "llm.token_count.total";
readonly LLM_TOOLS: "llm.tools";
readonly MESSAGE_ROLE: "message.role";

@@ -323,2 +335,3 @@ readonly MESSAGE_NAME: "message.name";

readonly TOOL_PARAMETERS: "tool.parameters";
readonly TOOL_JSON_SCHEMA: "tool.json_schema";
readonly PROMPT_TEMPLATE_VARIABLES: "llm.prompt_template.variables";

@@ -325,0 +338,0 @@ readonly PROMPT_TEMPLATE_TEMPLATE: "llm.prompt_template.template";

@@ -33,2 +33,3 @@ /**

function_call: "function_call",
tools: "tools",
};

@@ -59,2 +60,3 @@ export const LLMPromptTemplateAttributePostfixes = {

parameters: "parameters",
json_schema: "json_schema",
};

@@ -233,2 +235,6 @@ export const MessageAttributePostfixes = {

/**
* List of tools that are advertised to the LLM to be able to call
*/
export const LLM_TOOLS = `${SemanticAttributePrefixes.llm}.${LLMAttributePostfixes.tools}`;
/**
* The name of a tool

@@ -246,2 +252,7 @@ */

/**
* The json schema of a tool input, It is RECOMMENDED that this be in the
* OpenAI tool calling format: https://platform.openai.com/docs/assistants/tools
*/
export const TOOL_JSON_SCHEMA = `${SemanticAttributePrefixes.tool}.${ToolAttributePostfixes.json_schema}`;
/**
* The session id of a trace. Used to correlate spans in a single session.

@@ -300,2 +311,3 @@ */

LLM_TOKEN_COUNT_TOTAL,
LLM_TOOLS,
MESSAGE_ROLE,

@@ -324,2 +336,3 @@ MESSAGE_NAME,

TOOL_PARAMETERS,
TOOL_JSON_SCHEMA,
PROMPT_TEMPLATE_VARIABLES,

@@ -326,0 +339,0 @@ PROMPT_TEMPLATE_TEMPLATE,

@@ -33,2 +33,3 @@ /**

readonly function_call: "function_call";
readonly tools: "tools";
};

@@ -59,2 +60,3 @@ export declare const LLMPromptTemplateAttributePostfixes: {

readonly parameters: "parameters";
readonly json_schema: "json_schema";
};

@@ -232,2 +234,6 @@ export declare const MessageAttributePostfixes: {

/**
* List of tools that are advertised to the LLM to be able to call
*/
export declare const LLM_TOOLS: "llm.tools";
/**
* The name of a tool

@@ -245,2 +251,7 @@ */

/**
* The json schema of a tool input, It is RECOMMENDED that this be in the
* OpenAI tool calling format: https://platform.openai.com/docs/assistants/tools
*/
export declare const TOOL_JSON_SCHEMA: "tool.json_schema";
/**
* The session id of a trace. Used to correlate spans in a single session.

@@ -299,2 +310,3 @@ */

readonly LLM_TOKEN_COUNT_TOTAL: "llm.token_count.total";
readonly LLM_TOOLS: "llm.tools";
readonly MESSAGE_ROLE: "message.role";

@@ -323,2 +335,3 @@ readonly MESSAGE_NAME: "message.name";

readonly TOOL_PARAMETERS: "tool.parameters";
readonly TOOL_JSON_SCHEMA: "tool.json_schema";
readonly PROMPT_TEMPLATE_VARIABLES: "llm.prompt_template.variables";

@@ -325,0 +338,0 @@ readonly PROMPT_TEMPLATE_TEMPLATE: "llm.prompt_template.template";

15

dist/src/trace/SemanticConventions.js

@@ -7,3 +7,3 @@ "use strict";

exports.PROMPT_TEMPLATE_VARIABLES = exports.RETRIEVAL_DOCUMENTS = exports.EMBEDDING_EMBEDDINGS = exports.EMBEDDING_VECTOR = exports.EMBEDDING_MODEL_NAME = exports.EMBEDDING_TEXT = exports.DOCUMENT_METADATA = exports.DOCUMENT_SCORE = exports.DOCUMENT_CONTENT = exports.DOCUMENT_ID = exports.IMAGE_URL = exports.MESSAGE_CONTENT_IMAGE = exports.MESSAGE_CONTENT_TEXT = exports.MESSAGE_CONTENT_TYPE = exports.MESSAGE_CONTENTS = exports.MESSAGE_CONTENT = exports.MESSAGE_FUNCTION_CALL_ARGUMENTS_JSON = exports.MESSAGE_FUNCTION_CALL_NAME = exports.TOOL_CALL_FUNCTION_ARGUMENTS_JSON = exports.TOOL_CALL_FUNCTION_NAME = exports.MESSAGE_TOOL_CALLS = exports.MESSAGE_NAME = exports.MESSAGE_ROLE = exports.LLM_TOKEN_COUNT_TOTAL = exports.LLM_TOKEN_COUNT_PROMPT = exports.LLM_TOKEN_COUNT_COMPLETION = exports.LLM_MODEL_NAME = exports.LLM_OUTPUT_MESSAGES = exports.LLM_INVOCATION_PARAMETERS = exports.LLM_PROMPTS = exports.LLM_INPUT_MESSAGES = exports.OUTPUT_MIME_TYPE = exports.OUTPUT_VALUE = exports.INPUT_MIME_TYPE = exports.INPUT_VALUE = exports.UserAttributePostfixes = exports.SessionAttributePostfixes = exports.TagAttributePostfixes = exports.DocumentAttributePostfixes = exports.ToolCallAttributePostfixes = exports.ImageAttributesPostfixes = exports.MessageContentsAttributePostfixes = exports.MessageAttributePostfixes = exports.ToolAttributePostfixes = exports.EmbeddingAttributePostfixes = exports.RerankerAttributePostfixes = exports.RetrievalAttributePostfixes = exports.LLMPromptTemplateAttributePostfixes = exports.LLMAttributePostfixes = exports.SemanticAttributePrefixes = void 0;
exports.MimeType = exports.OpenInferenceSpanKind = exports.SemanticConventions = exports.TAG_TAGS = exports.PROMPT_TEMPLATE_VERSION = exports.METADATA = exports.RERANKER_TOP_K = exports.RERANKER_MODEL_NAME = exports.RERANKER_QUERY = exports.RERANKER_OUTPUT_DOCUMENTS = exports.RERANKER_INPUT_DOCUMENTS = exports.USER_ID = exports.SESSION_ID = exports.TOOL_PARAMETERS = exports.TOOL_DESCRIPTION = exports.TOOL_NAME = exports.LLM_FUNCTION_CALL = exports.PROMPT_TEMPLATE_TEMPLATE = void 0;
exports.MimeType = exports.OpenInferenceSpanKind = exports.SemanticConventions = exports.TAG_TAGS = exports.PROMPT_TEMPLATE_VERSION = exports.METADATA = exports.RERANKER_TOP_K = exports.RERANKER_MODEL_NAME = exports.RERANKER_QUERY = exports.RERANKER_OUTPUT_DOCUMENTS = exports.RERANKER_INPUT_DOCUMENTS = exports.USER_ID = exports.SESSION_ID = exports.TOOL_JSON_SCHEMA = exports.TOOL_PARAMETERS = exports.TOOL_DESCRIPTION = exports.TOOL_NAME = exports.LLM_TOOLS = exports.LLM_FUNCTION_CALL = exports.PROMPT_TEMPLATE_TEMPLATE = void 0;
exports.SemanticAttributePrefixes = {

@@ -38,2 +38,3 @@ input: "input",

function_call: "function_call",
tools: "tools",
};

@@ -64,2 +65,3 @@ exports.LLMPromptTemplateAttributePostfixes = {

parameters: "parameters",
json_schema: "json_schema",
};

@@ -238,2 +240,6 @@ exports.MessageAttributePostfixes = {

/**
* List of tools that are advertised to the LLM to be able to call
*/
exports.LLM_TOOLS = `${exports.SemanticAttributePrefixes.llm}.${exports.LLMAttributePostfixes.tools}`;
/**
* The name of a tool

@@ -251,2 +257,7 @@ */

/**
* The json schema of a tool input, It is RECOMMENDED that this be in the
* OpenAI tool calling format: https://platform.openai.com/docs/assistants/tools
*/
exports.TOOL_JSON_SCHEMA = `${exports.SemanticAttributePrefixes.tool}.${exports.ToolAttributePostfixes.json_schema}`;
/**
* The session id of a trace. Used to correlate spans in a single session.

@@ -305,2 +316,3 @@ */

LLM_TOKEN_COUNT_TOTAL: exports.LLM_TOKEN_COUNT_TOTAL,
LLM_TOOLS: exports.LLM_TOOLS,
MESSAGE_ROLE: exports.MESSAGE_ROLE,

@@ -329,2 +341,3 @@ MESSAGE_NAME: exports.MESSAGE_NAME,

TOOL_PARAMETERS: exports.TOOL_PARAMETERS,
TOOL_JSON_SCHEMA: exports.TOOL_JSON_SCHEMA,
PROMPT_TEMPLATE_VARIABLES: exports.PROMPT_TEMPLATE_VARIABLES,

@@ -331,0 +344,0 @@ PROMPT_TEMPLATE_TEMPLATE: exports.PROMPT_TEMPLATE_TEMPLATE,

2

package.json
{
"name": "@arizeai/openinference-semantic-conventions",
"version": "0.9.0",
"version": "0.10.0",
"private": false,

@@ -5,0 +5,0 @@ "main": "dist/src/index.js",

@@ -35,2 +35,3 @@ /**

function_call: "function_call",
tools: "tools",
} as const;

@@ -66,2 +67,3 @@

parameters: "parameters",
json_schema: "json_schema",
} as const;

@@ -313,2 +315,8 @@

/**
* List of tools that are advertised to the LLM to be able to call
*/
export const LLM_TOOLS =
`${SemanticAttributePrefixes.llm}.${LLMAttributePostfixes.tools}` as const;
/**
* The name of a tool

@@ -332,2 +340,9 @@ */

/**
* The json schema of a tool input, It is RECOMMENDED that this be in the
* OpenAI tool calling format: https://platform.openai.com/docs/assistants/tools
*/
export const TOOL_JSON_SCHEMA =
`${SemanticAttributePrefixes.tool}.${ToolAttributePostfixes.json_schema}` as const;
/**
* The session id of a trace. Used to correlate spans in a single session.

@@ -405,2 +420,3 @@ */

LLM_TOKEN_COUNT_TOTAL,
LLM_TOOLS,
MESSAGE_ROLE,

@@ -429,2 +445,3 @@ MESSAGE_NAME,

TOOL_PARAMETERS,
TOOL_JSON_SCHEMA,
PROMPT_TEMPLATE_VARIABLES,

@@ -431,0 +448,0 @@ PROMPT_TEMPLATE_TEMPLATE,

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet

Sorry, the diff of this file is not supported yet