@arizeai/openinference-semantic-conventions - npm Package Compare versions

Comparing version 0.10.0 to 0.11.0

dist/esm/trace/SemanticConventions.d.ts (26)

@@ -133,2 +133,10 @@ /**
 export declare const LLM_MODEL_NAME: "llm.model_name";
+/**
+ * The provider of the inferences. E.g. the cloud provider
+ */
+export declare const LLM_PROVIDER: "llm.provider";
+/**
+ * The AI product as identified by the client or server
+ */
+export declare const LLM_SYSTEM: "llm.system";
 /** Token count for the completion by the llm */

@@ -307,2 +315,4 @@ export declare const LLM_TOKEN_COUNT_COMPLETION: "llm.token_count.completion";
     readonly LLM_TOKEN_COUNT_TOTAL: "llm.token_count.total";
+    readonly LLM_SYSTEM: "llm.system";
+    readonly LLM_PROVIDER: "llm.provider";
     readonly LLM_TOOLS: "llm.tools";

@@ -364,2 +374,18 @@ readonly MESSAGE_ROLE: "message.role";
 }
+export declare enum LLMSystem {
+    OPENAI = "openai",
+    ANTHROPIC = "anthropic",
+    MISTRALAI = "mistralai",
+    COHERE = "cohere",
+    VERTEXAI = "vertexai"
+}
+export declare enum LLMProvider {
+    OPENAI = "openai",
+    ANTHROPIC = "anthropic",
+    MISTRALAI = "mistralai",
+    COHERE = "cohere",
+    GOOGLE = "google",
+    AWS = "aws",
+    AZURE = "azure"
+}
 //# sourceMappingURL=SemanticConventions.d.ts.map
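
The new constants slot directly into OpenTelemetry span attributes. A minimal sketch, assuming the constants and enums are re-exported from the package root and that @opentelemetry/api is installed:

import { trace } from "@opentelemetry/api";
import {
  LLM_MODEL_NAME,
  LLM_PROVIDER,
  LLM_SYSTEM,
  LLMProvider,
  LLMSystem,
} from "@arizeai/openinference-semantic-conventions";

// An OpenAI model served through Azure: "llm.system" names the AI product,
// while "llm.provider" names who hosts the inference.
const tracer = trace.getTracer("example-instrumentation"); // hypothetical tracer name
const span = tracer.startSpan("llm-call");
span.setAttribute(LLM_SYSTEM, LLMSystem.OPENAI); // "llm.system" = "openai"
span.setAttribute(LLM_PROVIDER, LLMProvider.AZURE); // "llm.provider" = "azure"
span.setAttribute(LLM_MODEL_NAME, "gpt-4o"); // hypothetical model name
span.end();

The split lets a tracing backend distinguish, say, OpenAI models hosted on Azure from the same models served by OpenAI's own API.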

@@ -133,2 +133,10 @@ /**
 export var LLM_MODEL_NAME = "".concat(SemanticAttributePrefixes.llm, ".").concat(LLMAttributePostfixes.model_name);
+/**
+ * The provider of the inferences. E.g. the cloud provider
+ */
+export var LLM_PROVIDER = "".concat(SemanticAttributePrefixes.llm, ".provider");
+/**
+ * The AI product as identified by the client or server
+ */
+export var LLM_SYSTEM = "".concat(SemanticAttributePrefixes.llm, ".system");
 /** Token count for the completion by the llm */

@@ -308,2 +316,4 @@ export var LLM_TOKEN_COUNT_COMPLETION = "".concat(SemanticAttributePrefixes.llm, ".").concat(LLMAttributePostfixes.token_count, ".completion");
     LLM_TOKEN_COUNT_TOTAL: LLM_TOKEN_COUNT_TOTAL,
+    LLM_SYSTEM: LLM_SYSTEM,
+    LLM_PROVIDER: LLM_PROVIDER,
     LLM_TOOLS: LLM_TOOLS,

@@ -367,2 +377,21 @@ MESSAGE_ROLE: MESSAGE_ROLE,
 })(MimeType || (MimeType = {}));
+export var LLMSystem;
+(function (LLMSystem) {
+    LLMSystem["OPENAI"] = "openai";
+    LLMSystem["ANTHROPIC"] = "anthropic";
+    LLMSystem["MISTRALAI"] = "mistralai";
+    LLMSystem["COHERE"] = "cohere";
+    LLMSystem["VERTEXAI"] = "vertexai";
+})(LLMSystem || (LLMSystem = {}));
+export var LLMProvider;
+(function (LLMProvider) {
+    LLMProvider["OPENAI"] = "openai";
+    LLMProvider["ANTHROPIC"] = "anthropic";
+    LLMProvider["MISTRALAI"] = "mistralai";
+    LLMProvider["COHERE"] = "cohere";
+    // Cloud Providers of LLM systems
+    LLMProvider["GOOGLE"] = "google";
+    LLMProvider["AWS"] = "aws";
+    LLMProvider["AZURE"] = "azure";
+})(LLMProvider || (LLMProvider = {}));
 //# sourceMappingURL=SemanticConventions.js.map


@@ -133,2 +133,10 @@ /**
 export const LLM_MODEL_NAME = `${SemanticAttributePrefixes.llm}.${LLMAttributePostfixes.model_name}`;
+/**
+ * The provider of the inferences. E.g. the cloud provider
+ */
+export const LLM_PROVIDER = `${SemanticAttributePrefixes.llm}.provider`;
+/**
+ * The AI product as identified by the client or server
+ */
+export const LLM_SYSTEM = `${SemanticAttributePrefixes.llm}.system`;
 /** Token count for the completion by the llm */

@@ -308,2 +316,4 @@ export const LLM_TOKEN_COUNT_COMPLETION = `${SemanticAttributePrefixes.llm}.${LLMAttributePostfixes.token_count}.completion`;
     LLM_TOKEN_COUNT_TOTAL,
+    LLM_SYSTEM,
+    LLM_PROVIDER,
     LLM_TOOLS,

@@ -367,2 +377,21 @@ MESSAGE_ROLE,
 })(MimeType || (MimeType = {}));
+export var LLMSystem;
+(function (LLMSystem) {
+    LLMSystem["OPENAI"] = "openai";
+    LLMSystem["ANTHROPIC"] = "anthropic";
+    LLMSystem["MISTRALAI"] = "mistralai";
+    LLMSystem["COHERE"] = "cohere";
+    LLMSystem["VERTEXAI"] = "vertexai";
+})(LLMSystem || (LLMSystem = {}));
+export var LLMProvider;
+(function (LLMProvider) {
+    LLMProvider["OPENAI"] = "openai";
+    LLMProvider["ANTHROPIC"] = "anthropic";
+    LLMProvider["MISTRALAI"] = "mistralai";
+    LLMProvider["COHERE"] = "cohere";
+    // Cloud Providers of LLM systems
+    LLMProvider["GOOGLE"] = "google";
+    LLMProvider["AWS"] = "aws";
+    LLMProvider["AZURE"] = "azure";
+})(LLMProvider || (LLMProvider = {}));
 //# sourceMappingURL=SemanticConventions.js.map
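
Note that both JS builds compile the enums to plain objects of lowercase strings (one build emits ES5-style string concatenation, the other template literals), so the values can be enumerated and compared like any string. A quick check, again assuming the root re-export:

import { LLMProvider } from "@arizeai/openinference-semantic-conventions";

// String enums compile to a plain object whose values are the lowercase names.
const providers: string[] = Object.values(LLMProvider);
console.log(providers.includes("azure")); // true
console.log(providers.length); // 7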


dist/src/trace/SemanticConventions.js (33)

@@ -6,4 +6,4 @@ "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.PROMPT_TEMPLATE_VARIABLES = exports.RETRIEVAL_DOCUMENTS = exports.EMBEDDING_EMBEDDINGS = exports.EMBEDDING_VECTOR = exports.EMBEDDING_MODEL_NAME = exports.EMBEDDING_TEXT = exports.DOCUMENT_METADATA = exports.DOCUMENT_SCORE = exports.DOCUMENT_CONTENT = exports.DOCUMENT_ID = exports.IMAGE_URL = exports.MESSAGE_CONTENT_IMAGE = exports.MESSAGE_CONTENT_TEXT = exports.MESSAGE_CONTENT_TYPE = exports.MESSAGE_CONTENTS = exports.MESSAGE_CONTENT = exports.MESSAGE_FUNCTION_CALL_ARGUMENTS_JSON = exports.MESSAGE_FUNCTION_CALL_NAME = exports.TOOL_CALL_FUNCTION_ARGUMENTS_JSON = exports.TOOL_CALL_FUNCTION_NAME = exports.MESSAGE_TOOL_CALLS = exports.MESSAGE_NAME = exports.MESSAGE_ROLE = exports.LLM_TOKEN_COUNT_TOTAL = exports.LLM_TOKEN_COUNT_PROMPT = exports.LLM_TOKEN_COUNT_COMPLETION = exports.LLM_MODEL_NAME = exports.LLM_OUTPUT_MESSAGES = exports.LLM_INVOCATION_PARAMETERS = exports.LLM_PROMPTS = exports.LLM_INPUT_MESSAGES = exports.OUTPUT_MIME_TYPE = exports.OUTPUT_VALUE = exports.INPUT_MIME_TYPE = exports.INPUT_VALUE = exports.UserAttributePostfixes = exports.SessionAttributePostfixes = exports.TagAttributePostfixes = exports.DocumentAttributePostfixes = exports.ToolCallAttributePostfixes = exports.ImageAttributesPostfixes = exports.MessageContentsAttributePostfixes = exports.MessageAttributePostfixes = exports.ToolAttributePostfixes = exports.EmbeddingAttributePostfixes = exports.RerankerAttributePostfixes = exports.RetrievalAttributePostfixes = exports.LLMPromptTemplateAttributePostfixes = exports.LLMAttributePostfixes = exports.SemanticAttributePrefixes = void 0;
-exports.MimeType = exports.OpenInferenceSpanKind = exports.SemanticConventions = exports.TAG_TAGS = exports.PROMPT_TEMPLATE_VERSION = exports.METADATA = exports.RERANKER_TOP_K = exports.RERANKER_MODEL_NAME = exports.RERANKER_QUERY = exports.RERANKER_OUTPUT_DOCUMENTS = exports.RERANKER_INPUT_DOCUMENTS = exports.USER_ID = exports.SESSION_ID = exports.TOOL_JSON_SCHEMA = exports.TOOL_PARAMETERS = exports.TOOL_DESCRIPTION = exports.TOOL_NAME = exports.LLM_TOOLS = exports.LLM_FUNCTION_CALL = exports.PROMPT_TEMPLATE_TEMPLATE = void 0;
+exports.EMBEDDING_EMBEDDINGS = exports.EMBEDDING_VECTOR = exports.EMBEDDING_MODEL_NAME = exports.EMBEDDING_TEXT = exports.DOCUMENT_METADATA = exports.DOCUMENT_SCORE = exports.DOCUMENT_CONTENT = exports.DOCUMENT_ID = exports.IMAGE_URL = exports.MESSAGE_CONTENT_IMAGE = exports.MESSAGE_CONTENT_TEXT = exports.MESSAGE_CONTENT_TYPE = exports.MESSAGE_CONTENTS = exports.MESSAGE_CONTENT = exports.MESSAGE_FUNCTION_CALL_ARGUMENTS_JSON = exports.MESSAGE_FUNCTION_CALL_NAME = exports.TOOL_CALL_FUNCTION_ARGUMENTS_JSON = exports.TOOL_CALL_FUNCTION_NAME = exports.MESSAGE_TOOL_CALLS = exports.MESSAGE_NAME = exports.MESSAGE_ROLE = exports.LLM_TOKEN_COUNT_TOTAL = exports.LLM_TOKEN_COUNT_PROMPT = exports.LLM_TOKEN_COUNT_COMPLETION = exports.LLM_SYSTEM = exports.LLM_PROVIDER = exports.LLM_MODEL_NAME = exports.LLM_OUTPUT_MESSAGES = exports.LLM_INVOCATION_PARAMETERS = exports.LLM_PROMPTS = exports.LLM_INPUT_MESSAGES = exports.OUTPUT_MIME_TYPE = exports.OUTPUT_VALUE = exports.INPUT_MIME_TYPE = exports.INPUT_VALUE = exports.UserAttributePostfixes = exports.SessionAttributePostfixes = exports.TagAttributePostfixes = exports.DocumentAttributePostfixes = exports.ToolCallAttributePostfixes = exports.ImageAttributesPostfixes = exports.MessageContentsAttributePostfixes = exports.MessageAttributePostfixes = exports.ToolAttributePostfixes = exports.EmbeddingAttributePostfixes = exports.RerankerAttributePostfixes = exports.RetrievalAttributePostfixes = exports.LLMPromptTemplateAttributePostfixes = exports.LLMAttributePostfixes = exports.SemanticAttributePrefixes = void 0;
+exports.LLMProvider = exports.LLMSystem = exports.MimeType = exports.OpenInferenceSpanKind = exports.SemanticConventions = exports.TAG_TAGS = exports.PROMPT_TEMPLATE_VERSION = exports.METADATA = exports.RERANKER_TOP_K = exports.RERANKER_MODEL_NAME = exports.RERANKER_QUERY = exports.RERANKER_OUTPUT_DOCUMENTS = exports.RERANKER_INPUT_DOCUMENTS = exports.USER_ID = exports.SESSION_ID = exports.TOOL_JSON_SCHEMA = exports.TOOL_PARAMETERS = exports.TOOL_DESCRIPTION = exports.TOOL_NAME = exports.LLM_TOOLS = exports.LLM_FUNCTION_CALL = exports.PROMPT_TEMPLATE_TEMPLATE = exports.PROMPT_TEMPLATE_VARIABLES = exports.RETRIEVAL_DOCUMENTS = void 0;
 exports.SemanticAttributePrefixes = {

@@ -138,2 +138,10 @@ input: "input",
 exports.LLM_MODEL_NAME = `${exports.SemanticAttributePrefixes.llm}.${exports.LLMAttributePostfixes.model_name}`;
+/**
+ * The provider of the inferences. E.g. the cloud provider
+ */
+exports.LLM_PROVIDER = `${exports.SemanticAttributePrefixes.llm}.provider`;
+/**
+ * The AI product as identified by the client or server
+ */
+exports.LLM_SYSTEM = `${exports.SemanticAttributePrefixes.llm}.system`;
 /** Token count for the completion by the llm */

@@ -313,2 +321,4 @@ exports.LLM_TOKEN_COUNT_COMPLETION = `${exports.SemanticAttributePrefixes.llm}.${exports.LLMAttributePostfixes.token_count}.completion`;
     LLM_TOKEN_COUNT_TOTAL: exports.LLM_TOKEN_COUNT_TOTAL,
+    LLM_SYSTEM: exports.LLM_SYSTEM,
+    LLM_PROVIDER: exports.LLM_PROVIDER,
     LLM_TOOLS: exports.LLM_TOOLS,

@@ -372,2 +382,21 @@ MESSAGE_ROLE: exports.MESSAGE_ROLE,
 })(MimeType || (exports.MimeType = MimeType = {}));
+var LLMSystem;
+(function (LLMSystem) {
+    LLMSystem["OPENAI"] = "openai";
+    LLMSystem["ANTHROPIC"] = "anthropic";
+    LLMSystem["MISTRALAI"] = "mistralai";
+    LLMSystem["COHERE"] = "cohere";
+    LLMSystem["VERTEXAI"] = "vertexai";
+})(LLMSystem || (exports.LLMSystem = LLMSystem = {}));
+var LLMProvider;
+(function (LLMProvider) {
+    LLMProvider["OPENAI"] = "openai";
+    LLMProvider["ANTHROPIC"] = "anthropic";
+    LLMProvider["MISTRALAI"] = "mistralai";
+    LLMProvider["COHERE"] = "cohere";
+    // Cloud Providers of LLM systems
+    LLMProvider["GOOGLE"] = "google";
+    LLMProvider["AWS"] = "aws";
+    LLMProvider["AZURE"] = "azure";
+})(LLMProvider || (exports.LLMProvider = LLMProvider = {}));
 //# sourceMappingURL=SemanticConventions.js.map
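
The CommonJS build at dist/src assigns the same values onto exports, so require() consumers see them as well. A minimal Node sketch, again assuming the package root re-exports this module:

const {
  LLM_PROVIDER,
  LLMProvider,
} = require("@arizeai/openinference-semantic-conventions");

console.log(LLM_PROVIDER); // "llm.provider"
console.log(LLMProvider.GOOGLE); // "google"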

package.json (2)

 {
   "name": "@arizeai/openinference-semantic-conventions",
-  "version": "0.10.0",
+  "version": "0.11.0",
   "private": false,

@@ -5,0 +5,0 @@ "main": "dist/src/index.js",

@@ -162,2 +162,13 @@ /**
+/**
+ * The provider of the inferences. E.g. the cloud provider
+ */
+export const LLM_PROVIDER =
+  `${SemanticAttributePrefixes.llm}.provider` as const;
+/**
+ * The AI product as identified by the client or server
+ */
+export const LLM_SYSTEM = `${SemanticAttributePrefixes.llm}.system` as const;
 /** Token count for the completion by the llm */

@@ -417,2 +428,4 @@ export const LLM_TOKEN_COUNT_COMPLETION =
   LLM_TOKEN_COUNT_TOTAL,
+  LLM_SYSTEM,
+  LLM_PROVIDER,
   LLM_TOOLS,

@@ -476,1 +489,20 @@ MESSAGE_ROLE,
 }
+export enum LLMSystem {
+  OPENAI = "openai",
+  ANTHROPIC = "anthropic",
+  MISTRALAI = "mistralai",
+  COHERE = "cohere",
+  VERTEXAI = "vertexai",
+}
+export enum LLMProvider {
+  OPENAI = "openai",
+  ANTHROPIC = "anthropic",
+  MISTRALAI = "mistralai",
+  COHERE = "cohere",
+  // Cloud Providers of LLM systems
+  GOOGLE = "google",
+  AWS = "aws",
+  AZURE = "azure",
+}
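
In the source, LLM_PROVIDER and LLM_SYSTEM are declared with `as const`, which narrows them to the literal types "llm.provider" and "llm.system". One possible use of that narrowing, sketched with a hypothetical attribute record type that is not part of the package:

import {
  LLM_PROVIDER,
  LLM_SYSTEM,
} from "@arizeai/openinference-semantic-conventions";

// Because the constants have literal types, they can serve as computed keys
// in a type, keeping attribute names and value types in sync.
type LLMAttributes = {
  [LLM_SYSTEM]?: string; // key is exactly "llm.system"
  [LLM_PROVIDER]?: string; // key is exactly "llm.provider"
};

const attrs: LLMAttributes = {
  "llm.system": "openai",
  "llm.provider": "aws", // e.g. an OpenAI-compatible model hosted on AWS
};
console.log(attrs);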

Sorry, the diffs of the remaining nine files are not supported yet.