@arizeai/openinference-semantic-conventions
Comparing version 1.0.0 with 1.0.1

This release adds prompt provenance attributes (`prompt.vendor`, `prompt.id`, `prompt.url`) and finer-grained token counts (completion reasoning, prompt cache read/write) across the package's build outputs.
Type declarations (.d.ts):

```diff
@@ -24,2 +24,3 @@ /**
readonly audio: "audio";
readonly prompt: "prompt";
};
@@ -108,2 +109,7 @@ export declare const LLMAttributePostfixes: {
};
export declare const PromptAttributePostfixes: {
  readonly vendor: "vendor";
  readonly id: "id";
  readonly url: "url";
};
/**
@@ -155,4 +161,10 @@ * The input to any span
export declare const LLM_TOKEN_COUNT_COMPLETION: "llm.token_count.completion";
/** Token count for the reasoning steps in the completion */
export declare const LLM_TOKEN_COUNT_COMPLETION_DETAILS_REASONING: "llm.token_count.completion_details.reasoning";
/** Token count for the prompt to the llm */
export declare const LLM_TOKEN_COUNT_PROMPT: "llm.token_count.prompt";
/** Token count for the tokens written to the cache */
export declare const LLM_TOKEN_COUNT_PROMPT_DETAILS_CACHE_WRITE: "llm.token_count.prompt_details.cache_write";
/** Token count for the tokens retrieved from the cache */
export declare const LLM_TOKEN_COUNT_PROMPT_DETAILS_CACHE_READ: "llm.token_count.prompt_details.cache_read";
/** Token count for the entire transaction with the llm */
@@ -333,2 +345,14 @@ export declare const LLM_TOKEN_COUNT_TOTAL: "llm.token_count.total";
export declare const AUDIO_TRANSCRIPT: "audio.transcript";
/**
 * The vendor or origin of the prompt, e.g. a prompt library, a specialized service, etc.
 */
export declare const PROMPT_VENDOR: "prompt.vendor";
/**
 * A vendor-specific id used to locate the prompt
 */
export declare const PROMPT_ID: "prompt.id";
/**
 * A vendor-specific URL used to locate the prompt
 */
export declare const PROMPT_URL: "prompt.url";
export declare const SemanticConventions: {
@@ -346,3 +370,6 @@ readonly IMAGE_URL: "image.url";
readonly LLM_TOKEN_COUNT_COMPLETION: "llm.token_count.completion";
readonly LLM_TOKEN_COUNT_COMPLETION_DETAILS_REASONING: "llm.token_count.completion_details.reasoning";
readonly LLM_TOKEN_COUNT_PROMPT: "llm.token_count.prompt";
readonly LLM_TOKEN_COUNT_PROMPT_DETAILS_CACHE_WRITE: "llm.token_count.prompt_details.cache_write";
readonly LLM_TOKEN_COUNT_PROMPT_DETAILS_CACHE_READ: "llm.token_count.prompt_details.cache_read";
readonly LLM_TOKEN_COUNT_TOTAL: "llm.token_count.total";
@@ -393,2 +420,5 @@ readonly LLM_SYSTEM: "llm.system";
readonly OPENINFERENCE_SPAN_KIND: "openinference.span.kind";
readonly PROMPT_VENDOR: "prompt.vendor";
readonly PROMPT_ID: "prompt.id";
readonly PROMPT_URL: "prompt.url";
};
@@ -395,0 +425,0 @@ export declare enum OpenInferenceSpanKind {
```
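The new `prompt.*` attributes and expanded token counts are plain string constants, so they drop straight into any OpenTelemetry instrumentation. Below is a minimal sketch of tagging an LLM span with them, assuming a tracer provider is already configured; the tracer name, span name, and token values are illustrative, not from the package:

```ts
import { trace } from "@opentelemetry/api";
import { SemanticConventions } from "@arizeai/openinference-semantic-conventions";

const tracer = trace.getTracer("example-llm-instrumentation");

tracer.startActiveSpan("llm-call", (span) => {
  // Prompt provenance: where the prompt was fetched from (illustrative values)
  span.setAttribute(SemanticConventions.PROMPT_VENDOR, "my-prompt-library");
  span.setAttribute(SemanticConventions.PROMPT_ID, "prompt-123");

  // Token accounting, including the new cache and reasoning breakdowns
  span.setAttribute(SemanticConventions.LLM_TOKEN_COUNT_PROMPT, 128);
  span.setAttribute(
    SemanticConventions.LLM_TOKEN_COUNT_PROMPT_DETAILS_CACHE_READ,
    64,
  );
  span.setAttribute(
    SemanticConventions.LLM_TOKEN_COUNT_COMPLETION_DETAILS_REASONING,
    32,
  );
  span.end();
});
```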
ES module build:

```diff
@@ -24,2 +24,3 @@ /**
audio: "audio",
prompt: "prompt",
};
@@ -108,2 +109,7 @@ export const LLMAttributePostfixes = {
};
export const PromptAttributePostfixes = {
  vendor: "vendor",
  id: "id",
  url: "url",
};
/**
@@ -155,4 +161,10 @@ * The input to any span
export const LLM_TOKEN_COUNT_COMPLETION = `${SemanticAttributePrefixes.llm}.${LLMAttributePostfixes.token_count}.completion`;
/** Token count for the reasoning steps in the completion */
export const LLM_TOKEN_COUNT_COMPLETION_DETAILS_REASONING = `${SemanticAttributePrefixes.llm}.${LLMAttributePostfixes.token_count}.completion_details.reasoning`;
/** Token count for the prompt to the llm */
export const LLM_TOKEN_COUNT_PROMPT = `${SemanticAttributePrefixes.llm}.${LLMAttributePostfixes.token_count}.prompt`;
/** Token count for the tokens written to the cache */
export const LLM_TOKEN_COUNT_PROMPT_DETAILS_CACHE_WRITE = `${SemanticAttributePrefixes.llm}.${LLMAttributePostfixes.token_count}.prompt_details.cache_write`;
/** Token count for the tokens retrieved from the cache */
export const LLM_TOKEN_COUNT_PROMPT_DETAILS_CACHE_READ = `${SemanticAttributePrefixes.llm}.${LLMAttributePostfixes.token_count}.prompt_details.cache_read`;
/** Token count for the entire transaction with the llm */
@@ -334,2 +346,14 @@ export const LLM_TOKEN_COUNT_TOTAL = `${SemanticAttributePrefixes.llm}.${LLMAttributePostfixes.token_count}.total`;
export const AUDIO_TRANSCRIPT = `${SemanticAttributePrefixes.audio}.${AudioAttributesPostfixes.transcript}`;
/**
 * The vendor or origin of the prompt, e.g. a prompt library, a specialized service, etc.
 */
export const PROMPT_VENDOR = `${SemanticAttributePrefixes.prompt}.${PromptAttributePostfixes.vendor}`;
/**
 * A vendor-specific id used to locate the prompt
 */
export const PROMPT_ID = `${SemanticAttributePrefixes.prompt}.${PromptAttributePostfixes.id}`;
/**
 * A vendor-specific URL used to locate the prompt
 */
export const PROMPT_URL = `${SemanticAttributePrefixes.prompt}.${PromptAttributePostfixes.url}`;
export const SemanticConventions = {
@@ -347,3 +371,6 @@ IMAGE_URL,
LLM_TOKEN_COUNT_COMPLETION,
LLM_TOKEN_COUNT_COMPLETION_DETAILS_REASONING,
LLM_TOKEN_COUNT_PROMPT,
LLM_TOKEN_COUNT_PROMPT_DETAILS_CACHE_WRITE,
LLM_TOKEN_COUNT_PROMPT_DETAILS_CACHE_READ,
LLM_TOKEN_COUNT_TOTAL,
@@ -394,2 +421,5 @@ LLM_SYSTEM,
OPENINFERENCE_SPAN_KIND: `${SemanticAttributePrefixes.openinference}.span.kind`,
PROMPT_VENDOR,
PROMPT_ID,
PROMPT_URL,
};
@@ -396,0 +426,0 @@ export var OpenInferenceSpanKind;
```
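Each exported constant is composed at module load from a prefix and a postfix, so the runtime string always matches the literal type published in the declaration file. A quick sketch verifying this, using only exports visible in the diff (the console checks are illustrative):

```ts
import {
  SemanticAttributePrefixes,
  PromptAttributePostfixes,
  PROMPT_VENDOR,
  PROMPT_URL,
} from "@arizeai/openinference-semantic-conventions";

console.log(PROMPT_VENDOR); // "prompt.vendor"
console.log(
  PROMPT_URL ===
    `${SemanticAttributePrefixes.prompt}.${PromptAttributePostfixes.url}`,
); // true
```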
CommonJS build:

```diff
@@ -6,4 +6,4 @@ "use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.EMBEDDING_TEXT = exports.DOCUMENT_METADATA = exports.DOCUMENT_SCORE = exports.DOCUMENT_CONTENT = exports.DOCUMENT_ID = exports.IMAGE_URL = exports.MESSAGE_CONTENT_IMAGE = exports.MESSAGE_CONTENT_TEXT = exports.MESSAGE_CONTENT_TYPE = exports.MESSAGE_CONTENTS = exports.MESSAGE_CONTENT = exports.MESSAGE_FUNCTION_CALL_ARGUMENTS_JSON = exports.MESSAGE_FUNCTION_CALL_NAME = exports.TOOL_CALL_ID = exports.TOOL_CALL_FUNCTION_ARGUMENTS_JSON = exports.TOOL_CALL_FUNCTION_NAME = exports.MESSAGE_TOOL_CALL_ID = exports.MESSAGE_TOOL_CALLS = exports.MESSAGE_NAME = exports.MESSAGE_ROLE = exports.LLM_TOKEN_COUNT_TOTAL = exports.LLM_TOKEN_COUNT_PROMPT = exports.LLM_TOKEN_COUNT_COMPLETION = exports.LLM_SYSTEM = exports.LLM_PROVIDER = exports.LLM_MODEL_NAME = exports.LLM_OUTPUT_MESSAGES = exports.LLM_INVOCATION_PARAMETERS = exports.LLM_PROMPTS = exports.LLM_INPUT_MESSAGES = exports.OUTPUT_MIME_TYPE = exports.OUTPUT_VALUE = exports.INPUT_MIME_TYPE = exports.INPUT_VALUE = exports.AudioAttributesPostfixes = exports.UserAttributePostfixes = exports.SessionAttributePostfixes = exports.TagAttributePostfixes = exports.DocumentAttributePostfixes = exports.ToolCallAttributePostfixes = exports.ImageAttributesPostfixes = exports.MessageContentsAttributePostfixes = exports.MessageAttributePostfixes = exports.ToolAttributePostfixes = exports.EmbeddingAttributePostfixes = exports.RerankerAttributePostfixes = exports.RetrievalAttributePostfixes = exports.LLMPromptTemplateAttributePostfixes = exports.LLMAttributePostfixes = exports.SemanticAttributePrefixes = void 0;
exports.LLMProvider = exports.LLMSystem = exports.MimeType = exports.OpenInferenceSpanKind = exports.SemanticConventions = exports.AUDIO_TRANSCRIPT = exports.AUDIO_MIME_TYPE = exports.AUDIO_URL = exports.TAG_TAGS = exports.PROMPT_TEMPLATE_VERSION = exports.METADATA = exports.RERANKER_TOP_K = exports.RERANKER_MODEL_NAME = exports.RERANKER_QUERY = exports.RERANKER_OUTPUT_DOCUMENTS = exports.RERANKER_INPUT_DOCUMENTS = exports.USER_ID = exports.SESSION_ID = exports.TOOL_JSON_SCHEMA = exports.TOOL_PARAMETERS = exports.TOOL_DESCRIPTION = exports.TOOL_NAME = exports.LLM_TOOLS = exports.LLM_FUNCTION_CALL = exports.PROMPT_TEMPLATE_TEMPLATE = exports.PROMPT_TEMPLATE_VARIABLES = exports.RETRIEVAL_DOCUMENTS = exports.EMBEDDING_EMBEDDINGS = exports.EMBEDDING_VECTOR = exports.EMBEDDING_MODEL_NAME = void 0;
exports.DOCUMENT_ID = exports.IMAGE_URL = exports.MESSAGE_CONTENT_IMAGE = exports.MESSAGE_CONTENT_TEXT = exports.MESSAGE_CONTENT_TYPE = exports.MESSAGE_CONTENTS = exports.MESSAGE_CONTENT = exports.MESSAGE_FUNCTION_CALL_ARGUMENTS_JSON = exports.MESSAGE_FUNCTION_CALL_NAME = exports.TOOL_CALL_ID = exports.TOOL_CALL_FUNCTION_ARGUMENTS_JSON = exports.TOOL_CALL_FUNCTION_NAME = exports.MESSAGE_TOOL_CALL_ID = exports.MESSAGE_TOOL_CALLS = exports.MESSAGE_NAME = exports.MESSAGE_ROLE = exports.LLM_TOKEN_COUNT_TOTAL = exports.LLM_TOKEN_COUNT_PROMPT_DETAILS_CACHE_READ = exports.LLM_TOKEN_COUNT_PROMPT_DETAILS_CACHE_WRITE = exports.LLM_TOKEN_COUNT_PROMPT = exports.LLM_TOKEN_COUNT_COMPLETION_DETAILS_REASONING = exports.LLM_TOKEN_COUNT_COMPLETION = exports.LLM_SYSTEM = exports.LLM_PROVIDER = exports.LLM_MODEL_NAME = exports.LLM_OUTPUT_MESSAGES = exports.LLM_INVOCATION_PARAMETERS = exports.LLM_PROMPTS = exports.LLM_INPUT_MESSAGES = exports.OUTPUT_MIME_TYPE = exports.OUTPUT_VALUE = exports.INPUT_MIME_TYPE = exports.INPUT_VALUE = exports.PromptAttributePostfixes = exports.AudioAttributesPostfixes = exports.UserAttributePostfixes = exports.SessionAttributePostfixes = exports.TagAttributePostfixes = exports.DocumentAttributePostfixes = exports.ToolCallAttributePostfixes = exports.ImageAttributesPostfixes = exports.MessageContentsAttributePostfixes = exports.MessageAttributePostfixes = exports.ToolAttributePostfixes = exports.EmbeddingAttributePostfixes = exports.RerankerAttributePostfixes = exports.RetrievalAttributePostfixes = exports.LLMPromptTemplateAttributePostfixes = exports.LLMAttributePostfixes = exports.SemanticAttributePrefixes = void 0;
exports.LLMProvider = exports.LLMSystem = exports.MimeType = exports.OpenInferenceSpanKind = exports.SemanticConventions = exports.PROMPT_URL = exports.PROMPT_ID = exports.PROMPT_VENDOR = exports.AUDIO_TRANSCRIPT = exports.AUDIO_MIME_TYPE = exports.AUDIO_URL = exports.TAG_TAGS = exports.PROMPT_TEMPLATE_VERSION = exports.METADATA = exports.RERANKER_TOP_K = exports.RERANKER_MODEL_NAME = exports.RERANKER_QUERY = exports.RERANKER_OUTPUT_DOCUMENTS = exports.RERANKER_INPUT_DOCUMENTS = exports.USER_ID = exports.SESSION_ID = exports.TOOL_JSON_SCHEMA = exports.TOOL_PARAMETERS = exports.TOOL_DESCRIPTION = exports.TOOL_NAME = exports.LLM_TOOLS = exports.LLM_FUNCTION_CALL = exports.PROMPT_TEMPLATE_TEMPLATE = exports.PROMPT_TEMPLATE_VARIABLES = exports.RETRIEVAL_DOCUMENTS = exports.EMBEDDING_EMBEDDINGS = exports.EMBEDDING_VECTOR = exports.EMBEDDING_MODEL_NAME = exports.EMBEDDING_TEXT = exports.DOCUMENT_METADATA = exports.DOCUMENT_SCORE = exports.DOCUMENT_CONTENT = void 0;
exports.SemanticAttributePrefixes = {
@@ -29,2 +29,3 @@ input: "input",
audio: "audio",
prompt: "prompt",
};
@@ -113,2 +114,7 @@ exports.LLMAttributePostfixes = {
};
exports.PromptAttributePostfixes = {
  vendor: "vendor",
  id: "id",
  url: "url",
};
/**
@@ -160,4 +166,10 @@ * The input to any span
exports.LLM_TOKEN_COUNT_COMPLETION = `${exports.SemanticAttributePrefixes.llm}.${exports.LLMAttributePostfixes.token_count}.completion`;
/** Token count for the reasoning steps in the completion */
exports.LLM_TOKEN_COUNT_COMPLETION_DETAILS_REASONING = `${exports.SemanticAttributePrefixes.llm}.${exports.LLMAttributePostfixes.token_count}.completion_details.reasoning`;
/** Token count for the prompt to the llm */
exports.LLM_TOKEN_COUNT_PROMPT = `${exports.SemanticAttributePrefixes.llm}.${exports.LLMAttributePostfixes.token_count}.prompt`;
/** Token count for the tokens written to the cache */
exports.LLM_TOKEN_COUNT_PROMPT_DETAILS_CACHE_WRITE = `${exports.SemanticAttributePrefixes.llm}.${exports.LLMAttributePostfixes.token_count}.prompt_details.cache_write`;
/** Token count for the tokens retrieved from the cache */
exports.LLM_TOKEN_COUNT_PROMPT_DETAILS_CACHE_READ = `${exports.SemanticAttributePrefixes.llm}.${exports.LLMAttributePostfixes.token_count}.prompt_details.cache_read`;
/** Token count for the entire transaction with the llm */
@@ -339,2 +351,14 @@ exports.LLM_TOKEN_COUNT_TOTAL = `${exports.SemanticAttributePrefixes.llm}.${exports.LLMAttributePostfixes.token_count}.total`;
exports.AUDIO_TRANSCRIPT = `${exports.SemanticAttributePrefixes.audio}.${exports.AudioAttributesPostfixes.transcript}`;
/**
 * The vendor or origin of the prompt, e.g. a prompt library, a specialized service, etc.
 */
exports.PROMPT_VENDOR = `${exports.SemanticAttributePrefixes.prompt}.${exports.PromptAttributePostfixes.vendor}`;
/**
 * A vendor-specific id used to locate the prompt
 */
exports.PROMPT_ID = `${exports.SemanticAttributePrefixes.prompt}.${exports.PromptAttributePostfixes.id}`;
/**
 * A vendor-specific URL used to locate the prompt
 */
exports.PROMPT_URL = `${exports.SemanticAttributePrefixes.prompt}.${exports.PromptAttributePostfixes.url}`;
exports.SemanticConventions = {
@@ -352,3 +376,6 @@ IMAGE_URL: exports.IMAGE_URL,
LLM_TOKEN_COUNT_COMPLETION: exports.LLM_TOKEN_COUNT_COMPLETION,
LLM_TOKEN_COUNT_COMPLETION_DETAILS_REASONING: exports.LLM_TOKEN_COUNT_COMPLETION_DETAILS_REASONING,
LLM_TOKEN_COUNT_PROMPT: exports.LLM_TOKEN_COUNT_PROMPT,
LLM_TOKEN_COUNT_PROMPT_DETAILS_CACHE_WRITE: exports.LLM_TOKEN_COUNT_PROMPT_DETAILS_CACHE_WRITE,
LLM_TOKEN_COUNT_PROMPT_DETAILS_CACHE_READ: exports.LLM_TOKEN_COUNT_PROMPT_DETAILS_CACHE_READ,
LLM_TOKEN_COUNT_TOTAL: exports.LLM_TOKEN_COUNT_TOTAL,
@@ -399,2 +426,5 @@ LLM_SYSTEM: exports.LLM_SYSTEM,
OPENINFERENCE_SPAN_KIND: `${exports.SemanticAttributePrefixes.openinference}.span.kind`,
PROMPT_VENDOR: exports.PROMPT_VENDOR,
PROMPT_ID: exports.PROMPT_ID,
PROMPT_URL: exports.PROMPT_URL,
};
@@ -401,0 +431,0 @@ var OpenInferenceSpanKind;
```
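The long `exports.… = void 0` chains are how TypeScript's CommonJS emit pre-declares every named export before assigning it, split across several statements to bound line length; the new `PromptAttributePostfixes`, `PROMPT_*`, and cache/reasoning token-count names are threaded into those chains, which is why both lines changed. A minimal sketch of loading this CJS build explicitly (the `createRequire` shim assumes a Node.js ESM/TypeScript context):

```ts
import { createRequire } from "node:module";

const require = createRequire(import.meta.url);

// Destructure a couple of the newly added constants from the CJS entry point;
// the values mirror the declaration file above.
const {
  PROMPT_VENDOR,
  LLM_TOKEN_COUNT_PROMPT_DETAILS_CACHE_WRITE,
} = require("@arizeai/openinference-semantic-conventions");

console.log(PROMPT_VENDOR); // "prompt.vendor"
console.log(LLM_TOKEN_COUNT_PROMPT_DETAILS_CACHE_WRITE);
// "llm.token_count.prompt_details.cache_write"
```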
package.json:

```diff
 {
   "name": "@arizeai/openinference-semantic-conventions",
-  "version": "1.0.0",
+  "version": "1.0.1",
   "private": false,
@@ -5,0 +5,0 @@ "main": "dist/src/index.js",
```
TypeScript source:

```diff
@@ -25,2 +25,3 @@ /**
audio: "audio",
prompt: "prompt",
} as const;
@@ -125,2 +126,8 @@
export const PromptAttributePostfixes = {
  vendor: "vendor",
  id: "id",
  url: "url",
} as const;
/**
@@ -191,2 +198,6 @@ * The input to any span
/** Token count for the reasoning steps in the completion */
export const LLM_TOKEN_COUNT_COMPLETION_DETAILS_REASONING =
  `${SemanticAttributePrefixes.llm}.${LLMAttributePostfixes.token_count}.completion_details.reasoning` as const;
/** Token count for the prompt to the llm */
@@ -196,2 +207,10 @@ export const LLM_TOKEN_COUNT_PROMPT =
/** Token count for the tokens written to the cache */
export const LLM_TOKEN_COUNT_PROMPT_DETAILS_CACHE_WRITE =
  `${SemanticAttributePrefixes.llm}.${LLMAttributePostfixes.token_count}.prompt_details.cache_write` as const;
/** Token count for the tokens retrieved from the cache */
export const LLM_TOKEN_COUNT_PROMPT_DETAILS_CACHE_READ =
  `${SemanticAttributePrefixes.llm}.${LLMAttributePostfixes.token_count}.prompt_details.cache_read` as const;
/** Token count for the entire transaction with the llm */
@@ -459,2 +478,20 @@ export const LLM_TOKEN_COUNT_TOTAL =
/**
 * The vendor or origin of the prompt, e.g. a prompt library, a specialized service, etc.
 */
export const PROMPT_VENDOR =
  `${SemanticAttributePrefixes.prompt}.${PromptAttributePostfixes.vendor}` as const;
/**
 * A vendor-specific id used to locate the prompt
 */
export const PROMPT_ID =
  `${SemanticAttributePrefixes.prompt}.${PromptAttributePostfixes.id}` as const;
/**
 * A vendor-specific URL used to locate the prompt
 */
export const PROMPT_URL =
  `${SemanticAttributePrefixes.prompt}.${PromptAttributePostfixes.url}` as const;
export const SemanticConventions = {
@@ -472,3 +509,6 @@ IMAGE_URL,
LLM_TOKEN_COUNT_COMPLETION,
LLM_TOKEN_COUNT_COMPLETION_DETAILS_REASONING,
LLM_TOKEN_COUNT_PROMPT,
LLM_TOKEN_COUNT_PROMPT_DETAILS_CACHE_WRITE,
LLM_TOKEN_COUNT_PROMPT_DETAILS_CACHE_READ,
LLM_TOKEN_COUNT_TOTAL,
@@ -519,2 +559,5 @@ LLM_SYSTEM,
OPENINFERENCE_SPAN_KIND: `${SemanticAttributePrefixes.openinference}.span.kind`,
PROMPT_VENDOR,
PROMPT_ID,
PROMPT_URL,
} as const;
@@ -521,0 +564,0 @@
```
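The source builds every key with a template literal plus an `as const` assertion, which is what lets the declaration files above publish exact literal types such as `"prompt.vendor"` instead of plain `string`. A self-contained sketch of the pattern; the object names here are illustrative, not the package's:

```ts
const prefixes = {
  prompt: "prompt",
} as const;

const postfixes = {
  vendor: "vendor",
} as const;

// `as const` on the template literal narrows the type to the exact
// string "prompt.vendor" rather than the general `string`.
const PROMPT_VENDOR_EXAMPLE =
  `${prefixes.prompt}.${postfixes.vendor}` as const;

// Compile-time check: assigning to the literal type succeeds.
const check: "prompt.vendor" = PROMPT_VENDOR_EXAMPLE;
void check;
```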