sql-formatter
Comparing version 7.0.0 to 7.0.1
@@ -10,4 +10,2 @@ "use strict";
var _Tokenizer = require("./Tokenizer");
function _slicedToArray(arr, i) { return _arrayWithHoles(arr) || _iterableToArrayLimit(arr, i) || _unsupportedIterableToArray(arr, i) || _nonIterableRest(); }
@@ -25,5 +23,7 @@
var WHITESPACE_REGEX = /^([\t-\r \xA0\u1680\u2000-\u200A\u2028\u2029\u202F\u205F\u3000\uFEFF]+)/;
/**
* Handles comma placement - either before, after or tabulated
*/
function formatCommaPositions(query, commaPosition, indent) {
@@ -100,3 +100,3 @@ return groupCommaDelimitedLines(query.split('\n')).flatMap(function (commaLines) {
var _ref = line.match(_Tokenizer.WHITESPACE_REGEX) || [''],
var _ref = line.match(WHITESPACE_REGEX) || [''],
_ref2 = _slicedToArray(_ref, 1),
@@ -103,0 +103,0 @@ whitespace = _ref2[0];
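Note on the hunks above: 7.0.1 drops the import of WHITESPACE_REGEX from ./Tokenizer and gives formatCommaPositions its own ^-anchored copy, because the Tokenizer's regexes become sticky in this release (see the Tokenizer.js diff below) and the constant is no longer exported. A minimal sketch of the leading-whitespace measurement this module performs, using the generic \s class instead of the library's explicit Unicode whitespace list:

// Minimal sketch, not the library's code: grab a line's leading whitespace
// with a ^-anchored regex, the way formatCommaPositions uses its local copy.
// \s+ is a simplification of the explicit character class seen in the diff.
var WHITESPACE_REGEX = /^(\s+)/;

function leadingWhitespace(line) {
  var _ref = line.match(WHITESPACE_REGEX) || [''];
  return _ref[0]; // '    ' for an indented line, '' when there is none
}

leadingWhitespace('    foo ASC,'); // => '    '
leadingWhitespace('bar DESC');     // => ''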
@@ -47,5 +47,5 @@ "use strict";
var createLineCommentRegex = function createLineCommentRegex(lineCommentTypes) {
return new RegExp("^((?:".concat(lineCommentTypes.map(function (c) {
return new RegExp("((?:".concat(lineCommentTypes.map(function (c) {
return (0, _utils.escapeRegExp)(c);
}).join('|'), ").*?)(?:\r\n|\r|\n|$)"), 'u');
}).join('|'), ").*?)(?:\r\n|\r|\n|$)"), 'uy');
};
@@ -63,3 +63,3 @@ /**
if (reservedKeywords.length === 0) {
return /^\b$/;
return new RegExp("\\b$", "y");
}
@@ -69,3 +69,3 @@
var reservedKeywordsPattern = (0, _utils.sortByLengthDesc)(reservedKeywords).join('|').replace(/ /g, '\\s+');
return new RegExp("^(".concat(reservedKeywordsPattern, ")").concat(avoidIdentChars, "\\b"), 'iu');
return new RegExp("(".concat(reservedKeywordsPattern, ")").concat(avoidIdentChars, "\\b"), 'iuy');
}; // Negative lookahead to avoid matching a keyword that's actually part of identifier,
@@ -219,4 +219,4 @@ // which can happen when identifier allows word-boundary characters inside it.
var patternToRegex = function patternToRegex(pattern) {
return new RegExp('^(' + pattern + ')', 'u');
return new RegExp('(' + pattern + ')', 'uy');
};
//# sourceMappingURL=regexFactory.js.map
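The regexFactory changes above all follow one pattern: every generated regex loses its leading ^ anchor and gains the sticky y flag ('u' becomes 'uy', /^\b$/ becomes new RegExp("\\b$", "y")). A ^-anchored regex only matches at the start of the string, so the old tokenizer had to keep slicing the input; a sticky regex matches at whatever position RegExp.lastIndex points to and nowhere else. A small illustration (not library code) of the difference:

// Plain regex vs sticky regex. Sticky ("y") matches only at lastIndex,
// which is what lets the tokenizer walk the string without slicing it.
var input = 'foo  BAR';

/([A-Z]+)/.exec(input)[1];   // 'BAR'  - a plain regex searches ahead

var sticky = /([A-Z]+)/y;
sticky.lastIndex = 0;
sticky.exec(input);          // null   - nothing uppercase exactly at position 0
sticky.lastIndex = 5;
sticky.exec(input)[1];       // 'BAR'  - matches exactly at position 5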
@@ -8,3 +8,3 @@ "use strict";
});
exports["default"] = exports.WHITESPACE_REGEX = void 0;
exports["default"] = void 0;
@@ -47,5 +47,15 @@ var _utils = require("../utils");
var WHITESPACE_REGEX = /^([\t-\r \xA0\u1680\u2000-\u200A\u2028\u2029\u202F\u205F\u3000\uFEFF]+)/;
exports.WHITESPACE_REGEX = WHITESPACE_REGEX;
var NULL_REGEX = /(?!)/; // zero-width negative lookahead, matches nothing
// A note about regular expressions
//
// We're using a sticky flag "y" in all tokenizing regexes.
// This works a bit like ^, anchoring the regex to the start,
// but when ^ anchores the regex to the start of string (or line),
// the sticky flag anchors it to search start position, which we
// can change by setting RegExp.lastIndex.
//
// This allows us to avoid slicing off tokens from the start of input string
// (which we used in the past) and just move the match start position forward,
// which is much more performant on long strings.
var WHITESPACE_REGEX = new RegExp("([\\t-\\r \\xA0\\u1680\\u2000-\\u200A\\u2028\\u2029\\u202F\\u205F\\u3000\\uFEFF]+)", "y");
var NULL_REGEX = new RegExp("(?!)", "y"); // zero-width negative lookahead, matches nothing
@@ -60,2 +70,4 @@ var toCanonicalKeyword = function toCanonicalKeyword(text) {
var Tokenizer = /*#__PURE__*/function () {
// The input SQL string to process
// Current position in string
function Tokenizer(cfg) {
@@ -83,2 +95,6 @@ var _cfg$reservedDependen,
_defineProperty(this, "input", '');
_defineProperty(this, "index", 0);
_defineProperty(this, "preprocess", function (tokens) {
@@ -93,3 +109,3 @@ return tokens;
this.quotedIdentRegex = regexFactory.createQuoteRegex(cfg.identTypes);
this.REGEX_MAP = (_this$REGEX_MAP = {}, _defineProperty(_this$REGEX_MAP, _token.TokenType.IDENT, regexFactory.createIdentRegex(cfg.identChars)), _defineProperty(_this$REGEX_MAP, _token.TokenType.STRING, regexFactory.createQuoteRegex(cfg.stringTypes)), _defineProperty(_this$REGEX_MAP, _token.TokenType.VARIABLE, cfg.variableTypes ? regexFactory.createVariableRegex(cfg.variableTypes) : NULL_REGEX), _defineProperty(_this$REGEX_MAP, _token.TokenType.RESERVED_KEYWORD, regexFactory.createReservedWordRegex(cfg.reservedKeywords, cfg.identChars)), _defineProperty(_this$REGEX_MAP, _token.TokenType.RESERVED_DEPENDENT_CLAUSE, regexFactory.createReservedWordRegex((_cfg$reservedDependen = cfg.reservedDependentClauses) !== null && _cfg$reservedDependen !== void 0 ? _cfg$reservedDependen : [], cfg.identChars)), _defineProperty(_this$REGEX_MAP, _token.TokenType.RESERVED_LOGICAL_OPERATOR, regexFactory.createReservedWordRegex((_cfg$reservedLogicalO = cfg.reservedLogicalOperators) !== null && _cfg$reservedLogicalO !== void 0 ? _cfg$reservedLogicalO : ['AND', 'OR'], cfg.identChars)), _defineProperty(_this$REGEX_MAP, _token.TokenType.RESERVED_COMMAND, regexFactory.createReservedWordRegex(cfg.reservedCommands, cfg.identChars)), _defineProperty(_this$REGEX_MAP, _token.TokenType.RESERVED_BINARY_COMMAND, regexFactory.createReservedWordRegex(cfg.reservedBinaryCommands, cfg.identChars)), _defineProperty(_this$REGEX_MAP, _token.TokenType.RESERVED_JOIN_CONDITION, regexFactory.createReservedWordRegex((_cfg$reservedJoinCond = cfg.reservedJoinConditions) !== null && _cfg$reservedJoinCond !== void 0 ? _cfg$reservedJoinCond : ['ON', 'USING'], cfg.identChars)), _defineProperty(_this$REGEX_MAP, _token.TokenType.OPERATOR, regexFactory.createOperatorRegex('+-/*%&|^><=.,;[]{}`:$@', ['<>', '<=', '>=', '!='].concat(_toConsumableArray((_cfg$operators = cfg.operators) !== null && _cfg$operators !== void 0 ? _cfg$operators : [])))), _defineProperty(_this$REGEX_MAP, _token.TokenType.BLOCK_START, regexFactory.createParenRegex((_cfg$blockStart = cfg.blockStart) !== null && _cfg$blockStart !== void 0 ? _cfg$blockStart : ['('])), _defineProperty(_this$REGEX_MAP, _token.TokenType.BLOCK_END, regexFactory.createParenRegex((_cfg$blockEnd = cfg.blockEnd) !== null && _cfg$blockEnd !== void 0 ? _cfg$blockEnd : [')'])), _defineProperty(_this$REGEX_MAP, _token.TokenType.RESERVED_CASE_START, /^(CA[S\u017F]E)\b/i), _defineProperty(_this$REGEX_MAP, _token.TokenType.RESERVED_CASE_END, /^(END)\b/i), _defineProperty(_this$REGEX_MAP, _token.TokenType.LINE_COMMENT, regexFactory.createLineCommentRegex((_cfg$lineCommentTypes = cfg.lineCommentTypes) !== null && _cfg$lineCommentTypes !== void 0 ? _cfg$lineCommentTypes : ['--'])), _defineProperty(_this$REGEX_MAP, _token.TokenType.BLOCK_COMMENT, /^(\/\*(?:(?![])[\s\S])*?(?:\*\/|$))/), _defineProperty(_this$REGEX_MAP, _token.TokenType.NUMBER, /^(0x[0-9A-Fa-f]+|0b[01]+|(\x2D[\t-\r \xA0\u1680\u2000-\u200A\u2028\u2029\u202F\u205F\u3000\uFEFF]*)?[0-9]+(\.[0-9]*)?([Ee][\+\x2D]?[0-9]+(\.[0-9]+)?)?)/), _defineProperty(_this$REGEX_MAP, _token.TokenType.PARAMETER, NULL_REGEX), _defineProperty(_this$REGEX_MAP, _token.TokenType.EOF, NULL_REGEX), _this$REGEX_MAP);
this.REGEX_MAP = (_this$REGEX_MAP = {}, _defineProperty(_this$REGEX_MAP, _token.TokenType.IDENT, regexFactory.createIdentRegex(cfg.identChars)), _defineProperty(_this$REGEX_MAP, _token.TokenType.STRING, regexFactory.createQuoteRegex(cfg.stringTypes)), _defineProperty(_this$REGEX_MAP, _token.TokenType.VARIABLE, cfg.variableTypes ? regexFactory.createVariableRegex(cfg.variableTypes) : NULL_REGEX), _defineProperty(_this$REGEX_MAP, _token.TokenType.RESERVED_KEYWORD, regexFactory.createReservedWordRegex(cfg.reservedKeywords, cfg.identChars)), _defineProperty(_this$REGEX_MAP, _token.TokenType.RESERVED_DEPENDENT_CLAUSE, regexFactory.createReservedWordRegex((_cfg$reservedDependen = cfg.reservedDependentClauses) !== null && _cfg$reservedDependen !== void 0 ? _cfg$reservedDependen : [], cfg.identChars)), _defineProperty(_this$REGEX_MAP, _token.TokenType.RESERVED_LOGICAL_OPERATOR, regexFactory.createReservedWordRegex((_cfg$reservedLogicalO = cfg.reservedLogicalOperators) !== null && _cfg$reservedLogicalO !== void 0 ? _cfg$reservedLogicalO : ['AND', 'OR'], cfg.identChars)), _defineProperty(_this$REGEX_MAP, _token.TokenType.RESERVED_COMMAND, regexFactory.createReservedWordRegex(cfg.reservedCommands, cfg.identChars)), _defineProperty(_this$REGEX_MAP, _token.TokenType.RESERVED_BINARY_COMMAND, regexFactory.createReservedWordRegex(cfg.reservedBinaryCommands, cfg.identChars)), _defineProperty(_this$REGEX_MAP, _token.TokenType.RESERVED_JOIN_CONDITION, regexFactory.createReservedWordRegex((_cfg$reservedJoinCond = cfg.reservedJoinConditions) !== null && _cfg$reservedJoinCond !== void 0 ? _cfg$reservedJoinCond : ['ON', 'USING'], cfg.identChars)), _defineProperty(_this$REGEX_MAP, _token.TokenType.OPERATOR, regexFactory.createOperatorRegex('+-/*%&|^><=.,;[]{}`:$@', ['<>', '<=', '>=', '!='].concat(_toConsumableArray((_cfg$operators = cfg.operators) !== null && _cfg$operators !== void 0 ? _cfg$operators : [])))), _defineProperty(_this$REGEX_MAP, _token.TokenType.BLOCK_START, regexFactory.createParenRegex((_cfg$blockStart = cfg.blockStart) !== null && _cfg$blockStart !== void 0 ? _cfg$blockStart : ['('])), _defineProperty(_this$REGEX_MAP, _token.TokenType.BLOCK_END, regexFactory.createParenRegex((_cfg$blockEnd = cfg.blockEnd) !== null && _cfg$blockEnd !== void 0 ? _cfg$blockEnd : [')'])), _defineProperty(_this$REGEX_MAP, _token.TokenType.RESERVED_CASE_START, new RegExp("(CA[S\\u017F]E)\\b", "iy")), _defineProperty(_this$REGEX_MAP, _token.TokenType.RESERVED_CASE_END, new RegExp("(END)\\b", "iy")), _defineProperty(_this$REGEX_MAP, _token.TokenType.LINE_COMMENT, regexFactory.createLineCommentRegex((_cfg$lineCommentTypes = cfg.lineCommentTypes) !== null && _cfg$lineCommentTypes !== void 0 ? _cfg$lineCommentTypes : ['--'])), _defineProperty(_this$REGEX_MAP, _token.TokenType.BLOCK_COMMENT, new RegExp("(\\/\\*(?:(?![])[\\s\\S])*?(?:\\*\\/|$))", "y")), _defineProperty(_this$REGEX_MAP, _token.TokenType.NUMBER, new RegExp("(0x[0-9A-Fa-f]+|0b[01]+|(\\x2D[\\t-\\r \\xA0\\u1680\\u2000-\\u200A\\u2028\\u2029\\u202F\\u205F\\u3000\\uFEFF]*)?[0-9]+(\\.[0-9]*)?([Ee][\\+\\x2D]?[0-9]+(\\.[0-9]+)?)?)", "y")), _defineProperty(_this$REGEX_MAP, _token.TokenType.PARAMETER, NULL_REGEX), _defineProperty(_this$REGEX_MAP, _token.TokenType.EOF, NULL_REGEX), _this$REGEX_MAP);
this.paramPatterns = this.excludePatternsWithoutRegexes([{
@@ -118,3 +134,3 @@ // :name placeholders
// ? placeholders
regex: cfg.positionalParams ? /^(\?)/ : undefined,
regex: cfg.positionalParams ? new RegExp("(\\?)", "y") : undefined,
parseKey: function parseKey(v) {
@@ -144,20 +160,19 @@ return v.slice(1);
value: function tokenize(input) {
this.input = input;
this.index = 0;
var tokens = [];
var token; // Keep processing the string until it is empty
var token; // Keep processing the string until end is reached
while (input.length) {
while (this.index < this.input.length) {
// grab any preceding whitespace
var whitespaceBefore = this.getWhitespace(input);
input = input.substring(whitespaceBefore.length);
var whitespaceBefore = this.getWhitespace();
if (input.length) {
if (this.index < this.input.length) {
// Get the next token and the token type
token = this.getNextToken(input, token);
token = this.getNextToken(token);
if (!token) {
throw new Error("Parse error: Unexpected \"".concat(input.slice(0, 100), "\""));
} // Advance the string
throw new Error("Parse error: Unexpected \"".concat(input.slice(this.index, 100), "\""));
}
input = input.substring(token.text.length);
tokens.push(_objectSpread(_objectSpread({}, token), {}, {
@@ -171,25 +186,24 @@ whitespaceBefore: whitespaceBefore
}
/** Matches preceding whitespace if present */
}, {
key: "getWhitespace",
value: function getWhitespace(input) {
var matches = input.match(WHITESPACE_REGEX);
return matches ? matches[1] : '';
value: function getWhitespace() {
WHITESPACE_REGEX.lastIndex = this.index;
var matches = this.input.match(WHITESPACE_REGEX);
if (matches) {
// Advance current position by matched whitespace length
this.index += matches[1].length;
return matches[1];
} else {
return '';
}
}
/** Attempts to match next token from input string, tests RegExp patterns in decreasing priority */
}, {
key: "getNextToken",
value: function getNextToken(input, previousToken) {
return this.matchToken(_token.TokenType.LINE_COMMENT, input) || this.matchToken(_token.TokenType.BLOCK_COMMENT, input) || this.matchToken(_token.TokenType.STRING, input) || this.matchQuotedIdentToken(input) || this.matchToken(_token.TokenType.VARIABLE, input) || this.matchToken(_token.TokenType.BLOCK_START, input) || this.matchToken(_token.TokenType.BLOCK_END, input) || this.matchPlaceholderToken(input) || this.matchToken(_token.TokenType.NUMBER, input) || this.matchReservedWordToken(input, previousToken) || this.matchToken(_token.TokenType.IDENT, input) || this.matchToken(_token.TokenType.OPERATOR, input);
value: function getNextToken(previousToken) {
return this.matchToken(_token.TokenType.LINE_COMMENT) || this.matchToken(_token.TokenType.BLOCK_COMMENT) || this.matchToken(_token.TokenType.STRING) || this.matchQuotedIdentToken() || this.matchToken(_token.TokenType.VARIABLE) || this.matchToken(_token.TokenType.BLOCK_START) || this.matchToken(_token.TokenType.BLOCK_END) || this.matchPlaceholderToken() || this.matchToken(_token.TokenType.NUMBER) || this.matchReservedWordToken(previousToken) || this.matchToken(_token.TokenType.IDENT) || this.matchToken(_token.TokenType.OPERATOR);
}
/**
* Attempts to match a placeholder token pattern
* @return {Token | undefined} - The placeholder token if found, otherwise undefined
*/
}, {
key: "matchPlaceholderToken",
value: function matchPlaceholderToken(input) {
value: function matchPlaceholderToken() {
var _iterator = _createForOfIteratorHelper(this.paramPatterns),
@@ -204,3 +218,2 @@ _step;
var token = this.match({
input: input,
regex: regex,
@@ -234,5 +247,4 @@ type: _token.TokenType.PARAMETER,
key: "matchQuotedIdentToken",
value: function matchQuotedIdentToken(input) {
value: function matchQuotedIdentToken() {
return this.match({
input: input,
regex: this.quotedIdentRegex,
@@ -243,10 +255,5 @@ type: _token.TokenType.IDENT,
}
/**
* Attempts to match a Reserved word token pattern, avoiding edge cases of Reserved words within string tokens
* @return {Token | undefined} - The Reserved word token if found, otherwise undefined
*/
}, {
key: "matchReservedWordToken",
value: function matchReservedWordToken(input, previousToken) {
value: function matchReservedWordToken(previousToken) {
// A reserved word cannot be preceded by a '.'
@@ -259,3 +266,3 @@ // this makes it so in "mytable.from", "from" is not considered a reserved word
return this.matchReservedToken(_token.TokenType.RESERVED_CASE_START, input) || this.matchReservedToken(_token.TokenType.RESERVED_CASE_END, input) || this.matchReservedToken(_token.TokenType.RESERVED_COMMAND, input) || this.matchReservedToken(_token.TokenType.RESERVED_BINARY_COMMAND, input) || this.matchReservedToken(_token.TokenType.RESERVED_DEPENDENT_CLAUSE, input) || this.matchReservedToken(_token.TokenType.RESERVED_LOGICAL_OPERATOR, input) || this.matchReservedToken(_token.TokenType.RESERVED_KEYWORD, input) || this.matchReservedToken(_token.TokenType.RESERVED_JOIN_CONDITION, input);
return this.matchReservedToken(_token.TokenType.RESERVED_CASE_START) || this.matchReservedToken(_token.TokenType.RESERVED_CASE_END) || this.matchReservedToken(_token.TokenType.RESERVED_COMMAND) || this.matchReservedToken(_token.TokenType.RESERVED_BINARY_COMMAND) || this.matchReservedToken(_token.TokenType.RESERVED_DEPENDENT_CLAUSE) || this.matchReservedToken(_token.TokenType.RESERVED_LOGICAL_OPERATOR) || this.matchReservedToken(_token.TokenType.RESERVED_KEYWORD) || this.matchReservedToken(_token.TokenType.RESERVED_JOIN_CONDITION);
} // Helper for matching RESERVED_* tokens which need to be transformed to canonical form
@@ -265,5 +272,4 @@
key: "matchReservedToken",
value: function matchReservedToken(tokenType, input) {
value: function matchReservedToken(tokenType) {
return this.match({
input: input,
type: tokenType,
@@ -277,5 +283,4 @@ regex: this.REGEX_MAP[tokenType],
key: "matchToken",
value: function matchToken(tokenType, input) {
value: function matchToken(tokenType) {
return this.match({
input: input,
type: tokenType,
@@ -285,10 +290,3 @@ regex: this.REGEX_MAP[tokenType],
});
}
/**
* Attempts to match RegExp from head of input, returning undefined if not found
* @param {string} _.input - The string to match
* @param {TokenType} _.type - The type of token to match against
* @param {RegExp} _.regex - The regex to match
* @return {Token | undefined} - The matched token if found, otherwise undefined
*/
} // Attempts to match RegExp at current position in input
@@ -298,9 +296,11 @@ }, {
value: function match(_ref2) {
var input = _ref2.input,
type = _ref2.type,
var type = _ref2.type,
regex = _ref2.regex,
transform = _ref2.transform;
var matches = input.match(regex);
regex.lastIndex = this.index;
var matches = this.input.match(regex);
if (matches) {
// Advance current position by matched token length
this.index += matches[1].length;
return {
@@ -321,2 +321,3 @@ type: type,
exports["default"] = Tokenizer;
module.exports = exports.default;
//# sourceMappingURL=Tokenizer.js.map
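The Tokenizer.js diff is the core of this release: instead of repeatedly calling input.substring() after each token, the tokenizer now leaves the input string untouched, tracks a numeric index, and points each sticky regex at that position via lastIndex, as the new in-code comment explains. A condensed sketch of that loop shape (an assumed simplification, not the exact library internals):

// Simplified index-based tokenize loop, assuming sticky token regexes.
var WORD = /([A-Za-z]+)/y;
var SPACE = /( +)/y;

function tokenize(input) {
  var tokens = [];
  var index = 0;
  while (index < input.length) {
    SPACE.lastIndex = index;                 // anchor the match at the cursor
    var ws = SPACE.exec(input);
    if (ws) index += ws[1].length;           // consume whitespace
    if (index >= input.length) break;
    WORD.lastIndex = index;
    var match = WORD.exec(input);
    if (!match) {
      throw new Error('Parse error: Unexpected "' + input.slice(index, index + 100) + '"');
    }
    tokens.push({ text: match[1], whitespaceBefore: ws ? ws[1] : '' });
    index += match[1].length;                // advance the cursor, never slice the input
  }
  return tokens;
}

tokenize('select  foo');
// => [{ text: 'select', whitespaceBefore: '' }, { text: 'foo', whitespaceBefore: '  ' }]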
import * as regexFactory from './regexFactory';
import { type Token } from './token';
export declare const WHITESPACE_REGEX: RegExp;
/** Struct that defines how a SQL language can be broken into tokens */
@@ -32,2 +31,4 @@ interface TokenizerOptions {
private paramPatterns;
private input;
private index;
private preprocess;
@@ -44,29 +45,12 @@ constructor(cfg: TokenizerOptions);
tokenize(input: string): Token[];
/** Matches preceding whitespace if present */
private getWhitespace;
/** Attempts to match next token from input string, tests RegExp patterns in decreasing priority */
private getNextToken;
/**
* Attempts to match a placeholder token pattern
* @return {Token | undefined} - The placeholder token if found, otherwise undefined
*/
private matchPlaceholderToken;
private getEscapedPlaceholderKey;
private matchQuotedIdentToken;
/**
* Attempts to match a Reserved word token pattern, avoiding edge cases of Reserved words within string tokens
* @return {Token | undefined} - The Reserved word token if found, otherwise undefined
*/
private matchReservedWordToken;
private matchReservedToken;
private matchToken;
/**
* Attempts to match RegExp from head of input, returning undefined if not found
* @param {string} _.input - The string to match
* @param {TokenType} _.type - The type of token to match against
* @param {RegExp} _.regex - The regex to match
* @return {Token | undefined} - The matched token if found, otherwise undefined
*/
private match;
}
export {};
import { FormatFn } from "../../src/sqlFormatter";
interface CommentsConfig {
hashComments?: boolean;
skipTrickyCommentsTest?: boolean;
}
export default function supportsComments(format: FormatFn, opts?: CommentsConfig): void;
export {};
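supportsComments is one of the shared dialect test helpers; the only declaration change here is the new optional skipTrickyCommentsTest flag alongside hashComments. A hypothetical call from a dialect test suite might look like the following (the import paths and the 'mysql' dialect name are assumptions for illustration, not taken from the diff):

// Hypothetical usage of the shared comment tests for one dialect.
import { format } from '../../src/sqlFormatter';
import supportsComments from './features/comments'; // assumed location of this helper

const formatMySql = (query, cfg = {}) => format(query, { ...cfg, language: 'mysql' });

// Enable '#' line comments and opt out of the tricky-comments case
// via the option added in this release.
supportsComments(formatMySql, { hashComments: true, skipTrickyCommentsTest: true });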
{
"name": "sql-formatter",
"version": "7.0.0",
"version": "7.0.1",
"description": "Format whitespace in a SQL query to make it more readable",
@@ -5,0 +5,0 @@ "license": "MIT",