@gltf-transform/functions
Advanced tools
Comparing version 4.0.0-alpha.3 to 4.0.0-alpha.4
@@ -22,3 +22,3 @@ import type { Transform } from '@gltf-transform/core'; | ||
* import { MeshoptEncoder } from 'meshoptimizer'; | ||
* import { reorder } from '@gltf-transform/functions'; | ||
* import { meshopt } from '@gltf-transform/functions'; | ||
* | ||
@@ -28,3 +28,3 @@ * await MeshoptEncoder.ready; | ||
* await document.transform( | ||
* reorder({encoder: MeshoptEncoder, level: 'medium'}) | ||
* meshopt({encoder: MeshoptEncoder, level: 'medium'}) | ||
* ); | ||
@@ -31,0 +31,0 @@ * ``` |
import type { NdArray } from 'ndarray'; | ||
import { Accessor, Primitive, Property, Texture, Transform, TransformContext, vec2 } from '@gltf-transform/core'; | ||
import { Accessor, Document, Primitive, Property, Texture, Transform, TransformContext, TypedArray, vec2 } from '@gltf-transform/core'; | ||
/** | ||
@@ -46,4 +46,7 @@ * Prepares a function used in an {@link Document.transform} pipeline. Use of this wrapper is | ||
export declare function shallowEqualsArray(a: ArrayLike<unknown> | null, b: ArrayLike<unknown> | null): boolean; | ||
/** Clones an {@link Accessor} without creating a copy of its underlying TypedArray data. */ | ||
export declare function shallowCloneAccessor(document: Document, accessor: Accessor): Accessor; | ||
export declare function remapPrimitive(prim: Primitive, remap: TypedArray, dstVertexCount: number): Primitive; | ||
/** @hidden */ | ||
export declare function remapAttribute(attribute: Accessor, remap: Uint32Array, dstCount: number): void; | ||
export declare function remapAttribute(attribute: Accessor, remap: TypedArray, dstCount: number): Accessor; | ||
/** @hidden */ | ||
@@ -65,2 +68,4 @@ export declare function createIndices(count: number, maxIndex?: number): Uint16Array | Uint32Array; | ||
export declare function fitPowerOfTwo(size: vec2, method: ResizePreset): vec2; | ||
export declare function floorPowerOfTwo(value: number): number; | ||
export declare function ceilPowerOfTwo(value: number): number; | ||
export {}; |
@@ -15,5 +15,5 @@ import { Primitive, Transform } from '@gltf-transform/core'; | ||
/** | ||
* Index {@link Primitive Primitives} and (optionally) merge similar vertices. When merged | ||
* Welds {@link Primitive Primitives}, merging similar vertices. When merged | ||
* and indexed, data is shared more efficiently between vertices. File size can | ||
* be reduced, and the GPU can sometimes use the vertex cache more efficiently. | ||
* be reduced, and the GPU uses the vertex cache more efficiently. | ||
* | ||
@@ -24,8 +24,8 @@ * When welding, the 'tolerance' threshold determines which vertices qualify for | ||
* of the AABB's longest dimension. Other vertex attributes are also compared | ||
* during welding, with attribute-specific thresholds. For `tolerance=0`, geometry | ||
* is indexed in place, without merging. | ||
* during welding, with attribute-specific thresholds. For `tolerance=0`, welding | ||
* requires bitwise-equality and completes much faster. | ||
* | ||
* To preserve visual appearance consistently, use low `toleranceNormal` thresholds | ||
* around 0.1 (±3º). To pre-processing a scene before simplification or LOD creation, | ||
* use higher thresholds around 0.5 (±30º). | ||
* To preserve visual appearance consistently with non-zero `tolerance`, use low | ||
 * `toleranceNormal` thresholds around 0.1 (±3°). To pre-process a scene | ||
 * before simplification or LOD creation, consider higher thresholds around 0.5 (±30°). | ||
* | ||
@@ -37,5 +37,10 @@ * Example: | ||
* | ||
* await document.transform( | ||
* weld({ tolerance: 0.001, toleranceNormal: 0.5 }) | ||
* ); | ||
* // Lossless and fast. | ||
* await document.transform(weld()); | ||
* | ||
* // Lossy and slower. | ||
* await document.transform(weld({ tolerance: 0.001, toleranceNormal: 0.5 })); | ||
* | ||
* // Lossy and slowest, maximizing vertex count reduction. | ||
* await document.transform(weld({ tolerance: 0.001, toleranceNormal: 0.5, exhaustive: true })); | ||
* ``` | ||
@@ -47,5 +52,5 @@ * | ||
/** | ||
* Index a {@link Primitive} and (optionally) weld similar vertices. When merged | ||
* and indexed, data is shared more efficiently between vertices. File size can | ||
* be reduced, and the GPU can sometimes use the vertex cache more efficiently. | ||
* Welds a {@link Primitive}, merging similar vertices. When merged and | ||
* indexed, data is shared more efficiently between vertices. File size can | ||
* be reduced, and the GPU uses the vertex cache more efficiently. | ||
* | ||
@@ -56,4 +61,4 @@ * When welding, the 'tolerance' threshold determines which vertices qualify for | ||
* of the AABB's longest dimension. Other vertex attributes are also compared | ||
* during welding, with attribute-specific thresholds. For tolerance=0, geometry | ||
* is indexed in place, without merging. | ||
* during welding, with attribute-specific thresholds. For `tolerance=0`, welding | ||
* requires bitwise-equality and completes much faster. | ||
* | ||
@@ -69,3 +74,3 @@ * Example: | ||
* for (const prim of mesh.listPrimitives()) { | ||
* weldPrimitive(prim, {tolerance: 0.0001}); | ||
* weldPrimitive(prim, {tolerance: 0}); | ||
* } | ||
@@ -75,1 +80,7 @@ * ``` | ||
export declare function weldPrimitive(prim: Primitive, _options?: WeldOptions): void; | ||
/** | ||
* References: | ||
* - https://github.com/mikolalysenko/murmurhash-js/blob/f19136e9f9c17f8cddc216ca3d44ec7c5c502f60/murmurhash2_gc.js#L14 | ||
* - https://github.com/zeux/meshoptimizer/blob/e47e1be6d3d9513153188216455bdbed40a206ef/src/indexgenerator.cpp#L12 | ||
*/ | ||
export declare function murmurHash2(h: number, key: Uint32Array): number; |
{ | ||
"name": "@gltf-transform/functions", | ||
"version": "4.0.0-alpha.3", | ||
"version": "4.0.0-alpha.4", | ||
"repository": "github:donmccurdy/glTF-Transform", | ||
@@ -38,4 +38,4 @@ "homepage": "https://gltf-transform.dev/functions.html", | ||
"dependencies": { | ||
"@gltf-transform/core": "^4.0.0-alpha.3", | ||
"@gltf-transform/extensions": "^4.0.0-alpha.3", | ||
"@gltf-transform/core": "^4.0.0-alpha.4", | ||
"@gltf-transform/extensions": "^4.0.0-alpha.4", | ||
"ktx-parse": "^0.6.0", | ||
@@ -56,3 +56,3 @@ "ndarray": "^1.0.19", | ||
}, | ||
"gitHead": "f4c8a697fb8301f721edc4fb57aea30a42db1f77" | ||
"gitHead": "a062d5e78cec994d5ea01299fcab6357cd0a6452" | ||
} |
@@ -1,3 +0,2 @@ | ||
import type { Primitive } from '@gltf-transform/core'; | ||
import { createIndices } from './utils.js'; | ||
import { ComponentTypeToTypedArray, Primitive } from '@gltf-transform/core'; | ||
@@ -12,22 +11,23 @@ /** | ||
export function cleanPrimitive(prim: Primitive): void { | ||
// TODO(feat): Clean degenerate primitives of other topologies. | ||
const indices = prim.getIndices(); | ||
if (!indices) return; | ||
if (!indices || prim.getMode() !== Primitive.Mode.TRIANGLES) return; | ||
const tmpIndicesArray = []; | ||
const srcIndicesArray = indices.getArray()!; | ||
const dstIndicesArray = []; | ||
let maxIndex = -Infinity; | ||
for (let i = 0, il = indices.getCount(); i < il; i += 3) { | ||
const a = indices.getScalar(i); | ||
const b = indices.getScalar(i + 1); | ||
const c = indices.getScalar(i + 2); | ||
for (let i = 0, il = srcIndicesArray.length; i < il; i += 3) { | ||
const a = srcIndicesArray[i]; | ||
const b = srcIndicesArray[i + 1]; | ||
const c = srcIndicesArray[i + 2]; | ||
if (a === b || a === c || b === c) continue; | ||
tmpIndicesArray.push(a, b, c); | ||
dstIndicesArray.push(a, b, c); | ||
maxIndex = Math.max(maxIndex, a, b, c); | ||
} | ||
const dstIndicesArray = createIndices(tmpIndicesArray.length, maxIndex); | ||
dstIndicesArray.set(tmpIndicesArray); | ||
indices.setArray(dstIndicesArray); | ||
const TypedArray = ComponentTypeToTypedArray[indices.getComponentType()]; | ||
indices.setArray(new TypedArray(dstIndicesArray)); | ||
} |
import { Document, Primitive, ComponentTypeToTypedArray } from '@gltf-transform/core'; | ||
import { createIndices, createPrimGroupKey } from './utils.js'; | ||
import { createIndices, createPrimGroupKey, shallowCloneAccessor } from './utils.js'; | ||
@@ -77,8 +77,5 @@ interface JoinPrimitiveOptions { | ||
const AttributeArray = ComponentTypeToTypedArray[tplAttribute.getComponentType()]; | ||
const dstAttribute = document | ||
.createAccessor() | ||
.setType(tplAttribute.getType()) | ||
.setBuffer(tplAttribute.getBuffer()) | ||
.setNormalized(tplAttribute.getNormalized()) | ||
.setArray(new AttributeArray(dstVertexCount * tplAttribute.getElementSize())); | ||
const dstAttribute = shallowCloneAccessor(document, tplAttribute).setArray( | ||
new AttributeArray(dstVertexCount * tplAttribute.getElementSize()), | ||
); | ||
dstPrim.setAttribute(semantic, dstAttribute); | ||
@@ -88,9 +85,6 @@ } | ||
// (4) Allocate joined indices. | ||
const dstIndicesArray = templatePrim.getIndices() ? createIndices(dstVertexCount) : null; | ||
const dstIndices = | ||
dstIndicesArray && | ||
document | ||
.createAccessor() | ||
.setBuffer(templatePrim.getIndices()!.getBuffer()) | ||
.setArray(createIndices(dstIndicesCount, dstVertexCount)); | ||
const srcIndices = templatePrim.getIndices(); | ||
const dstIndices = srcIndices | ||
? shallowCloneAccessor(document, srcIndices).setArray(createIndices(dstIndicesCount, dstVertexCount)) | ||
: null; | ||
dstPrim.setIndices(dstIndices); | ||
@@ -97,0 +91,0 @@ |
@@ -34,3 +34,3 @@ import type { Document, Transform } from '@gltf-transform/core'; | ||
* import { MeshoptEncoder } from 'meshoptimizer'; | ||
* import { reorder } from '@gltf-transform/functions'; | ||
* import { meshopt } from '@gltf-transform/functions'; | ||
* | ||
@@ -40,3 +40,3 @@ * await MeshoptEncoder.ready; | ||
* await document.transform( | ||
* reorder({encoder: MeshoptEncoder, level: 'medium'}) | ||
* meshopt({encoder: MeshoptEncoder, level: 'medium'}) | ||
* ); | ||
@@ -43,0 +43,0 @@ * ``` |
@@ -154,11 +154,11 @@ import { | ||
const material = prim.getMaterial(); | ||
const required = listRequiredSemantics(document, material); | ||
if (!material) continue; | ||
const required = listRequiredSemantics(document, prim, material); | ||
const unused = listUnusedSemantics(prim, required); | ||
pruneAttributes(prim, unused); | ||
prim.listTargets().forEach((target) => pruneAttributes(target, unused)); | ||
if (material) { | ||
materialPrims.has(material) | ||
? materialPrims.get(material)!.add(prim) | ||
: materialPrims.set(material, new Set([prim])); | ||
} | ||
materialPrims.has(material) | ||
? materialPrims.get(material)!.add(prim) | ||
: materialPrims.set(material, new Set([prim])); | ||
} | ||
@@ -331,4 +331,6 @@ } | ||
for (const semantic of prim.listSemantics()) { | ||
if (semantic === 'TANGENT' && !required.has(semantic)) { | ||
if (semantic === 'NORMAL' && !required.has(semantic)) { | ||
unused.push(semantic); | ||
} else if (semantic === 'TANGENT' && !required.has(semantic)) { | ||
unused.push(semantic); | ||
} else if (semantic.startsWith('TEXCOORD_') && !required.has(semantic)) { | ||
@@ -349,7 +351,6 @@ unused.push(semantic); | ||
document: Document, | ||
material: Material | ExtensionProperty | null, | ||
prim: Primitive, | ||
material: Material | ExtensionProperty, | ||
semantics = new Set<string>(), | ||
): Set<string> { | ||
if (!material) return semantics; | ||
const graph = document.getGraph(); | ||
@@ -381,3 +382,3 @@ | ||
if (child instanceof ExtensionProperty) { | ||
listRequiredSemantics(document, child, semantics); | ||
listRequiredSemantics(document, prim, child, semantics); | ||
} | ||
@@ -388,2 +389,8 @@ | ||
const isLit = material instanceof Material && !material.getExtension('KHR_materials_unlit'); | ||
const isPoints = prim.getMode() === Primitive.Mode.POINTS; | ||
if (isLit && !isPoints) { | ||
semantics.add('NORMAL'); | ||
} | ||
return semantics; | ||
@@ -390,0 +397,0 @@ } |
import { Accessor, Document, GLTF, Primitive, PropertyType, Transform } from '@gltf-transform/core'; | ||
import { prune } from './prune.js'; | ||
import { createTransform, deepListAttributes, remapAttribute, SetMap } from './utils.js'; | ||
import { createTransform, deepListAttributes, remapAttribute, SetMap, shallowCloneAccessor } from './utils.js'; | ||
import type { MeshoptEncoder } from 'meshoptimizer'; | ||
@@ -51,14 +51,15 @@ | ||
return createTransform(NAME, async (doc: Document): Promise<void> => { | ||
const logger = doc.getLogger(); | ||
return createTransform(NAME, async (document: Document): Promise<void> => { | ||
const logger = document.getLogger(); | ||
await encoder.ready; | ||
const plan = createLayoutPlan(doc); | ||
const plan = createLayoutPlan(document); | ||
for (const srcIndices of plan.indicesToAttributes.keys()) { | ||
const dstIndices = srcIndices.clone(); | ||
let indicesArray = dstIndices.getArray()!.slice(); | ||
let indicesArray = srcIndices.getArray()!; | ||
if (!(indicesArray instanceof Uint32Array)) { | ||
indicesArray = new Uint32Array(indicesArray); | ||
} else { | ||
indicesArray = indicesArray.slice(); | ||
} | ||
@@ -73,2 +74,3 @@ | ||
const dstIndices = shallowCloneAccessor(document, srcIndices); | ||
dstIndices.setArray(unique <= 65534 ? new Uint16Array(indicesArray) : indicesArray); | ||
@@ -78,3 +80,3 @@ | ||
for (const srcAttribute of plan.indicesToAttributes.get(srcIndices)) { | ||
const dstAttribute = srcAttribute.clone(); | ||
const dstAttribute = shallowCloneAccessor(document, srcAttribute); | ||
remapAttribute(dstAttribute, remap, unique); | ||
@@ -96,3 +98,3 @@ for (const prim of plan.attributesToPrimitives.get(srcAttribute)) { | ||
// Clean up any attributes left unused by earlier cloning. | ||
await doc.transform( | ||
await document.transform( | ||
prune({ | ||
@@ -99,0 +101,0 @@ propertyTypes: [PropertyType.ACCESSOR], |
@@ -9,2 +9,3 @@ import { Accessor, Document, Primitive, PropertyType, Transform, TransformContext } from '@gltf-transform/core'; | ||
isTransformPending, | ||
shallowCloneAccessor, | ||
} from './utils.js'; | ||
@@ -179,3 +180,3 @@ import { weld } from './weld.js'; | ||
for (const srcAttribute of deepListAttributes(prim)) { | ||
const dstAttribute = srcAttribute.clone(); | ||
const dstAttribute = shallowCloneAccessor(document, srcAttribute); | ||
remapAttribute(dstAttribute, remap, unique); | ||
@@ -188,3 +189,3 @@ deepSwapAttribute(prim, srcAttribute, dstAttribute); | ||
const dstIndices = srcIndices.clone(); | ||
const dstIndices = shallowCloneAccessor(document, srcIndices); | ||
dstIndices.setArray(srcVertexCount <= 65534 ? new Uint16Array(dstIndicesArray) : dstIndicesArray); | ||
@@ -191,0 +192,0 @@ prim.setIndices(dstIndices); |
@@ -101,3 +101,3 @@ import { Accessor, Document, ILogger, Primitive, Transform, TypedArray, uuid } from '@gltf-transform/core'; | ||
normal instanceof Float32Array ? normal : new Float32Array(normal), | ||
texcoord instanceof Float32Array ? texcoord : new Float32Array(texcoord) | ||
texcoord instanceof Float32Array ? texcoord : new Float32Array(texcoord), | ||
); | ||
@@ -147,3 +147,3 @@ | ||
`${NAME}: Skipping primitive ${i} of mesh "${meshName}": primitives must` + | ||
' have attributes=[POSITION, NORMAL, TEXCOORD_0] and mode=TRIANGLES.' | ||
' have attributes=[POSITION, NORMAL, TEXCOORD_0] and mode=TRIANGLES.', | ||
); | ||
@@ -159,3 +159,2 @@ return false; | ||
if (prim.getIndices()) { | ||
// TODO(feat): Do this automatically for qualifying primitives. | ||
logger.warn(`${NAME}: Skipping primitive ${i} of mesh "${meshName}": primitives must` + ' be unwelded.'); | ||
@@ -162,0 +161,0 @@ return false; |
@@ -12,4 +12,6 @@ import type { NdArray } from 'ndarray'; | ||
TransformContext, | ||
TypedArray, | ||
vec2, | ||
} from '@gltf-transform/core'; | ||
import { cleanPrimitive } from './clean-primitive.js'; | ||
@@ -181,4 +183,61 @@ /** | ||
/** Clones an {@link Accessor} without creating a copy of its underlying TypedArray data. */ | ||
export function shallowCloneAccessor(document: Document, accessor: Accessor): Accessor { | ||
return document | ||
.createAccessor(accessor.getName()) | ||
.setArray(accessor.getArray()) | ||
.setType(accessor.getType()) | ||
.setBuffer(accessor.getBuffer()) | ||
.setNormalized(accessor.getNormalized()) | ||
.setSparse(accessor.getSparse()); | ||
} | ||
/**
 * Rewrites a {@link Primitive}'s indices and vertex streams according to
 * `remap`, where `remap[srcIndex]` gives the new index for each source
 * vertex, compacting vertex data down to `dstVertexCount` vertices.
 * Accessors left orphaned by the remap are disposed, and degenerate
 * triangles are removed afterward. Returns the same (mutated) Primitive.
 */
export function remapPrimitive(prim: Primitive, remap: TypedArray, dstVertexCount: number): Primitive {
	const document = Document.fromGraph(prim.getGraph())!;
	// Remap indices.
	const srcVertexCount = prim.getAttribute('POSITION')!.getCount();
	const srcIndices = prim.getIndices();
	const srcIndicesArray = srcIndices ? srcIndices.getArray() : null;
	const dstIndices = document.createAccessor();
	const dstIndicesCount = srcIndices ? srcIndices.getCount() : srcVertexCount; // primitive count does not change.
	const dstIndicesArray = createIndices(dstIndicesCount, dstVertexCount);
	for (let i = 0; i < dstIndicesCount; i++) {
		// Unindexed primitives are treated as having implicit indices 0..n-1.
		dstIndicesArray[i] = remap[srcIndicesArray ? srcIndicesArray[i] : i];
	}
	prim.setIndices(dstIndices.setArray(dstIndicesArray));
	// Remap vertices. Snapshot the full attribute list (including morph
	// targets) before swapping, so orphans can be cleaned up below.
	const srcAttributes = deepListAttributes(prim);
	for (const srcAttribute of prim.listAttributes()) {
		const dstAttribute = shallowCloneAccessor(document, srcAttribute);
		prim.swap(srcAttribute, remapAttribute(dstAttribute, remap, dstVertexCount));
		// Dispose only if this primitive was the sole remaining user.
		if (srcAttribute.listParents().length === 1) srcAttribute.dispose();
	}
	for (const target of prim.listTargets()) {
		for (const srcAttribute of target.listAttributes()) {
			const dstAttribute = shallowCloneAccessor(document, srcAttribute);
			target.swap(srcAttribute, remapAttribute(dstAttribute, remap, dstVertexCount));
			if (srcAttribute.listParents().length === 1) srcAttribute.dispose();
		}
	}
	// Clean up accessors.
	if (srcIndices && srcIndices.listParents().length === 1) srcIndices.dispose();
	for (const srcAttribute of srcAttributes) {
		if (srcAttribute.listParents().length === 1) srcAttribute.dispose();
	}
	// Clean up degenerate topology.
	// NOTE(review): dstIndices is created without setBuffer(), unlike the
	// attribute clones above — presumably it lands in the default buffer;
	// confirm this is intended.
	cleanPrimitive(prim);
	return prim;
}
/** @hidden */ | ||
export function remapAttribute(attribute: Accessor, remap: Uint32Array, dstCount: number) { | ||
export function remapAttribute(attribute: Accessor, remap: TypedArray, dstCount: number): Accessor { | ||
const elementSize = attribute.getElementSize(); | ||
@@ -188,10 +247,14 @@ const srcCount = attribute.getCount(); | ||
const dstArray = srcArray.slice(0, dstCount * elementSize); | ||
const done = new Uint8Array(dstCount); | ||
for (let i = 0; i < srcCount; i++) { | ||
for (let srcIndex = 0; srcIndex < srcCount; srcIndex++) { | ||
const dstIndex = remap[srcIndex]; | ||
if (done[dstIndex]) continue; | ||
for (let j = 0; j < elementSize; j++) { | ||
dstArray[remap[i] * elementSize + j] = srcArray[i * elementSize + j]; | ||
dstArray[dstIndex * elementSize + j] = srcArray[srcIndex * elementSize + j]; | ||
} | ||
done[dstIndex] = 1; | ||
} | ||
attribute.setArray(dstArray); | ||
return attribute.setArray(dstArray); | ||
} | ||
@@ -310,8 +373,8 @@ | ||
function floorPowerOfTwo(value: number): number { | ||
export function floorPowerOfTwo(value: number): number { | ||
return Math.pow(2, Math.floor(Math.log(value) / Math.LN2)); | ||
} | ||
function ceilPowerOfTwo(value: number): number { | ||
export function ceilPowerOfTwo(value: number): number { | ||
return Math.pow(2, Math.ceil(Math.log(value) / Math.LN2)); | ||
} |
348
src/weld.ts
@@ -1,49 +0,59 @@ | ||
import { | ||
Accessor, | ||
Document, | ||
Primitive, | ||
PrimitiveTarget, | ||
PropertyType, | ||
Transform, | ||
TypedArray, | ||
vec3, | ||
} from '@gltf-transform/core'; | ||
import { cleanPrimitive } from './clean-primitive.js'; | ||
import { Accessor, BufferUtils, Document, Primitive, PropertyType, Transform, vec3 } from '@gltf-transform/core'; | ||
import { dedup } from './dedup.js'; | ||
import { prune } from './prune.js'; | ||
import { createIndices, createTransform, formatDeltaOp } from './utils.js'; | ||
import { | ||
ceilPowerOfTwo, | ||
createIndices, | ||
createTransform, | ||
deepListAttributes, | ||
formatDeltaOp, | ||
remapPrimitive, | ||
} from './utils.js'; | ||
// DEVELOPER NOTES: Ideally a weld() implementation should be fast, robust, | ||
// and tunable. The writeup below tracks my attempts to solve for these | ||
// constraints. | ||
// | ||
// (Approach #1) Follow the mergeVertices() implementation of three.js, | ||
// hashing vertices with a string concatenation of all vertex attributes. | ||
// The approach does not allow per-attribute tolerance in local units. | ||
// | ||
// (Approach #2) Sort points along the X axis, then make cheaper | ||
// searches up/down the sorted list for merge candidates. While this allows | ||
// simpler comparison based on specified tolerance, it's much slower, even | ||
// for cases where choice of the X vs. Y or Z axes is reasonable. | ||
// | ||
// (Approach #3) Attempted a Delaunay triangulation in three dimensions, | ||
// expecting it would be an n * log(n) algorithm, but the only implementation | ||
// I found (with delaunay-triangulate) appeared to be much slower than that, | ||
// and was notably slower than the sort-based approach, just building the | ||
// Delaunay triangulation alone. | ||
// | ||
// (Approach #4) Hybrid of (1) and (2), assigning vertices to a spatial | ||
// grid, then searching the local neighborhood (27 cells) for weld candidates. | ||
// | ||
// RESULTS: For the "Lovecraftian" sample model, after joining, a primitive | ||
// with 873,000 vertices can be welded down to 230,000 vertices. Results: | ||
// - (1) Not tested, but prior results suggest not robust enough. | ||
// - (2) 30 seconds | ||
// - (3) 660 seconds | ||
// - (4) 5 seconds exhaustive, 1.5s non-exhaustive | ||
/** | ||
* CONTRIBUTOR NOTES | ||
* | ||
* Ideally a weld() implementation should be fast, robust, and tunable. The | ||
* writeup below tracks my attempts to solve for these constraints. | ||
* | ||
* (Approach #1) Follow the mergeVertices() implementation of three.js, | ||
* hashing vertices with a string concatenation of all vertex attributes. | ||
* The approach does not allow per-attribute tolerance in local units. | ||
* | ||
* (Approach #2) Sort points along the X axis, then make cheaper | ||
* searches up/down the sorted list for merge candidates. While this allows | ||
* simpler comparison based on specified tolerance, it's much slower, even | ||
* for cases where choice of the X vs. Y or Z axes is reasonable. | ||
* | ||
* (Approach #3) Attempted a Delaunay triangulation in three dimensions, | ||
* expecting it would be an n * log(n) algorithm, but the only implementation | ||
* I found (with delaunay-triangulate) appeared to be much slower than that, | ||
* and was notably slower than the sort-based approach, just building the | ||
* Delaunay triangulation alone. | ||
* | ||
* (Approach #4) Hybrid of (1) and (2), assigning vertices to a spatial | ||
* grid, then searching the local neighborhood (27 cells) for weld candidates. | ||
* | ||
* (Approach #5) Based on Meshoptimizer's implementation, when tolerance=0 | ||
* use a hashtable to find bitwise-equal vertices quickly. Vastly faster than | ||
* previous approaches, but without tolerance options. | ||
* | ||
* RESULTS: For the "Lovecraftian" sample model linked below, after joining, | ||
* a primitive with 873,000 vertices can be welded down to 230,000 vertices. | ||
* https://sketchfab.com/3d-models/sculpt-january-day-19-lovecraftian-34ad2501108e4fceb9394f5b816b9f42 | ||
* | ||
* - (1) Not tested, but prior results suggest not robust enough. | ||
* - (2) 30s | ||
* - (3) 660s | ||
* - (4) 5s exhaustive, 1.5s non-exhaustive | ||
* - (5) 0.2s | ||
*/ | ||
const NAME = 'weld'; | ||
/** Flags 'empty' values in a Uint32Array index. */ | ||
const EMPTY = 2 ** 32 - 1; | ||
const Tolerance = { | ||
DEFAULT: 0.0001, | ||
DEFAULT: 0, | ||
TEXCOORD: 0.0001, // [0, 1] | ||
@@ -76,5 +86,5 @@ COLOR: 0.01, // [0, 1] | ||
/** | ||
* Index {@link Primitive Primitives} and (optionally) merge similar vertices. When merged | ||
* Welds {@link Primitive Primitives}, merging similar vertices. When merged | ||
* and indexed, data is shared more efficiently between vertices. File size can | ||
* be reduced, and the GPU can sometimes use the vertex cache more efficiently. | ||
* be reduced, and the GPU uses the vertex cache more efficiently. | ||
* | ||
@@ -85,8 +95,8 @@ * When welding, the 'tolerance' threshold determines which vertices qualify for | ||
* of the AABB's longest dimension. Other vertex attributes are also compared | ||
* during welding, with attribute-specific thresholds. For `tolerance=0`, geometry | ||
* is indexed in place, without merging. | ||
* during welding, with attribute-specific thresholds. For `tolerance=0`, welding | ||
* requires bitwise-equality and completes much faster. | ||
* | ||
* To preserve visual appearance consistently, use low `toleranceNormal` thresholds | ||
* around 0.1 (±3º). To pre-processing a scene before simplification or LOD creation, | ||
* use higher thresholds around 0.5 (±30º). | ||
* To preserve visual appearance consistently with non-zero `tolerance`, use low | ||
* `toleranceNormal` thresholds around 0.1 (±3º). To pre-processing a scene | ||
* before simplification or LOD creation, consider higher thresholds around 0.5 (±30º). | ||
* | ||
@@ -98,5 +108,10 @@ * Example: | ||
* | ||
* await document.transform( | ||
* weld({ tolerance: 0.001, toleranceNormal: 0.5 }) | ||
* ); | ||
* // Lossless and fast. | ||
* await document.transform(weld()); | ||
* | ||
* // Lossy and slower. | ||
* await document.transform(weld({ tolerance: 0.001, toleranceNormal: 0.5 })); | ||
* | ||
* // Lossy and slowest, maximizing vertex count reduction. | ||
* await document.transform(weld({ tolerance: 0.001, toleranceNormal: 0.5, exhaustive: true })); | ||
* ``` | ||
@@ -141,5 +156,5 @@ * | ||
/** | ||
* Index a {@link Primitive} and (optionally) weld similar vertices. When merged | ||
* and indexed, data is shared more efficiently between vertices. File size can | ||
* be reduced, and the GPU can sometimes use the vertex cache more efficiently. | ||
* Welds a {@link Primitive}, merging similar vertices. When merged and | ||
* indexed, data is shared more efficiently between vertices. File size can | ||
* be reduced, and the GPU uses the vertex cache more efficiently. | ||
* | ||
@@ -150,4 +165,4 @@ * When welding, the 'tolerance' threshold determines which vertices qualify for | ||
* of the AABB's longest dimension. Other vertex attributes are also compared | ||
* during welding, with attribute-specific thresholds. For tolerance=0, geometry | ||
* is indexed in place, without merging. | ||
* during welding, with attribute-specific thresholds. For `tolerance=0`, welding | ||
* requires bitwise-equality and completes much faster. | ||
* | ||
@@ -163,3 +178,3 @@ * Example: | ||
* for (const prim of mesh.listPrimitives()) { | ||
* weldPrimitive(prim, {tolerance: 0.0001}); | ||
* weldPrimitive(prim, {tolerance: 0}); | ||
* } | ||
@@ -177,3 +192,3 @@ * ``` | ||
if (_options.tolerance === 0) { | ||
_indexPrimitive(document, prim); | ||
_weldPrimitiveStrict(document, prim); | ||
} else { | ||
@@ -184,24 +199,45 @@ _weldPrimitive(document, prim, options); | ||
/** @internal Adds indices, if missing. Does not merge vertices. */ | ||
function _indexPrimitive(doc: Document, prim: Primitive): void { | ||
// No need to overwrite here, even if options.overwrite=true. | ||
if (prim.getIndices()) return; | ||
/** @internal Weld and merge, combining vertices that are bitwise-equal. */ | ||
function _weldPrimitiveStrict(document: Document, prim: Primitive): void { | ||
const logger = document.getLogger(); | ||
const srcVertexCount = prim.getAttribute('POSITION')!.getCount(); | ||
const srcIndices = prim.getIndices(); | ||
const srcIndicesArray = srcIndices?.getArray(); | ||
const srcIndicesCount = srcIndices ? srcIndices.getCount() : srcVertexCount; | ||
const attr = prim.listAttributes()[0]; | ||
const numVertices = attr.getCount(); | ||
const buffer = attr.getBuffer(); | ||
const indices = doc | ||
.createAccessor() | ||
.setBuffer(buffer) | ||
.setType(Accessor.Type.SCALAR) | ||
.setArray(createIndices(numVertices)); | ||
prim.setIndices(indices); | ||
const hash = new HashTable(prim); | ||
const tableSize = ceilPowerOfTwo(srcVertexCount + srcVertexCount / 4); | ||
const table = new Uint32Array(tableSize).fill(EMPTY); | ||
const writeMap = new Uint32Array(srcVertexCount).fill(EMPTY); // oldIndex → newIndex | ||
// (1) Compare and identify indices to weld. | ||
let dstVertexCount = 0; | ||
for (let i = 0; i < srcIndicesCount; i++) { | ||
const srcIndex = srcIndicesArray ? srcIndicesArray[i] : i; | ||
if (writeMap[srcIndex] !== EMPTY) continue; | ||
const hashIndex = hashLookup(table, tableSize, hash, srcIndex, EMPTY); | ||
const dstIndex = table[hashIndex]; | ||
if (dstIndex === EMPTY) { | ||
table[hashIndex] = srcIndex; | ||
writeMap[srcIndex] = dstVertexCount++; | ||
} else { | ||
writeMap[srcIndex] = writeMap[dstIndex]; | ||
} | ||
} | ||
logger.debug(`${NAME}: ${formatDeltaOp(srcVertexCount, dstVertexCount)} vertices.`); | ||
remapPrimitive(prim, writeMap, dstVertexCount); | ||
} | ||
/** @internal Weld and merge, combining vertices that are similar on all vertex attributes. */ | ||
function _weldPrimitive(doc: Document, prim: Primitive, options: Required<WeldOptions>): void { | ||
const logger = doc.getLogger(); | ||
/** @internal Weld and merge, combining vertices within tolerance. */ | ||
function _weldPrimitive(document: Document, prim: Primitive, options: Required<WeldOptions>): void { | ||
const logger = document.getLogger(); | ||
const srcPosition = prim.getAttribute('POSITION')!; | ||
const srcIndices = prim.getIndices() || doc.createAccessor().setArray(createIndices(srcPosition.getCount())); | ||
const srcIndices = prim.getIndices() || document.createAccessor().setArray(createIndices(srcPosition.getCount())); | ||
const uniqueIndices = new Uint32Array(new Set(srcIndices.getArray()!)).sort(); | ||
@@ -219,3 +255,3 @@ | ||
// (2) Compare and identify vertices to weld. | ||
// (2) Build the lookup grid. | ||
@@ -235,7 +271,7 @@ const posA: vec3 = [0, 0, 0]; | ||
// (2) Compare and identify vertices to weld. | ||
// (3) Compare and identify vertices to weld. | ||
const srcMaxIndex = uniqueIndices[uniqueIndices.length - 1]; | ||
const weldMap = createIndices(srcMaxIndex + 1); // oldIndex → oldCommonIndex | ||
const writeMap = new Array(uniqueIndices.length).fill(-1); // oldIndex → newIndex | ||
const writeMap = new Uint32Array(uniqueIndices.length).fill(EMPTY); // oldIndex → newIndex | ||
@@ -295,58 +331,5 @@ const srcVertexCount = srcPosition.getCount(); | ||
// (3) Update indices. | ||
const dstIndicesCount = srcIndices.getCount(); // # primitives does not change. | ||
const dstIndicesArray = createIndices(dstIndicesCount, uniqueIndices.length); | ||
for (let i = 0; i < dstIndicesCount; i++) { | ||
dstIndicesArray[i] = writeMap[srcIndices.getScalar(i)]; | ||
} | ||
prim.setIndices(srcIndices.clone().setArray(dstIndicesArray)); | ||
if (srcIndices.listParents().length === 1) srcIndices.dispose(); | ||
// (4) Update vertex attributes. | ||
for (const srcAttr of prim.listAttributes()) { | ||
swapAttributes(prim, srcAttr, writeMap, dstVertexCount); | ||
} | ||
for (const target of prim.listTargets()) { | ||
for (const srcAttr of target.listAttributes()) { | ||
swapAttributes(target, srcAttr, writeMap, dstVertexCount); | ||
} | ||
} | ||
// (5) Clean up degenerate triangles. | ||
cleanPrimitive(prim); | ||
remapPrimitive(prim, writeMap, dstVertexCount); | ||
} | ||
/** Creates a new TypedArray of the same type as an original, with a new length. */ | ||
function createArrayOfType<T extends TypedArray>(array: T, length: number): T { | ||
const ArrayCtor = array.constructor as new (length: number) => T; | ||
return new ArrayCtor(length); | ||
} | ||
/** Replaces an {@link Attribute}, creating a new one with the given elements. */ | ||
function swapAttributes( | ||
parent: Primitive | PrimitiveTarget, | ||
srcAttr: Accessor, | ||
reorder: number[], | ||
dstCount: number, | ||
): void { | ||
const dstAttrArray = createArrayOfType(srcAttr.getArray()!, dstCount * srcAttr.getElementSize()); | ||
const dstAttr = srcAttr.clone().setArray(dstAttrArray); | ||
const done = new Uint8Array(dstCount); | ||
for (let i = 0, el = [] as number[]; i < reorder.length; i++) { | ||
if (!done[reorder[i]]) { | ||
dstAttr.setElement(reorder[i], srcAttr.getElement(i, el)); | ||
done[reorder[i]] = 1; | ||
} | ||
} | ||
parent.swap(srcAttr, dstAttr); | ||
// Clean up. | ||
if (srcAttr.listParents().length === 1) srcAttr.dispose(); | ||
} | ||
const _a = [] as number[]; | ||
@@ -445,1 +428,102 @@ const _b = [] as number[]; | ||
} | ||
/****************************************************************************** | ||
* MURMUR HASH | ||
*/ | ||
/**
 * Hash/equality helper for vertex welding. Treats each vertex — one element
 * from every attribute on the primitive, including morph target attributes
 * (via deepListAttributes) — as a raw byte string, and exposes hash() and
 * equal() over vertex indices.
 */
class HashTable {
	// One entry per attribute: a byte view over its backing array, the packed
	// element size in bytes, and that size rounded up to a 4-byte multiple
	// (murmurHash2 consumes whole 32-bit words).
	private attributes: { u8: Uint8Array; byteStride: number; paddedByteStride: number }[] = [];
	/** Temporary vertex views in 4-byte-aligned memory. */
	private u8: Uint8Array;
	private u32: Uint32Array;
	constructor(prim: Primitive) {
		// Accumulate the padded byte stride of one full vertex across all attributes.
		let byteStride = 0;
		for (const attribute of deepListAttributes(prim)) {
			byteStride += this._initAttribute(attribute);
		}
		// Scratch buffer holding one vertex; u32 aliases u8 for word-wise hashing.
		this.u8 = new Uint8Array(byteStride);
		this.u32 = new Uint32Array(this.u8.buffer);
	}
	/** Registers one attribute; returns its padded per-vertex byte stride. */
	private _initAttribute(attribute: Accessor): number {
		const array = attribute.getArray()!;
		// NOTE(review): assumes the accessor's array is tightly packed (no
		// interleaving) — byteStride below is elementSize × componentSize.
		const u8 = new Uint8Array(array.buffer, array.byteOffset, array.byteLength);
		const byteStride = attribute.getElementSize() * attribute.getComponentSize();
		const paddedByteStride = BufferUtils.padNumber(byteStride);
		this.attributes.push({ u8, byteStride, paddedByteStride });
		return paddedByteStride;
	}
	/** Hashes the vertex at `index` over all registered attributes. */
	hash(index: number): number {
		// Load vertex into 4-byte-aligned view.
		let byteOffset = 0;
		for (const { u8, byteStride, paddedByteStride } of this.attributes) {
			for (let i = 0; i < paddedByteStride; i++) {
				if (i < byteStride) {
					this.u8[byteOffset + i] = u8[index * byteStride + i];
				} else {
					this.u8[byteOffset + i] = 0; // Zero padding bytes so hashes are stable.
				}
			}
			byteOffset += paddedByteStride;
		}
		// Compute hash.
		return murmurHash2(0, this.u32);
	}
	/** Returns true if vertices `a` and `b` are bitwise-equal across all attributes. */
	equal(a: number, b: number): boolean {
		// Compares only the packed bytes (padding is irrelevant here).
		for (const { u8, byteStride } of this.attributes) {
			for (let j = 0; j < byteStride; j++) {
				if (u8[a * byteStride + j] !== u8[b * byteStride + j]) {
					return false;
				}
			}
		}
		return true;
	}
}
/** | ||
* References: | ||
* - https://github.com/mikolalysenko/murmurhash-js/blob/f19136e9f9c17f8cddc216ca3d44ec7c5c502f60/murmurhash2_gc.js#L14 | ||
* - https://github.com/zeux/meshoptimizer/blob/e47e1be6d3d9513153188216455bdbed40a206ef/src/indexgenerator.cpp#L12 | ||
*/ | ||
export function murmurHash2(h: number, key: Uint32Array): number { | ||
// MurmurHash2 | ||
const m = 0x5bd1e995; | ||
const r = 24; | ||
for (let i = 0, il = key.length; i < il; i++) { | ||
let k = key[i]; | ||
k = Math.imul(k, m) >>> 0; | ||
k = (k ^ (k >> r)) >>> 0; | ||
k = Math.imul(k, m) >>> 0; | ||
h = Math.imul(h, m) >>> 0; | ||
h = (h ^ k) >>> 0; | ||
} | ||
return h; | ||
} | ||
function hashLookup(table: Uint32Array, buckets: number, hash: HashTable, key: number, empty = EMPTY): number { | ||
const hashmod = buckets - 1; | ||
const hashval = hash.hash(key); | ||
let bucket = hashval & hashmod; | ||
for (let probe = 0; probe <= hashmod; probe++) { | ||
const item = table[bucket]; | ||
if (item === empty || hash.equal(item, key)) { | ||
return bucket; | ||
} | ||
bucket = (bucket + probe + 1) & hashmod; // Hash collision. | ||
} | ||
throw new Error('Hash table full.'); | ||
} |
Sorry, the diff of this file is not supported yet
Sorry, the diff of this file is not supported yet
Sorry, the diff of this file is not supported yet
Sorry, the diff of this file is too big to display
Sorry, the diff of this file is not supported yet
License Policy Violation
License: This package is not allowed per your license policy. Review the package's license to ensure compliance.
Found 1 instance in 1 package
License Policy Violation
License: This package is not allowed per your license policy. Review the package's license to ensure compliance.
Found 1 instance in 1 package
Major refactor
Supply chain risk: This package has recently undergone a major refactor. It may be unstable or indicate significant internal changes. Use caution when updating to versions that include significant changes.
Found 1 instance in 1 package
2132174
25147
0