You're Invited:Meet the Socket Team at RSAC and BSidesSF 2026, March 23–26.RSVP
Socket
Book a DemoSign in
Socket

lapx

Package Overview
Dependencies
Maintainers
1
Versions
28
Alerts
File Explorer

Advanced tools

Socket logo

Install Socket

Detect and block malicious and high-risk dependencies

Install

lapx - pypi Package Compare versions

Comparing version
0.6.2
to
0.7.0
+159
lap/lapjvs.py
# Copyright (c) 2025 Ratha SIV | MIT License
import numpy as np
from typing import Optional, Tuple
from ._lapjvs import lapjvs as _lapjvs_raw
def lapjvs(
    cost: np.ndarray,
    extend_cost: Optional[bool] = None,
    return_cost: bool = True,
    jvx_like: bool = True,
    prefer_float32: bool = False,
):
    """
    Solve the Linear Assignment Problem using the 'lapjvs' algorithm.

    - Accepts rectangular cost matrices (pads to square internally).
    - Returns lapjv-like (x, y) or jvx-like (rows, cols).
    - Optional prefer_float32 to reduce memory bandwidth.

    Parameters
    ----------
    cost : np.ndarray
        Cost matrix of shape (n, m). dtype should be a floating type.
    extend_cost : Optional[bool]
        If True, treat rectangular inputs by extending (zero-padding) to square.
        If False, require square input.
        If None (default), auto-detect: extend if n != m.
    return_cost : bool
        If True (default), return total cost as the first element of the return tuple.
    jvx_like : bool
        If True (default), return lapjvx/SciPy-style arrays:
        - return_cost=True  -> (total_cost, row_indices, col_indices)
        - return_cost=False -> (row_indices, col_indices)
        If False, return lapjv-style mapping vectors:
        - return_cost=True  -> (total_cost, x, y)
        - return_cost=False -> (x, y)
    prefer_float32 : bool
        If True, cast the cost matrix to float32 before solving.

    Returns
    -------
    One of:
    - (cost, x, y)                      if return_cost and not jvx_like
    - (x, y)                            if not return_cost and not jvx_like
    - (cost, row_indices, col_indices)  if return_cost and jvx_like
    - (row_indices, col_indices)        if not return_cost and jvx_like
    Where:
    - x: np.ndarray shape (n,), dtype=int. x[r] is assigned column for row r, or -1.
    - y: np.ndarray shape (m,), dtype=int. y[c] is assigned row for column c, or -1.
    - row_indices, col_indices: 1D int arrays of equal length K, listing matched (row, col) pairs.

    Notes
    -----
    - For square inputs without extension, this wraps the raw C function directly and adapts outputs.
    - For rectangular inputs, zero-padding exactly models the rectangular LAP.
    """
    a = np.asarray(cost)
    if a.ndim != 2:
        raise ValueError("cost must be a 2D array")
    if prefer_float32 and a.dtype != np.float32:
        # astype always copies when the dtype actually changes; copy=False only
        # avoids a copy for inputs that are already float32 (filtered above).
        a = a.astype(np.float32, copy=False)
    n, m = a.shape
    # Auto-detect padding when the caller did not decide explicitly.
    extend = (n != m) if (extend_cost is None) else bool(extend_cost)

    def _rows_cols_from_x(x_vec: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
        # Convert a lapjv-style x vector into parallel (rows, cols) arrays,
        # skipping unassigned rows (x[r] < 0).
        if x_vec.size == 0:
            return np.empty((0,), dtype=np.int64), np.empty((0,), dtype=np.int64)
        mask = x_vec >= 0
        rows = np.nonzero(mask)[0]
        cols = x_vec[mask]
        return rows.astype(np.int64, copy=False), cols.astype(np.int64, copy=False)

    if not extend:
        # Square (or explicitly non-extended) path: call the C solver directly.
        total_raw, x_raw, y_raw = _lapjvs_raw(a)
        x_raw = np.asarray(x_raw, dtype=np.int64)
        y_raw = np.asarray(y_raw, dtype=np.int64)
        if jvx_like:
            rows, cols = _rows_cols_from_x(x_raw)
            if return_cost:
                # The raw total is not used; the cost is recomputed from `a`
                # so the returned value is a Python float.
                total = float(a[np.arange(n), x_raw].sum()) if n > 0 else 0.0
                return total, rows, cols
            else:
                return rows, cols
        else:
            if return_cost:
                total = float(a[np.arange(n), x_raw].sum()) if n > 0 else 0.0
                return total, x_raw, y_raw
            else:
                return x_raw, y_raw

    # Rectangular: zero-pad to square, solve, then trim back.
    size = max(n, m)
    padded = np.empty((size, size), dtype=a.dtype)
    padded[:n, :m] = a
    if m < size:
        padded[:n, m:] = 0
    if n < size:
        padded[n:, :] = 0
    total_pad, x_pad, y_pad = _lapjvs_raw(padded)
    x_pad = np.asarray(x_pad, dtype=np.int64)
    y_pad = np.asarray(y_pad, dtype=np.int64)
    # Assigned columns for the real rows only; may point into padding (>= m).
    cols_pad_n = x_pad[:n]
    if jvx_like:
        if n == 0 or m == 0:
            rows = np.empty((0,), dtype=np.int64)
            cols = np.empty((0,), dtype=np.int64)
            total = 0.0
        else:
            # Keep only assignments that land inside the original n x m region.
            mask_r = (cols_pad_n >= 0) & (cols_pad_n < m)
            rows = np.nonzero(mask_r)[0].astype(np.int64, copy=False)
            cols = cols_pad_n[mask_r].astype(np.int64, copy=False)
            total = float(a[rows, cols].sum()) if (return_cost and rows.size) else 0.0
        return (total, rows, cols) if return_cost else (rows, cols)
    # lapjv-like outputs: small problems use plain Python loops, larger ones
    # use vectorized masking (same result, different constant factors).
    tiny_threshold = 32
    if max(n, m) <= tiny_threshold:
        x_out = np.full(n, -1, dtype=np.int64)
        for r in range(n):
            c = int(cols_pad_n[r])
            if 0 <= c < m:
                x_out[r] = c
        y_out = np.full(m, -1, dtype=np.int64)
        rows_pad_m = y_pad[:m]
        for c in range(m):
            r = int(rows_pad_m[c])
            if 0 <= r < n:
                y_out[c] = r
    else:
        x_out = np.full(n, -1, dtype=np.int64)
        mask_r = (cols_pad_n >= 0) & (cols_pad_n < m)
        if mask_r.any():
            r_idx = np.nonzero(mask_r)[0]
            x_out[r_idx] = cols_pad_n[mask_r]
        y_out = np.full(m, -1, dtype=np.int64)
        rows_pad_m = y_pad[:m]
        mask_c = (rows_pad_m >= 0) & (rows_pad_m < n)
        if mask_c.any():
            c_idx = np.nonzero(mask_c)[0]
            y_out[c_idx] = rows_pad_m[mask_c]
    if return_cost and n > 0 and m > 0:
        mask = (x_out >= 0)
        total = float(a[np.nonzero(mask)[0], x_out[mask]].sum()) if mask.any() else 0.0
    else:
        total = 0.0
    return (total, x_out, y_out) if return_cost else (x_out, y_out)
#include <functional>
#include <memory>
#include <Python.h>
#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
#include <numpy/arrayobject.h>
#include "lapjvs.h"
// Docstring shown on the extension module object itself.
static char module_docstring[] =
    "This module wraps LAPJVS - Jonker-Volgenant linear sum assignment algorithm (Scalar-only, no AVX2/SIMD).";
// Docstring attached to the single exported function.
static char lapjvs_docstring[] =
    "Solves the linear sum assignment problem (Scalar-only).";
// Forward declaration; implemented below and registered in the method table.
static PyObject *py_lapjvs(PyObject *self, PyObject *args, PyObject *kwargs);
// Method table: exposes lapjvs() taking both positional and keyword args.
static PyMethodDef module_functions[] = {
    {"lapjvs", reinterpret_cast<PyCFunction>(py_lapjvs),
     METH_VARARGS | METH_KEYWORDS, lapjvs_docstring},
    {NULL, NULL, 0, NULL}  // sentinel terminating the table
};
extern "C" {
// Module initialization entry point. The leading underscore in
// PyInit__lapjvs means the importable module is "_lapjvs".
PyMODINIT_FUNC PyInit__lapjvs(void) {
  static struct PyModuleDef moduledef = {
      PyModuleDef_HEAD_INIT,
      "lapjvs",           /* m_name; NOTE(review): init function is
                             PyInit__lapjvs, so the import name is "_lapjvs" —
                             consider making m_name match. */
      module_docstring,   /* m_doc */
      -1,                 /* m_size */
      module_functions,   /* m_methods */
      NULL,               /* m_reload */
      NULL,               /* m_traverse */
      NULL,               /* m_clear */
      NULL,               /* m_free */
  };
  // Initialize the NumPy C API BEFORE creating the module: the previous code
  // called import_array() after PyModule_Create, leaking the module object
  // when NumPy initialization failed. import_array() returns NULL from this
  // function on failure (it is a macro containing `return NULL;`).
  import_array();
  PyObject *m = PyModule_Create(&moduledef);
  if (m == NULL) {
    PyErr_SetString(PyExc_RuntimeError, "PyModule_Create() failed");
    return NULL;
  }
  return m;
}
}
// unique_ptr flavor whose deleter releases a CPython strong reference.
template <typename O>
using pyobj_parent = std::unique_ptr<O, std::function<void(O*)>>;

// RAII owner for PyObject-compatible pointers: Py_DECREFs on destruction
// and on reset(). The deleter is a single static function instead of
// per-constructor lambdas; std::function wraps either identically.
template <typename O>
class _pyobj : public pyobj_parent<O> {
 public:
  _pyobj() : pyobj_parent<O>(nullptr, &_pyobj::decref) {}
  explicit _pyobj(PyObject *ptr)
      : pyobj_parent<O>(reinterpret_cast<O *>(ptr), &_pyobj::decref) {}
  // Take ownership of a raw PyObject*, releasing any previous referent.
  void reset(PyObject *p) noexcept {
    pyobj_parent<O>::reset(reinterpret_cast<O *>(p));
  }

 private:
  static void decref(O *p) {
    if (p) Py_DECREF(p);
  }
};

using pyobj = _pyobj<PyObject>;
using pyarray = _pyobj<PyArrayObject>;
// Dispatch helper: runs the templated scalar LAPJVS solver for cost type F
// (float or double) with the GIL released.
//
// dim              problem size (side of the square matrix)
// cost_matrix      contiguous dim*dim buffer of F values
// verbose          if true, the solver prints progress to stdout
// row_ind, col_ind output assignment vectors, each of length dim
// u, v             output dual-variable buffers of F, each of length dim
// returns          the optimal assignment cost
template <typename F>
static always_inline double call_lap(int dim, const void *restrict cost_matrix,
                                     bool verbose,
                                     int *restrict row_ind, int *restrict col_ind,
                                     void *restrict u, void *restrict v) {
  double lapcost;
  // Pure native computation on caller-owned buffers: safe to release the GIL
  // for the duration of the solve.
  Py_BEGIN_ALLOW_THREADS
  auto cost_matrix_typed = reinterpret_cast<const F*>(cost_matrix);
  auto u_typed = reinterpret_cast<F*>(u);
  auto v_typed = reinterpret_cast<F*>(v);
  // verbose is a template parameter of lapjvs<>, hence the runtime branch
  // selecting between the two instantiations.
  if (verbose) {
    lapcost = lapjvs<true>(dim, cost_matrix_typed, row_ind, col_ind, u_typed, v_typed);
  } else {
    lapcost = lapjvs<false>(dim, cost_matrix_typed, row_ind, col_ind, u_typed, v_typed);
  }
  Py_END_ALLOW_THREADS
  return lapcost;
}
// Python-facing wrapper: lapjvs(cost_matrix, verbose=False,
// force_doubles=False, return_original=False).
//
// Returns (lapcost, row_ind, col_ind), or
// (row_ind, col_ind, (lapcost, u, v)) when return_original is true.
static PyObject *py_lapjvs(PyObject *self, PyObject *args, PyObject *kwargs) {
  PyObject *cost_matrix_obj;
  int verbose = 0;
  int force_doubles = 0;
  int return_original = 0;
  static const char *kwlist[] = {
      "cost_matrix", "verbose", "force_doubles", "return_original", NULL};
  // FIX: the previous format "O|pbb" used 'b', which writes a single
  // unsigned char into these int variables (undefined behavior). 'p'
  // (bool predicate) writes a full int and accepts any truthy object.
  if (!PyArg_ParseTupleAndKeywords(
          args, kwargs, "O|ppp", const_cast<char**>(kwlist),
          &cost_matrix_obj, &verbose, &force_doubles, &return_original)) {
    return NULL;
  }
  // Fast default: process as float32 unless force_doubles is set.
  // FIX: previously the float32 conversion was still attempted (without
  // FORCECAST) when force_doubles was set, so a float32 input silently
  // stayed float32. Now force_doubles skips the float32 path entirely.
  pyarray cost_matrix_array;
  bool float32 = false;
  if (!force_doubles) {
    cost_matrix_array.reset(PyArray_FROM_OTF(
        cost_matrix_obj, NPY_FLOAT32,
        NPY_ARRAY_IN_ARRAY | NPY_ARRAY_FORCECAST));
    float32 = static_cast<bool>(cost_matrix_array);
  }
  if (!cost_matrix_array) {
    PyErr_Clear();
    cost_matrix_array.reset(PyArray_FROM_OTF(
        cost_matrix_obj, NPY_FLOAT64, NPY_ARRAY_IN_ARRAY));
    if (!cost_matrix_array) {
      PyErr_SetString(PyExc_ValueError, "\"cost_matrix\" must be a numpy array of float32 or float64 dtype");
      return NULL;
    }
  }
  auto ndims = PyArray_NDIM(cost_matrix_array.get());
  if (ndims != 2) {
    PyErr_SetString(PyExc_ValueError, "\"cost_matrix\" must be a square 2D numpy array");
    return NULL;
  }
  auto dims = PyArray_DIMS(cost_matrix_array.get());
  if (dims[0] != dims[1]) {
    PyErr_SetString(PyExc_ValueError, "\"cost_matrix\" must be a square 2D numpy array");
    return NULL;
  }
  int dim = static_cast<int>(dims[0]);
  // Reject empty matrices and sides that overflow int (the truncation check
  // catches npy_intp values wider than int).
  if (dim <= 0 || static_cast<npy_intp>(dim) != dims[0]) {
    PyErr_SetString(PyExc_ValueError, "\"cost_matrix\"'s shape is invalid or too large");
    return NULL;
  }
  auto cost_matrix = PyArray_DATA(cost_matrix_array.get());
  npy_intp ret_dims[] = {dim, 0};
  pyarray row_ind_array(PyArray_SimpleNew(1, ret_dims, NPY_INT));
  pyarray col_ind_array(PyArray_SimpleNew(1, ret_dims, NPY_INT));
  // FIX: allocation failures were previously dereferenced unchecked.
  if (!row_ind_array || !col_ind_array) {
    return NULL;  // NumPy has already set MemoryError
  }
  auto row_ind = reinterpret_cast<int*>(PyArray_DATA(row_ind_array.get()));
  auto col_ind = reinterpret_cast<int*>(PyArray_DATA(col_ind_array.get()));
  double lapcost;
  if (return_original) {
    // Allocate NumPy arrays for u, v only if they are returned.
    pyarray u_array(PyArray_SimpleNew(
        1, ret_dims, float32? NPY_FLOAT32 : NPY_FLOAT64));
    pyarray v_array(PyArray_SimpleNew(
        1, ret_dims, float32? NPY_FLOAT32 : NPY_FLOAT64));
    if (!u_array || !v_array) {
      return NULL;
    }
    auto u = PyArray_DATA(u_array.get());
    auto v = PyArray_DATA(v_array.get());
    if (float32) {
      lapcost = call_lap<float>(dim, cost_matrix, verbose, row_ind, col_ind, u, v);
    } else {
      lapcost = call_lap<double>(dim, cost_matrix, verbose, row_ind, col_ind, u, v);
    }
    return Py_BuildValue("(OO(dOO))",
                         row_ind_array.get(), col_ind_array.get(), lapcost,
                         u_array.get(), v_array.get());
  } else {
    // Temporary heap buffers for u, v to avoid NumPy allocation overhead.
    if (float32) {
      std::unique_ptr<float[]> u(new float[dim]);
      std::unique_ptr<float[]> v(new float[dim]);
      lapcost = call_lap<float>(dim, cost_matrix, verbose, row_ind, col_ind, u.get(), v.get());
    } else {
      std::unique_ptr<double[]> u(new double[dim]);
      std::unique_ptr<double[]> v(new double[dim]);
      lapcost = call_lap<double>(dim, cost_matrix, verbose, row_ind, col_ind, u.get(), v.get());
    }
    return Py_BuildValue("(dOO)", lapcost, row_ind_array.get(), col_ind_array.get());
  }
}
#include <cassert>
#include <cstdio>
#include <limits>
#include <memory>
// FIX: std::tuple / std::tie / std::make_tuple are used below (find_umins*),
// but <tuple> was never included — the build relied on a transitive include.
#include <tuple>

// Portability shims: force inlining and C99-style `restrict` across
// compilers (GCC/Clang, MSVC, and a no-op fallback elsewhere).
#ifdef __GNUC__
#define always_inline __attribute__((always_inline)) inline
#define restrict __restrict__
#elif _WIN32
#define always_inline __forceinline
#define restrict __restrict
#else
#define always_inline inline
#define restrict
#endif
/// Scan row i of the cost matrix and return its smallest and second-smallest
/// reduced costs (cost[i][j] - v[j]) together with their column indices.
/// Returned as (umin, usubmin, j1, j2); j2 is -1 when dim == 1.
template <typename idx, typename cost>
always_inline std::tuple<cost, cost, idx, idx>
find_umins_regular(
    idx dim, idx i, const cost *restrict assign_cost,
    const cost *restrict v) {
  const cost *row = &assign_cost[i * dim];
  cost best = row[0] - v[0];
  cost second = std::numeric_limits<cost>::max();
  idx best_j = 0;
  idx second_j = -1;
  for (idx j = 1; j < dim; j++) {
    const cost reduced = row[j] - v[j];
    if (reduced >= second) {
      continue;  // not better than the current runner-up
    }
    if (reduced < best) {
      // New minimum: the previous minimum becomes the runner-up.
      second = best;
      second_j = best_j;
      best = reduced;
      best_j = j;
    } else {
      // Between best and second (ties with best land here too).
      second = reduced;
      second_j = j;
    }
  }
  return std::make_tuple(best, second, best_j, second_j);
}
/// Thin dispatch wrapper around find_umins_regular. In this scalar-only
/// port it always forwards to the regular implementation (presumably the
/// hook where the upstream project selected a SIMD variant — verify against
/// the original src-d/lapjv sources).
template <typename idx, typename cost>
always_inline std::tuple<cost, cost, idx, idx>
find_umins(
    idx dim, idx i, const cost *restrict assign_cost,
    const cost *restrict v) {
  return find_umins_regular(dim, i, assign_cost, v);
}
/// @brief Exact Jonker-Volgenant algorithm (scalar-only).
/// @param dim in problem size (side of the square cost matrix)
/// @param assign_cost in cost matrix, row-major, size dim*dim
/// @param verbose in (template) indicates whether to report progress to stdout
/// @param rowsol out column assigned to row in solution / size dim
/// @param colsol out row assigned to column in solution / size dim
/// @param u out dual variables, row reduction numbers / size dim
/// @param v out dual variables, column reduction numbers / size dim
/// @return achieved minimum assignment cost
template <bool verbose, typename idx, typename cost>
cost lapjvs(int dim, const cost *restrict assign_cost, idx *restrict rowsol,
            idx *restrict colsol, cost *restrict u, cost *restrict v) {
  auto collist = std::make_unique<idx[]>(dim);  // list of columns to be scanned in various ways.
  auto matches = std::make_unique<idx[]>(dim);  // counts how many times a row could be assigned.
  auto d = std::make_unique<cost[]>(dim);       // 'cost-distance' in augmenting path calculation.
  auto pred = std::make_unique<idx[]>(dim);     // row-predecessor of column in augmenting/alternating path.
  // init how many times a row will be assigned in the column reduction.
  for (idx i = 0; i < dim; i++) {
    matches[i] = 0;
  }
  // COLUMN REDUCTION
  for (idx j = dim - 1; j >= 0; j--) {  // reverse order gives better results.
    // find minimum cost over rows (start from row 0's entry in column j).
    cost min = assign_cost[j];
    idx imin = 0;
    for (idx i = 1; i < dim; i++) {
      const cost *local_cost = &assign_cost[i * dim];
      if (local_cost[j] < min) {
        min = local_cost[j];
        imin = i;
      }
    }
    v[j] = min;
    if (++matches[imin] == 1) {
      // init assignment if minimum row assigned for first time.
      rowsol[imin] = j;
      colsol[j] = imin;
    } else {
      colsol[j] = -1;  // row already assigned, column not assigned.
    }
  }
  if (verbose) {
    printf("lapjvs: COLUMN REDUCTION finished\n");
  }
  // REDUCTION TRANSFER
  // NOTE(review): `free` aliases the matches buffer — it is safe only
  // because each matches[i] is read before free[] can overwrite that slot
  // (free grows at most one entry per iteration, and only for i itself).
  auto free = matches.get();  // list of unassigned rows.
  idx numfree = 0;
  for (idx i = 0; i < dim; i++) {
    const cost *local_cost = &assign_cost[i * dim];
    if (matches[i] == 0) {  // fill list of unassigned 'free' rows.
      free[numfree++] = i;
    } else if (matches[i] == 1) {  // transfer reduction from rows that are assigned once.
      idx j1 = rowsol[i];
      cost min = std::numeric_limits<cost>::max();
      for (idx j = 0; j < dim; j++) {
        if (j != j1) {
          if (local_cost[j] - v[j] < min) {
            min = local_cost[j] - v[j];
          }
        }
      }
      v[j1] = v[j1] - min;
    }
  }
  if (verbose) {
    printf("lapjvs: REDUCTION TRANSFER finished\n");
  }
  // AUGMENTING ROW REDUCTION
  for (int loopcnt = 0; loopcnt < 2; loopcnt++) {  // loop to be done twice.
    idx k = 0;
    idx prevnumfree = numfree;
    numfree = 0;  // start list of rows still free after augmenting row reduction.
    while (k < prevnumfree) {
      idx i = free[k++];
      // find minimum and second minimum reduced cost over columns.
      cost umin, usubmin;
      idx j1, j2;
      std::tie(umin, usubmin, j1, j2) = find_umins(dim, i, assign_cost, v);
      idx i0 = colsol[j1];
      cost vj1_new = v[j1] - (usubmin - umin);
      // Compare the updated dual instead of testing usubmin > umin directly;
      // per the original author this sidesteps a floating-point epsilon bug.
      bool vj1_lowers = vj1_new < v[j1];  // the trick to eliminate the epsilon bug
      if (vj1_lowers) {
        v[j1] = vj1_new;
      } else if (i0 >= 0) {  // minimum and subminimum equal.
        j1 = j2;
        i0 = colsol[j2];
      }
      // (re-)assign row i to its best column, displacing i0 if present.
      rowsol[i] = j1;
      colsol[j1] = i;
      if (i0 >= 0) {
        if (vj1_lowers) {
          free[--k] = i0;  // displaced row is retried immediately.
        } else {
          free[numfree++] = i0;  // displaced row is deferred to the next pass.
        }
      }
    }
    if (verbose) {
      printf("lapjvs: AUGMENTING ROW REDUCTION %d / %d\n", loopcnt + 1, 2);
    }
  }
  // AUGMENT SOLUTION for each free row.
  for (idx f = 0; f < numfree; f++) {
    idx endofpath;
    idx freerow = free[f];  // start row of augmenting path.
    if (verbose) {
      printf("lapjvs: AUGMENT SOLUTION row %d [%d / %d]\n",
             freerow, f + 1, numfree);
    }
    // Dijkstra shortest path algorithm.
    // collist is partitioned as [0, low) scanned | [low, up) frontier at
    // distance `min` | [up, dim) unexplored.
    for (idx j = 0; j < dim; j++) {
      d[j] = assign_cost[freerow * dim + j] - v[j];
      pred[j] = freerow;
      collist[j] = j;
    }
    idx low = 0;
    idx up = 0;
    bool unassigned_found = false;
    idx last = 0;
    cost min = 0;
    do {
      if (up == low) {
        // Frontier exhausted: rebuild it from the columns with minimal d[].
        last = low - 1;
        min = d[collist[up++]];
        for (idx k = up; k < dim; k++) {
          idx j = collist[k];
          cost h = d[j];
          if (h <= min) {
            if (h < min) {
              up = low;  // strictly smaller: restart the frontier.
              min = h;
            }
            collist[k] = collist[up];
            collist[up++] = j;
          }
        }
        // If any frontier column is unassigned, the augmenting path ends there.
        for (idx k = low; k < up; k++) {
          if (colsol[collist[k]] < 0) {
            endofpath = collist[k];
            unassigned_found = true;
            break;
          }
        }
      }
      if (!unassigned_found) {
        // Relax edges leaving the next frontier column's assigned row.
        idx j1 = collist[low];
        low++;
        idx i = colsol[j1];
        const cost *local_cost = &assign_cost[i * dim];
        cost h = local_cost[j1] - v[j1] - min;
        for (idx k = up; k < dim; k++) {
          idx j = collist[k];
          cost v2 = local_cost[j] - v[j] - h;
          if (v2 < d[j]) {
            pred[j] = i;
            if (v2 == min) {  // new column found at same minimum distance.
              if (colsol[j] < 0) {
                endofpath = j;  // unassigned: shortest augmenting path found.
                unassigned_found = true;
                break;
              } else {
                collist[k] = collist[up];  // assigned: add to frontier.
                collist[up++] = j;
              }
            }
            d[j] = v2;
          }
        }
      }
    } while (!unassigned_found);
    // update column prices for all scanned columns.
    for (idx k = 0; k <= last; k++) {
      idx j1 = collist[k];
      v[j1] = v[j1] + d[j1] - min;
    }
    // walk the predecessor chain back to freerow, flipping assignments.
    {
      idx i;
      do {
        i = pred[endofpath];
        colsol[endofpath] = i;
        idx j1 = endofpath;
        endofpath = rowsol[i];
        rowsol[i] = j1;
      } while (i != freerow);
    }
  }
  if (verbose) {
    printf("lapjvs: AUGMENT SOLUTION finished\n");
  }
  // calculate optimal cost and the row duals u from the final v.
  cost lapcost = 0;
  for (idx i = 0; i < dim; i++) {
    const cost *local_cost = &assign_cost[i * dim];
    idx j = rowsol[i];
    u[i] = local_cost[j] - v[j];
    lapcost += local_cost[j];
  }
  if (verbose) {
    printf("lapjvs: optimal cost calculated\n");
  }
  return lapcost;
}
MIT License
Copyright (c) 2016 Vadim Markovtsev (source{d})
Copyright (c) 2025 Ratha SIV
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
+18
-45
# Copyright (c) 2025 Ratha SIV | MIT License
"""LAPX
"""
LAPX — Jonker-Volgenant (JV) linear assignment solvers.
A linear assignment problem solver using the Jonker-Volgenant
algorithm, providing:
- Sparse (lapmod)
- Enhanced dense (lapjv, lapjvx, lapjvxa)
- Classic dense (lapjvc)
Common Parameters for lapjv, lapjvx, lapjvxa

@@ -24,41 +18,21 @@ --------------------------------------------

Assignment API Overview
-----------------------
Provided solvers
----------------
- lapmod : Sparse assignment solver (for sparse cost matrices) by Tomas Kazmar's lap.
- lapjv : JV assignment solver by Tomas Kazmar's lap; returns JV-style mappings (x, y).
- lapjvx : Enhanced lapjv by lapx; returns with SciPy-like outputs (rows, cols).
- lapjvxa : Convenience wrapper of lapjvx by lapx; returns (K, 2) assignment pairs.
- lapjvc : Classic JV variant by Christoph Heindl's lapsolver; returns (rows, cols).
- lapjvs : Enhanced Vadim Markovtsev's lapjv by lapx; returns either style.
There are *multiple interfaces* for dense assignment problems:
lapmod
Find optimal (minimum-cost) assignment for a sparse cost matrix.
lapjv
Enhanced Jonker-Volgenant for dense cost matrices.
**Returns:** (x, y) or (cost, x, y)
- `x[i]`: assigned column for row i (or -1 if unassigned).
- `y[j]`: assigned row for column j (or -1 if unassigned).
- Not in parallel assignment format—**do not use** `list(zip(x, y))` directly!
lapjvx
Enhanced Jonker-Volgenant with practical output.
**Returns:** (cost, row_indices, col_indices) or (row_indices, col_indices)
- Parallel arrays: row_indices[i] assigned to col_indices[i].
- Matches the output style of ``scipy.optimize.linear_sum_assignment`` and ``lapjvc``.
lapjvxa
Enhanced Jonker-Volgenant with assignment array output.
**Returns:** (cost, assignments) or (assignments)
- `assignments`: array of shape (K, 2), each row is (row_index, col_index).
- Most convenient for direct iteration or indexing.
lapjvc
Classic Jonker-Volgenant for dense cost matrices.
**Returns:** (cost, row_indices, col_indices) or (row_indices, col_indices)
- `row_indices` and `col_indices` are parallel arrays: row_indices[i] assigned to col_indices[i].
- You can do: `assignments = np.array(list(zip(row_indices, col_indices)))`
Notes
-----
- All solvers in lapx handle both square and rectangular cost matrices.
- Output formats may differ by solvers and input parameters.
- For details and benchmarks, see the official repo https://github.com/rathaROG/lapx .
"""
__version__ = '0.6.2'
__version__ = '0.7.0'
from .lapmod import lapmod
from ._lapjv import (

@@ -71,3 +45,2 @@ lapjv,

)
from ._lapjvx import (

@@ -77,5 +50,5 @@ lapjvx,

)
from ._lapjvc import lapjvc
from .lapjvs import lapjvs
__all__ = ['lapmod', 'lapjv', 'lapjvx', 'lapjvxa', 'lapjvc', 'FP_1', 'FP_2', 'FP_DYNAMIC', 'LARGE']
__all__ = ['lapmod', 'lapjv', 'lapjvx', 'lapjvxa', 'lapjvc', 'lapjvs', 'FP_1', 'FP_2', 'FP_DYNAMIC', 'LARGE']
Metadata-Version: 2.4
Name: lapx
Version: 0.6.2
Summary: Linear Assignment Problem solver (LAPJV/LAPMOD).
Version: 0.7.0
Summary: Linear assignment problem solvers
Home-page: https://github.com/rathaROG/lapx

@@ -9,3 +9,3 @@ Author: Ratha SIV

License: MIT
Keywords: Linear Assignment Problem Solver,LAP solver,Jonker-Volgenant Algorithm,LAPJV,LAPMOD,lap,lapx,lapjvx,lapjvxa,lapjvc
Keywords: Linear Assignment Problem Solver,LAP solver,Jonker-Volgenant Algorithm,LAPJV,LAPMOD,lap,lapx,lapjvx,lapjvxa,lapjvc,lapjvs
Classifier: Development Status :: 4 - Beta

@@ -55,2 +55,3 @@ Classifier: Environment :: Console

- 2025/10/21: `lapx` [v0.7.0](https://github.com/rathaROG/lapx/releases/tag/v0.7.0) introduced **`lapjvs()`**.
- 2025/10/16: `lapx` [v0.6.0](https://github.com/rathaROG/lapx/releases/tag/v0.6.0) introduced **`lapjvx()`**, **`lapjvxa()`**, and **`lapjvc()`**.

@@ -69,9 +70,15 @@ - 2025/10/15: Added Python 3.14 support and [more](https://github.com/rathaROG/lapx/pull/15).

# Linear Assignment Problem Solver
# Linear Assignment Problem Solvers
`lapx` is an enhanced version of Tomas Kazmar's [`lap`](https://github.com/gatagat/lap), featuring the core **`lapjv()`** and **`lapmod()`** functions along with three additional functions — **`lapjvx()`**, **`lapjvxa()`**, and **`lapjvc()`** — introduced in [v0.6.0](https://github.com/rathaROG/lapx/releases/tag/v0.6.0).
`lapx` was initially created to maintain Tomas Kazmar's [`lap`](https://github.com/gatagat/lap), but has since evolved to offer much more.
`lapx` features the original **`lapjv()`** and **`lapmod()`** functions, and since **v0.6.0**, `lapx` has introduced three additional assignment solvers:
- **`lapjvx()`** and **`lapjvxa()`** — enhanced versions of [`lap.lapjv()`](https://github.com/gatagat/lap) with more flexible output formats
- **`lapjvc()`** — an enhanced version of Christoph Heindl’s [`lapsolver.solve_dense()`](https://github.com/cheind/py-lapsolver) with unified output formats
`lapx` **v0.7.0** has introduced a new function: **`lapjvs()`** — an enhanced version of Vadim Markovtsev’s [`lapjv`](https://github.com/src-d/lapjv), supporting both rectangular and square cost matrices, with flexible output styles.
<details><summary>Read more</summary><br>
Tomas Kazmar's [`lap`](https://github.com/gatagat/lap) is a [linear assignment problem](https://en.wikipedia.org/wiki/Assignment_problem) solver using Jonker-Volgenant algorithm for dense LAPJV ¹ or sparse LAPMOD ² matrices. Both algorithms are implemented from scratch based solely on the papers ¹˒² and the public domain Pascal implementation provided by A. Volgenant ³. The LAPMOD implementation seems to be faster than the LAPJV implementation for matrices with a side of more than ~5000 and with less than 50% finite coefficients.
All [linear assignment problem](https://en.wikipedia.org/wiki/Assignment_problem) solvers in `lapx` are based on Jonker-Volgenant algorithm for dense LAPJV ¹ or sparse LAPMOD ² matrices. Tomas Kazmar's [`lap`](https://github.com/gatagat/lap) implemented the core **`lapjv()`** and **`lapmod()`** from scratch based solely on the papers ¹˒² and the public domain Pascal implementation provided by A. Volgenant ³.

@@ -175,3 +182,3 @@ <sup>¹ R. Jonker and A. Volgenant, "A Shortest Augmenting Path Algorithm for Dense and Sparse Linear Assignment Problems", Computing 38, 325-340 (1987) </sup><br>

This function `lapjvx()` basically is `lapjv()`, but it matches the return style of SciPy's [`linear_sum_assignment`](https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.linear_sum_assignment.html). You can see how it compares to others in the Object Tracking benchmark [here](https://github.com/rathaROG/lapx/blob/main/benchmark.md#-object-tracking).
`lapjvx()` basically is `lapjv()`, but it matches the return style of SciPy's [`linear_sum_assignment`](https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.linear_sum_assignment.html) with no additional overhead. You can see how it compares to others in the Object Tracking benchmark [here](https://github.com/rathaROG/lapx/blob/main/benchmark.md#-object-tracking).

@@ -190,3 +197,3 @@ ```python

This function `lapjvxa()` is essentially the same as `lapjvx()`, but it returns assignments with shape `(K, 2)` directly — no additional or manual post-processing required. `lapjvxa()` is intended for applications that only need the final assignments and do not require control over the `cost_limit` parameter.
`lapjvxa()` is essentially the same as `lapjvx()`, but it returns assignments with shape `(K, 2)` directly — no additional or manual post-processing required. `lapjvxa()` is optimized for applications that only need the final assignments and do not require control over the `cost_limit` parameter.

@@ -206,3 +213,3 @@ ```python

This function `lapjvc()`, which is the classical implementation of Jonker-Volgenant — [py-lapsolver](https://github.com/cheind/py-lapsolver), is as fast as (if not faster than) other functions when `n=m` (the cost matrix is square), but it is much slower when `n≠m` (the cost matrix is not square). This function adopts the return style of `lapjvx()` — the same as SciPy's [`linear_sum_assignment`](https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.linear_sum_assignment.html).
`lapjvc()` is an enhanced version of Christoph Heindl's [py-lapsolver](https://github.com/cheind/py-lapsolver). `lapjvc()` is as fast as (if not faster than) other functions when `n=m` (the cost matrix is square), but it is much slower when `n≠m` (the cost matrix is rectangular). This function adopts the return style of `lapjvx()` — the same as SciPy's [`linear_sum_assignment`](https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.linear_sum_assignment.html).

@@ -219,7 +226,23 @@ ```python

<details><summary>Show <code>lapjvs()</code></summary>
### 5. The new function ``lapjvs()``
`lapjvs()` is an enhanced version of Vadim Markovtsev's [`lapjv`](https://github.com/src-d/lapjv). While `lapjvs()` does not use CPU special instruction sets like the original implementation, it still delivers comparable performance. It natively supports both square and rectangular cost matrices and can produce output either in SciPy's [`linear_sum_assignment`](https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.linear_sum_assignment.html) style or `(x, y)` mappings. See the [docstring here](https://github.com/rathaROG/lapx/blob/main/lap/lapjvs.py) for more details.
```python
import numpy as np, lap
# row_indices, col_indices = lap.lapjvs(np.random.rand(4, 5), return_cost=False, jvx_like=True)
total_cost, row_indices, col_indices = lap.lapjvs(np.random.rand(4, 5), return_cost=True, jvx_like=True)
assignments = np.array(list(zip(row_indices, col_indices)))
```
</details>
<details><summary>Show <code>lapmod()</code></summary>
### 5. The original function ``lapmod()``
### 6. The original function ``lapmod()``
For see [the docstring](https://github.com/rathaROG/lapx/blob/8d56b42265a23c3b5a290b1039dacaac70dfe60d/lap/lapmod.py#L275) for details.
See the [docstring](https://github.com/rathaROG/lapx/blob/8d56b42265a23c3b5a290b1039dacaac70dfe60d/lap/lapmod.py#L275) for details.

@@ -226,0 +249,0 @@ ```python

@@ -9,2 +9,3 @@ LICENSE

lap/__init__.py
lap/lapjvs.py
lap/lapmod.py

@@ -27,2 +28,5 @@ lapx.egg-info/PKG-INFO

src/_lapjvc/dense_wrap.hpp
src/_lapjvc/lapjvc.cpp
src/_lapjvc/lapjvc.cpp
src/_lapjvs/LICENSE
src/_lapjvs/lapjvs.cpp
src/_lapjvs/lapjvs.h
+16
-2

@@ -6,3 +6,3 @@ NOTICE

Portions of the software are derived from two other projects as follows:
Portions of the software are derived from three other projects as follows:

@@ -40,3 +40,17 @@ ---

3. Project: lapjv
- Author: Vadim Markovtsev (source{d})
- License: MIT License
- URL: https://github.com/src-d/lapjv
- Copyright (c) 2016 Vadim Markovtsev (source{d})
All source files under `src/_lapjvs/` are derived from the original project
and retain their MIT License.
See `src/_lapjvs/LICENSE` for more details.
---
For a full list of contributors of the project:
https://github.com/rathaROG/lapx/graphs/contributors
https://github.com/rathaROG/lapx/graphs/contributors
+34
-11
Metadata-Version: 2.4
Name: lapx
Version: 0.6.2
Summary: Linear Assignment Problem solver (LAPJV/LAPMOD).
Version: 0.7.0
Summary: Linear assignment problem solvers
Home-page: https://github.com/rathaROG/lapx

@@ -9,3 +9,3 @@ Author: Ratha SIV

License: MIT
Keywords: Linear Assignment Problem Solver,LAP solver,Jonker-Volgenant Algorithm,LAPJV,LAPMOD,lap,lapx,lapjvx,lapjvxa,lapjvc
Keywords: Linear Assignment Problem Solver,LAP solver,Jonker-Volgenant Algorithm,LAPJV,LAPMOD,lap,lapx,lapjvx,lapjvxa,lapjvc,lapjvs
Classifier: Development Status :: 4 - Beta

@@ -55,2 +55,3 @@ Classifier: Environment :: Console

- 2025/10/21: `lapx` [v0.7.0](https://github.com/rathaROG/lapx/releases/tag/v0.7.0) introduced **`lapjvs()`**.
- 2025/10/16: `lapx` [v0.6.0](https://github.com/rathaROG/lapx/releases/tag/v0.6.0) introduced **`lapjvx()`**, **`lapjvxa()`**, and **`lapjvc()`**.

@@ -69,9 +70,15 @@ - 2025/10/15: Added Python 3.14 support and [more](https://github.com/rathaROG/lapx/pull/15).

# Linear Assignment Problem Solver
# Linear Assignment Problem Solvers
`lapx` is an enhanced version of Tomas Kazmar's [`lap`](https://github.com/gatagat/lap), featuring the core **`lapjv()`** and **`lapmod()`** functions along with three additional functions — **`lapjvx()`**, **`lapjvxa()`**, and **`lapjvc()`** — introduced in [v0.6.0](https://github.com/rathaROG/lapx/releases/tag/v0.6.0).
`lapx` was initially created to maintain Tomas Kazmar's [`lap`](https://github.com/gatagat/lap), but has since evolved to offer much more.
`lapx` features the original **`lapjv()`** and **`lapmod()`** functions, and since **v0.6.0**, `lapx` has introduced three additional assignment solvers:
- **`lapjvx()`** and **`lapjvxa()`** — enhanced versions of [`lap.lapjv()`](https://github.com/gatagat/lap) with more flexible output formats
- **`lapjvc()`** — an enhanced version of Christoph Heindl’s [`lapsolver.solve_dense()`](https://github.com/cheind/py-lapsolver) with unified output formats
`lapx` **v0.7.0** has introduced a new function: **`lapjvs()`** — an enhanced version of Vadim Markovtsev’s [`lapjv`](https://github.com/src-d/lapjv), supporting both rectangular and square cost matrices, with flexible output styles.
<details><summary>Read more</summary><br>
Tomas Kazmar's [`lap`](https://github.com/gatagat/lap) is a [linear assignment problem](https://en.wikipedia.org/wiki/Assignment_problem) solver using Jonker-Volgenant algorithm for dense LAPJV ¹ or sparse LAPMOD ² matrices. Both algorithms are implemented from scratch based solely on the papers ¹˒² and the public domain Pascal implementation provided by A. Volgenant ³. The LAPMOD implementation seems to be faster than the LAPJV implementation for matrices with a side of more than ~5000 and with less than 50% finite coefficients.
All [linear assignment problem](https://en.wikipedia.org/wiki/Assignment_problem) solvers in `lapx` are based on Jonker-Volgenant algorithm for dense LAPJV ¹ or sparse LAPMOD ² matrices. Tomas Kazmar's [`lap`](https://github.com/gatagat/lap) implemented the core **`lapjv()`** and **`lapmod()`** from scratch based solely on the papers ¹˒² and the public domain Pascal implementation provided by A. Volgenant ³.

@@ -175,3 +182,3 @@ <sup>¹ R. Jonker and A. Volgenant, "A Shortest Augmenting Path Algorithm for Dense and Sparse Linear Assignment Problems", Computing 38, 325-340 (1987) </sup><br>

This function `lapjvx()` basically is `lapjv()`, but it matches the return style of SciPy's [`linear_sum_assignment`](https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.linear_sum_assignment.html). You can see how it compares to others in the Object Tracking benchmark [here](https://github.com/rathaROG/lapx/blob/main/benchmark.md#-object-tracking).
`lapjvx()` basically is `lapjv()`, but it matches the return style of SciPy's [`linear_sum_assignment`](https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.linear_sum_assignment.html) with no additional overhead. You can see how it compares to others in the Object Tracking benchmark [here](https://github.com/rathaROG/lapx/blob/main/benchmark.md#-object-tracking).

@@ -190,3 +197,3 @@ ```python

This function `lapjvxa()` is essentially the same as `lapjvx()`, but it returns assignments with shape `(K, 2)` directly — no additional or manual post-processing required. `lapjvxa()` is intended for applications that only need the final assignments and do not require control over the `cost_limit` parameter.
`lapjvxa()` is essentially the same as `lapjvx()`, but it returns assignments with shape `(K, 2)` directly — no additional or manual post-processing required. `lapjvxa()` is optimized for applications that only need the final assignments and do not require control over the `cost_limit` parameter.

@@ -206,3 +213,3 @@ ```python

This function `lapjvc()`, which is the classical implementation of Jonker-Volgenant — [py-lapsolver](https://github.com/cheind/py-lapsolver), is as fast as (if not faster than) other functions when `n=m` (the cost matrix is square), but it is much slower when `n≠m` (the cost matrix is not square). This function adopts the return style of `lapjvx()` — the same as SciPy's [`linear_sum_assignment`](https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.linear_sum_assignment.html).
`lapjvc()` is an enhanced version of Christoph Heindl's [py-lapsolver](https://github.com/cheind/py-lapsolver). `lapjvc()` is as fast as (if not faster than) other functions when `n=m` (the cost matrix is square), but it is much slower when `n≠m` (the cost matrix is rectangular). This function adopts the return style of `lapjvx()` — the same as SciPy's [`linear_sum_assignment`](https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.linear_sum_assignment.html).

@@ -219,7 +226,23 @@ ```python

<details><summary>Show <code>lapjvs()</code></summary>
### 5. The new function ``lapjvs()``
`lapjvs()` is an enhanced version of Vadim Markovtsev's [`lapjv`](https://github.com/src-d/lapjv). While `lapjvs()` does not use CPU special instruction sets like the original implementation, it still delivers comparable performance. It natively supports both square and rectangular cost matrices and can produce output either in SciPy's [`linear_sum_assignment`](https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.linear_sum_assignment.html) style or `(x, y)` mappings. See the [docstring here](https://github.com/rathaROG/lapx/blob/main/lap/lapjvs.py) for more details.
```python
import numpy as np, lap
# row_indices, col_indices = lap.lapjvs(np.random.rand(4, 5), return_cost=False, jvx_like=True)
total_cost, row_indices, col_indices = lap.lapjvs(np.random.rand(4, 5), return_cost=True, jvx_like=True)
assignments = np.array(list(zip(row_indices, col_indices)))
```
</details>
<details><summary>Show <code>lapmod()</code></summary>
### 5. The original function ``lapmod()``
### 6. The original function ``lapmod()``
See [the docstring](https://github.com/rathaROG/lapx/blob/8d56b42265a23c3b5a290b1039dacaac70dfe60d/lap/lapmod.py#L275) for details.
See the [docstring](https://github.com/rathaROG/lapx/blob/8d56b42265a23c3b5a290b1039dacaac70dfe60d/lap/lapmod.py#L275) for details.

@@ -226,0 +249,0 @@ ```python

<details><summary>🆕 What's new</summary><br>
- 2025/10/21: `lapx` [v0.7.0](https://github.com/rathaROG/lapx/releases/tag/v0.7.0) introduced **`lapjvs()`**.
- 2025/10/16: `lapx` [v0.6.0](https://github.com/rathaROG/lapx/releases/tag/v0.6.0) introduced **`lapjvx()`**, **`lapjvxa()`**, and **`lapjvc()`**.

@@ -16,9 +17,15 @@ - 2025/10/15: Added Python 3.14 support and [more](https://github.com/rathaROG/lapx/pull/15).

# Linear Assignment Problem Solver
# Linear Assignment Problem Solvers
`lapx` is an enhanced version of Tomas Kazmar's [`lap`](https://github.com/gatagat/lap), featuring the core **`lapjv()`** and **`lapmod()`** functions along with three additional functions — **`lapjvx()`**, **`lapjvxa()`**, and **`lapjvc()`** — introduced in [v0.6.0](https://github.com/rathaROG/lapx/releases/tag/v0.6.0).
`lapx` was initially created to maintain Tomas Kazmar's [`lap`](https://github.com/gatagat/lap), but has since evolved to offer much more.
`lapx` features the original **`lapjv()`** and **`lapmod()`** functions, and since **v0.6.0**, `lapx` has introduced three additional assignment solvers:
- **`lapjvx()`** and **`lapjvxa()`** — enhanced versions of [`lap.lapjv()`](https://github.com/gatagat/lap) with more flexible output formats
- **`lapjvc()`** — an enhanced version of Christoph Heindl’s [`lapsolver.solve_dense()`](https://github.com/cheind/py-lapsolver) with unified output formats
`lapx` **v0.7.0** has introduced a new function: **`lapjvs()`** — an enhanced version of Vadim Markovtsev’s [`lapjv`](https://github.com/src-d/lapjv), supporting both rectangular and square cost matrices, with flexible output styles.
<details><summary>Read more</summary><br>
Tomas Kazmar's [`lap`](https://github.com/gatagat/lap) is a [linear assignment problem](https://en.wikipedia.org/wiki/Assignment_problem) solver using the Jonker-Volgenant algorithm for dense LAPJV ¹ or sparse LAPMOD ² matrices. Both algorithms are implemented from scratch based solely on the papers ¹˒² and the public domain Pascal implementation provided by A. Volgenant ³. The LAPMOD implementation seems to be faster than the LAPJV implementation for matrices with a side of more than ~5000 and with less than 50% finite coefficients.
All [linear assignment problem](https://en.wikipedia.org/wiki/Assignment_problem) solvers in `lapx` are based on the Jonker-Volgenant algorithm for dense LAPJV ¹ or sparse LAPMOD ² matrices. Tomas Kazmar's [`lap`](https://github.com/gatagat/lap) implemented the core **`lapjv()`** and **`lapmod()`** from scratch based solely on the papers ¹˒² and the public domain Pascal implementation provided by A. Volgenant ³.

@@ -122,3 +129,3 @@ <sup>¹ R. Jonker and A. Volgenant, "A Shortest Augmenting Path Algorithm for Dense and Sparse Linear Assignment Problems", Computing 38, 325-340 (1987) </sup><br>

This function `lapjvx()` basically is `lapjv()`, but it matches the return style of SciPy's [`linear_sum_assignment`](https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.linear_sum_assignment.html). You can see how it compares to others in the Object Tracking benchmark [here](https://github.com/rathaROG/lapx/blob/main/benchmark.md#-object-tracking).
`lapjvx()` basically is `lapjv()`, but it matches the return style of SciPy's [`linear_sum_assignment`](https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.linear_sum_assignment.html) with no additional overhead. You can see how it compares to others in the Object Tracking benchmark [here](https://github.com/rathaROG/lapx/blob/main/benchmark.md#-object-tracking).

@@ -137,3 +144,3 @@ ```python

This function `lapjvxa()` is essentially the same as `lapjvx()`, but it returns assignments with shape `(K, 2)` directly — no additional or manual post-processing required. `lapjvxa()` is intended for applications that only need the final assignments and do not require control over the `cost_limit` parameter.
`lapjvxa()` is essentially the same as `lapjvx()`, but it returns assignments with shape `(K, 2)` directly — no additional or manual post-processing required. `lapjvxa()` is optimized for applications that only need the final assignments and do not require control over the `cost_limit` parameter.

@@ -153,3 +160,3 @@ ```python

This function `lapjvc()`, which is the classical implementation of Jonker-Volgenant — [py-lapsolver](https://github.com/cheind/py-lapsolver), is as fast as (if not faster than) other functions when `n=m` (the cost matrix is square), but it is much slower when `n≠m` (the cost matrix is not square). This function adopts the return style of `lapjvx()` — the same as SciPy's [`linear_sum_assignment`](https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.linear_sum_assignment.html).
`lapjvc()` is an enhanced version of Christoph Heindl's [py-lapsolver](https://github.com/cheind/py-lapsolver). `lapjvc()` is as fast as (if not faster than) other functions when `n=m` (the cost matrix is square), but it is much slower when `n≠m` (the cost matrix is rectangular). This function adopts the return style of `lapjvx()` — the same as SciPy's [`linear_sum_assignment`](https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.linear_sum_assignment.html).

@@ -166,7 +173,23 @@ ```python

<details><summary>Show <code>lapjvs()</code></summary>
### 5. The new function ``lapjvs()``
`lapjvs()` is an enhanced version of Vadim Markovtsev's [`lapjv`](https://github.com/src-d/lapjv). While `lapjvs()` does not use CPU special instruction sets like the original implementation, it still delivers comparable performance. It natively supports both square and rectangular cost matrices and can produce output either in SciPy's [`linear_sum_assignment`](https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.linear_sum_assignment.html) style or `(x, y)` mappings. See the [docstring here](https://github.com/rathaROG/lapx/blob/main/lap/lapjvs.py) for more details.
```python
import numpy as np, lap
# row_indices, col_indices = lap.lapjvs(np.random.rand(4, 5), return_cost=False, jvx_like=True)
total_cost, row_indices, col_indices = lap.lapjvs(np.random.rand(4, 5), return_cost=True, jvx_like=True)
assignments = np.array(list(zip(row_indices, col_indices)))
```
</details>
<details><summary>Show <code>lapmod()</code></summary>
### 5. The original function ``lapmod()``
### 6. The original function ``lapmod()``
See [the docstring](https://github.com/rathaROG/lapx/blob/8d56b42265a23c3b5a290b1039dacaac70dfe60d/lap/lapmod.py#L275) for details.
See the [docstring](https://github.com/rathaROG/lapx/blob/8d56b42265a23c3b5a290b1039dacaac70dfe60d/lap/lapmod.py#L275) for details.

@@ -173,0 +196,0 @@ ```python

+33
-11

@@ -6,3 +6,3 @@ # Copyright (c) 2025 Ratha SIV | MIT License

LICENSE = "MIT"
DESCRIPTION = "Linear Assignment Problem solver (LAPJV/LAPMOD)."
DESCRIPTION = "Linear assignment problem solvers"
LONG_DESCRIPTION = open("README.md", encoding="utf-8").read()

@@ -31,2 +31,3 @@

import os
import sys
from Cython.Build import cythonize

@@ -37,2 +38,3 @@

SRC_DIR_JVC = os.path.join('src', '_lapjvc')
SRC_DIR_JVS = os.path.join('src', '_lapjvs')

@@ -50,2 +52,12 @@ # Source files for lapjv/lapmod

# Source file for lapjvs
lapjvscpp = os.path.join(SRC_DIR_JVS, 'lapjvs.cpp')
# C++ standard on different platforms
if sys.platform == "win32":
# extra_compile_args = ["/std:c++17"]
extra_compile_args = ["/std:c++latest"]
else:
extra_compile_args = ["-std=c++17"]
# Extension for lapjv/lapmod

@@ -57,3 +69,3 @@ ext_jv = Extension(

language='c++',
extra_compile_args=['-std=c++17'],
extra_compile_args=extra_compile_args,
)

@@ -67,3 +79,3 @@

language='c++',
extra_compile_args=['-std=c++17'],
extra_compile_args=extra_compile_args,
)

@@ -77,7 +89,16 @@

language='c++',
extra_compile_args=['-std=c++17'],
extra_compile_args=extra_compile_args,
)
ext_modules = cythonize([ext_jv, ext_jvx]) + [ext_jvc]
# Extension for lapjvs
ext_jvs = Extension(
name='lap._lapjvs',
sources=[lapjvscpp],
include_dirs=[include_numpy(), SRC_DIR_JVS, PACKAGE_PATH],
language='c++',
extra_compile_args=extra_compile_args,
)
ext_modules = cythonize([ext_jv, ext_jvx]) + [ext_jvc, ext_jvs]
setup(

@@ -93,7 +114,8 @@ name=PACKAGE_NAME,

long_description_content_type="text/markdown",
keywords=['Linear Assignment Problem Solver', 'LAP solver',
'Jonker-Volgenant Algorithm', 'LAPJV', 'LAPMOD',
'lap', 'lapx', 'lapjvx', 'lapjvxa', 'lapjvc'],
keywords=['Linear Assignment Problem Solver', 'LAP solver',
'Jonker-Volgenant Algorithm', 'LAPJV', 'LAPMOD',
'lap', 'lapx', 'lapjvx', 'lapjvxa', 'lapjvc', 'lapjvs'],
packages=find_packages(include=[PACKAGE_PATH, f"{PACKAGE_PATH}.*"]),
include_package_data=True,
package_data={'lap': ['default_autotune.json']},
install_requires=['numpy>=1.21.6',],

@@ -121,3 +143,3 @@ python_requires=">=3.7",

'Topic :: Software Development :: Libraries',
'Operating System :: Microsoft :: Windows',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',

@@ -131,3 +153,3 @@ 'Operating System :: Unix',

"""
Recommend using :py:mod:`build` to build the package as it does not
Recommend using :py:mod:`build` to build the package as it does not
disrupt your current environment.

@@ -138,3 +160,3 @@

>>> python -m build --wheel
"""
"""
main_setup()
BSD 2-Clause License
Copyright (c) 2012-2025, Tomas Kazmar
Copyright (c) 2025, Ratha SIV

@@ -5,0 +6,0 @@ Redistribution and use in source and binary forms, with or without

Sorry, the diff of this file is too big to display

Sorry, the diff of this file is too big to display