├── tests
├── __init__.py
├── test_utils
│ └── __init__.py
├── test_dashboard
│ ├── __init__.py
│ └── test_gantt.py
├── test_datasets
│ └── __init__.py
├── test_objective
│ └── __init__.py
├── test_solver
│ ├── __init__.py
│ ├── test_temp
│ │ ├── __init__.py
│ │ └── continious_rule.py
│ ├── test_meta_heuristics
│ │ ├── __init__.py
│ │ ├── test_nsga3.py
│ │ ├── test_genetic.py
│ │ ├── test_tabu_search.py
│ │ └── test_branch_and_bound.py
│ ├── test_construction_heuristics
│ │ ├── __init__.py
│ │ ├── test_lpst.py
│ │ ├── test_rule.py
│ │ └── test_spt.py
│ └── test_reinforcement_learning
│ │ ├── __init__.py
│ │ └── test_dqn.py
└── test_lekin_struct
│ ├── __init__.py
│ ├── test_route.py
│ ├── test_operation.py
│ ├── test_resource.py
│ ├── test_timeslot.py
│ └── test_job.py
├── .github
├── conda
│ ├── build.sh
│ └── meta.yaml
├── ISSUE_TEMPLATE
│ ├── bug-report.md
│ ├── feature-request.md
│ └── question.md
├── PULL_REQUEST_TEMPLATE.md
└── workflows
│ ├── pypi_release.yml
│ ├── conda_release.yml
│ ├── lint.yml
│ ├── codeql-analysis.yml
│ └── test.yml
├── examples
├── __init__.py
├── data
│ ├── MOInput.xlsx
│ ├── JSP_dataset.xlsx
│ └── k1.json
├── README.md
├── jobshop_example.py
└── rule_example.py
├── lekin
├── utils
│ ├── __init__.py
│ ├── push_dense.py
│ └── group_op_ds.py
├── constraint
│ └── __init__.py
├── dashboard
│ ├── __init__.py
│ ├── pages.py
│ └── gantt.py
├── datasets
│ ├── __init__.py
│ ├── parse_data.py
│ ├── get_data.py
│ └── check_data.py
├── solver
│ ├── core
│ │ ├── __init__.py
│ │ ├── base_solver.py
│ │ ├── problem.py
│ │ └── solution.py
│ ├── utils
│ │ ├── __init__.py
│ │ ├── push_dense.py
│ │ └── time_slot.py
│ ├── meta_heuristics
│ │ ├── pso.py
│ │ ├── __init__.py
│ │ ├── encoding.py
│ │ ├── hill_climbing.py
│ │ ├── shifting_bottle_neck.py
│ │ ├── branch_and_bound.py
│ │ ├── critical_path.py
│ │ ├── nsga3.py
│ │ ├── variable_neighborhood_search.py
│ │ └── genetic.py
│ ├── operation_research
│ │ ├── __init__.py
│ │ └── ortool.py
│ ├── reinforcement_learning
│ │ ├── __init__.py
│ │ └── q_learning.py
│ ├── construction_heuristics
│ │ ├── __init__.py
│ │ ├── base.py
│ │ ├── edd.py
│ │ ├── fifo.py
│ │ ├── cr.py
│ │ ├── atcs.py
│ │ ├── lsf.py
│ │ ├── mix.py
│ │ ├── spt.py
│ │ ├── forward.py
│ │ └── backward.py
│ ├── config.py
│ ├── __init__.py
│ ├── solver.py
│ └── constraints
│ │ └── base.py
├── __init__.py
├── objective
│ ├── makespan.py
│ ├── tardiness.py
│ ├── score.py
│ └── __init__.py
├── lekin_struct
│ ├── exceptions.py
│ ├── __init__.py
│ ├── execution_mode.py
│ ├── allocation.py
│ ├── relation.py
│ ├── timeslot.py
│ └── route.py
└── scheduler.py
├── requirements.txt
├── docs
├── source
│ ├── _static
│ │ ├── backward-scheduling.png
│ │ ├── forward-scheduling.png
│ │ └── logo.svg
│ ├── heuristics.rst
│ ├── _templates
│ │ └── custom-module-template.rst
│ ├── index.rst
│ ├── rules.rst
│ ├── application.rst
│ └── conf.py
├── requirements_docs.txt
├── Makefile
└── make.bat
├── docker
└── Dockerfile
├── codecov.yml
├── CHANGELOG.md
├── Makefile
├── .pre-commit-config.yaml
├── .readthedocs.yml
├── .gitignore
├── setup.cfg
├── pyproject.toml
├── README_zh_CN.md
└── README.md
/tests/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/.github/conda/build.sh:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/.github/conda/meta.yaml:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/examples/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/lekin/utils/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/lekin/constraint/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/lekin/dashboard/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/lekin/datasets/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/lekin/datasets/parse_data.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/tests/test_utils/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/lekin/solver/core/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/lekin/solver/utils/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/tests/test_dashboard/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/tests/test_datasets/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/tests/test_objective/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/tests/test_solver/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/tests/test_dashboard/test_gantt.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/tests/test_lekin_struct/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/tests/test_lekin_struct/test_route.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/tests/test_lekin_struct/test_operation.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/tests/test_lekin_struct/test_resource.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/tests/test_lekin_struct/test_timeslot.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/tests/test_solver/test_temp/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | numpy<=1.26.0
2 | pandas
3 |
--------------------------------------------------------------------------------
/tests/test_solver/test_meta_heuristics/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/tests/test_solver/test_meta_heuristics/test_nsga3.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/tests/test_solver/test_construction_heuristics/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/tests/test_solver/test_meta_heuristics/test_genetic.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/tests/test_solver/test_meta_heuristics/test_tabu_search.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/tests/test_solver/test_reinforcement_learning/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/tests/test_solver/test_reinforcement_learning/test_dqn.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/lekin/solver/meta_heuristics/pso.py:
--------------------------------------------------------------------------------
1 | """particle swarm optimization"""
2 |
--------------------------------------------------------------------------------
/lekin/solver/operation_research/__init__.py:
--------------------------------------------------------------------------------
1 | """Operation research"""
2 |
--------------------------------------------------------------------------------
/lekin/solver/reinforcement_learning/__init__.py:
--------------------------------------------------------------------------------
1 | """Reinforcement learning"""
2 |
--------------------------------------------------------------------------------
/tests/test_solver/test_construction_heuristics/test_lpst.py:
--------------------------------------------------------------------------------
1 | import unittest
2 |
--------------------------------------------------------------------------------
/examples/data/MOInput.xlsx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hongyingyue/python-lekin/HEAD/examples/data/MOInput.xlsx
--------------------------------------------------------------------------------
/examples/data/JSP_dataset.xlsx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hongyingyue/python-lekin/HEAD/examples/data/JSP_dataset.xlsx
--------------------------------------------------------------------------------
/tests/test_solver/test_construction_heuristics/test_rule.py:
--------------------------------------------------------------------------------
1 | import pandas as pd
2 |
3 | from lekin.lekin_struct import Job, JobCollector
4 |
--------------------------------------------------------------------------------
/docs/source/_static/backward-scheduling.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hongyingyue/python-lekin/HEAD/docs/source/_static/backward-scheduling.png
--------------------------------------------------------------------------------
/docs/source/_static/forward-scheduling.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hongyingyue/python-lekin/HEAD/docs/source/_static/forward-scheduling.png
--------------------------------------------------------------------------------
/lekin/dashboard/pages.py:
--------------------------------------------------------------------------------
1 | from typing import Any, Callable, Dict, Generator, List, Optional, Tuple, Type, Union
2 |
3 | import streamlit as st
4 |
--------------------------------------------------------------------------------
/lekin/solver/meta_heuristics/__init__.py:
--------------------------------------------------------------------------------
1 | """Heuristics"""
2 |
3 |
4 | class Heuristics(object):
5 | def __init__(self, name):
6 | pass
7 |
--------------------------------------------------------------------------------
/tests/test_solver/test_construction_heuristics/test_spt.py:
--------------------------------------------------------------------------------
1 | import unittest
2 |
3 | from lekin.solver.construction_heuristics.spt import SPTScheduler
4 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/bug-report.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: "🐛 Bug Report"
3 | about: Create a report to help us improve
4 | title: ''
5 | labels: bug
6 | assignees: ''
7 |
8 | ---
9 |
--------------------------------------------------------------------------------
/lekin/solver/meta_heuristics/encoding.py:
--------------------------------------------------------------------------------
1 | """
2 | An embedded encoding scheme used in the genetic algorithm:
3 | Original order: 1, 2, 3, 4, 5, 6, 7, 8
4 | Fixed relative order: 2, 3, 6, 8
5 | Encoding: after the original order is shuffled, find the positions now occupied by 2, 3, 6, 8 and overwrite them with these elements in their original relative order
6 | """
7 |
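# The sketch below is illustrative and not part of the original module: it shows one way to
# realize the embedded encoding described above (the helper name is hypothetical).
import random


def enforce_relative_order(sequence, fixed):
    """Return a copy of `sequence` in which the elements of `fixed` keep their original relative order."""
    fixed_set = set(fixed)
    # positions in the (shuffled) sequence currently occupied by fixed elements
    slots = [i for i, x in enumerate(sequence) if x in fixed_set]
    result = list(sequence)
    for slot, value in zip(slots, fixed):
        result[slot] = value
    return result


if __name__ == "__main__":
    original = [1, 2, 3, 4, 5, 6, 7, 8]
    fixed = [2, 3, 6, 8]  # must keep this relative order
    shuffled = original[:]
    random.shuffle(shuffled)
    print(enforce_relative_order(shuffled, fixed))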
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/feature-request.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: "🚀 Feature Request"
3 | about: Suggest an idea for this project
4 | title: ''
5 | labels: enhancement
6 | assignees: ''
7 |
8 | ---
9 |
--------------------------------------------------------------------------------
/examples/README.md:
--------------------------------------------------------------------------------
1 | # Examples
2 |
3 | forward scheduling
4 | ```shell
5 |
6 | ```
7 |
8 |
9 | backward scheduling
10 | ```shell
11 |
12 | ```
13 |
14 |
15 | genetic
16 | ```shell
17 |
18 | ```
19 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/question.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: "❓Question"
3 | about: Ask a general question
4 | title: ''
5 | labels: question
6 | assignees: ''
7 |
8 | ---
9 |
10 | ## ❔Question
11 |
12 |
13 | ## Additional context
14 |
--------------------------------------------------------------------------------
/docker/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM ubuntu:18.04
2 |
3 | RUN apt-get update
4 | RUN apt-get install -y wget vim python3.8 python3-pip
5 |
6 | RUN pip3 install --no-cache-dir lekin
7 |
8 | # Set the default command to python3.
9 | CMD ["python3"]
10 |
--------------------------------------------------------------------------------
/codecov.yml:
--------------------------------------------------------------------------------
1 | coverage:
2 | precision: 2
3 | round: down
4 | range: "70...100"
5 | status:
6 | project:
7 | default:
8 | threshold: 3%
9 |
10 | patch:
11 | default:
12 | enabled: false
13 | changes: no
14 |
--------------------------------------------------------------------------------
/lekin/datasets/get_data.py:
--------------------------------------------------------------------------------
1 | """Generate the example jobshoppro"""
2 |
3 | import json
4 | import logging
5 |
6 |
7 | def get_data(name):
8 | # machine_list = []
9 | # route_list = []
10 | if name == "simple":
11 | pass
12 |
13 | return
14 |
--------------------------------------------------------------------------------
/docs/requirements_docs.txt:
--------------------------------------------------------------------------------
1 | recommonmark>=0.7.1
2 | nbconvert>=6.3.0
3 | pandoc>=1.0
4 | ipython
5 | sphinx>3.2
6 | nbsphinx==0.8.8
7 | sphinx_markdown_tables==0.0.17
8 | pydata_sphinx_theme==0.8.0
9 | docutils
10 | sphinx-autobuild
11 |
12 | pandas
13 | numpy
14 | ortools
15 | tensorflow
16 |
--------------------------------------------------------------------------------
/CHANGELOG.md:
--------------------------------------------------------------------------------
1 | # Release notes
2 |
3 | ## v0.0.1 Initial release (15/10/2022)
4 |
5 | ### Added
6 | - solver support
7 | - dispatching rules
8 | - spt
9 | - fifo
10 | - edd
11 | - forward scheduling
12 | - backward scheduling
13 | - meta heuristics
14 | - local search
15 | - genetic
16 |
17 | ### Contributor
18 | - HongyingYue
19 |
--------------------------------------------------------------------------------
/lekin/datasets/check_data.py:
--------------------------------------------------------------------------------
1 | """Check the input job shop format and necessary information"""
2 |
3 | import logging
4 |
5 |
6 | def check_data(data):
7 |     if set(data.keys()) != {"routes", "machines"}:
8 |         logging.error("Input data must contain exactly the keys 'routes' and 'machines'")
9 |
10 |     if len(data["machines"]) < 1:
11 |         logging.error("Input data must contain at least one machine")
12 |     if len(data["routes"]) < 1:
13 |         logging.error("Input data must contain at least one route")
14 |
15 | return
16 |
--------------------------------------------------------------------------------
/lekin/__init__.py:
--------------------------------------------------------------------------------
1 | from lekin.datasets.get_data import get_data
2 |
3 | # from lekin.lekin_struct.job import Job
4 | # from lekin.lekin_struct.machine import Machine
5 | # from lekin.lekin_struct.operation import Operation
6 | # from lekin.lekin_struct.route import Route
7 | from lekin.scheduler import Scheduler
8 | from lekin.solver.meta_heuristics import Heuristics
9 |
10 | __all__ = ["Job", "Machine", "Route", "Operation", "Scheduler", "get_data"]
11 |
12 | __version__ = "0.0.0"
13 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | .PHONY: style test docs
2 |
3 | check_dirs := lekin examples tests
4 |
5 | # run checks on all files and potentially modifies some of them
6 |
7 | style:
8 | black $(check_dirs)
9 | isort $(check_dirs)
10 | flake8 $(check_dirs)
11 | pre-commit run --files $(check_dirs)
12 |
13 | # run tests for the library
14 |
15 | test:
16 | python -m unittest
17 |
18 | # run tests for the docs
19 |
20 | docs:
21 | make -C docs clean M=$(shell pwd)
22 | make -C docs html M=$(shell pwd)
23 |
--------------------------------------------------------------------------------
/.github/PULL_REQUEST_TEMPLATE.md:
--------------------------------------------------------------------------------
1 | ### Description
2 |
3 | This PR ...
4 |
5 | ### Checklist
6 |
7 | - [ ] Linked issues (if existing)
8 | - [ ] Amended changelog.md for large changes (and added myself there as contributor)
9 | - [ ] Added/modified tests
10 | - [ ] Used pre-commit hooks when committing to ensure that code is compliant with hooks. Install hooks with `pre-commit install`.
11 | To run hooks independent of commit, execute `pre-commit run --all-files`
12 |
13 | Thank you for joining. Have fun coding!
14 |
--------------------------------------------------------------------------------
/lekin/solver/construction_heuristics/__init__.py:
--------------------------------------------------------------------------------
1 | """Dispatching rules"""
2 |
3 | from lekin.solver.construction_heuristics.atcs import ATCScheduler
4 | from lekin.solver.construction_heuristics.backward import BackwardScheduler
5 | from lekin.solver.construction_heuristics.forward import ForwardScheduler
6 | from lekin.solver.construction_heuristics.spt import SPTScheduler
7 |
8 | __all__ = ["ATCScheduler", "ForwardScheduler", "SPTScheduler", "BackwardScheduler"]
9 |
10 |
11 | class RuleScheduler(object):
12 | def __init__(self):
13 | pass
14 |
--------------------------------------------------------------------------------
/docs/source/heuristics.rst:
--------------------------------------------------------------------------------
1 | Heuristic Scheduling
2 | ======================
3 |
4 |
5 | Tabu Search
6 | ------------
7 |
8 |
9 | Genetic Algorithm
10 | --------------------
11 |
12 | The key to applying a genetic algorithm to scheduling is how to encode a schedule and how to compute its fitness. Put simply: optimize the permutation of the operation sequence.
13 |
14 | Each feasible solution is called a chromosome; a chromosome consists of several elements, and each element is called a gene.
15 |
16 | When genetic algorithms are applied to scheduling, as in classic problems such as TSP and VRP, the solution does not represent quantities but an **order**.
17 | For a job-shop problem with m jobs and n machines, the gene sequence has length m * n, because every job has to be processed on n machines.
18 |
19 | Two sequences represent a solution: the operation sequence OS and the machine sequence MS (decoded as in the sketch below).
20 | - In the OS sequence, each value identifies job i, and the k-th occurrence of that value refers to the k-th operation of that job.
21 | - The MS sequence likewise encodes the machine selected for each operation.
22 |
23 |
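A minimal illustrative sketch (not part of the lekin API; job and operation indices are made up) of how such an OS chromosome can be decoded:

.. code-block:: python

    from collections import defaultdict

    # OS chromosome for 3 jobs with 2 operations each:
    # the k-th occurrence of job j encodes operation k of job j.
    os_chromosome = [0, 1, 1, 2, 0, 2]

    occurrence = defaultdict(int)
    decoded = []
    for job in os_chromosome:
        decoded.append((job, occurrence[job]))  # (job index, operation index)
        occurrence[job] += 1

    print(decoded)  # [(0, 0), (1, 0), (1, 1), (2, 0), (0, 1), (2, 1)]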
24 |
25 | Reinforcement Learning
26 | ------------------------
27 |
28 | The key to the Q-learning approach is to maintain a state-value (Q) table while the agent explores.
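
A generic sketch of the tabular Q-learning update; states, actions and hyper-parameters are placeholders, not lekin's implementation:

.. code-block:: python

    import random
    from collections import defaultdict

    q_table = defaultdict(float)          # keyed by (state, action)
    alpha, gamma, epsilon = 0.1, 0.9, 0.2

    def choose_action(state, actions):
        if random.random() < epsilon:     # explore
            return random.choice(actions)
        return max(actions, key=lambda a: q_table[(state, a)])  # exploit

    def update(state, action, reward, next_state, next_actions):
        best_next = max((q_table[(next_state, a)] for a in next_actions), default=0.0)
        q_table[(state, action)] += alpha * (reward + gamma * best_next - q_table[(state, action)])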
29 |
--------------------------------------------------------------------------------
/lekin/solver/construction_heuristics/base.py:
--------------------------------------------------------------------------------
1 | import logging
2 |
3 |
4 | class BaseScheduler(object):
5 | def __init__(self, job_collector, resource_collector, **kwargs):
6 | self.job_collector = job_collector
7 | self.resource_collector = resource_collector
8 |
9 | for key, value in kwargs.items():
10 | setattr(self, key, value)
11 |
12 | def run(self):
13 | raise NotImplementedError
14 |
15 | def scheduling_job(self, job, **kwargs):
16 | raise NotImplementedError
17 |
18 | def find_best_resource_and_timeslot_for_operation(self, operation, **kwargs):
19 | raise NotImplementedError
20 |
--------------------------------------------------------------------------------
/docs/Makefile:
--------------------------------------------------------------------------------
1 | # Minimal makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line, and also
5 | # from the environment for the first two.
6 | SPHINXOPTS ?=
7 | SPHINXBUILD ?= sphinx-build
8 | SOURCEDIR = source
9 | BUILDDIR = build
10 |
11 | # Put it first so that "make" without argument is like "make help".
12 | help:
13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
14 |
15 | .PHONY: help Makefile
16 |
17 | # Catch-all target: route all unknown targets to Sphinx using the new
18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
19 | %: Makefile
20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
21 |
--------------------------------------------------------------------------------
/lekin/utils/push_dense.py:
--------------------------------------------------------------------------------
1 | # push dense
2 | # for i, (id, resource) in enumerate(resource_collector.resources_dict.items()):
3 | # logging.info(
4 | # f'Start to push {i + 1}/{len(resource_collector.resources_dict)} resources'
5 | # )
6 | # assigned_ops = resource.assigned_operations
7 | # assigned_ops.sort()
8 | # for op in assigned_ops:
9 | # buffer_push = op.buffer
10 | # if buffer_push > resource.available_capacity:
11 | # buffer_push = resource.available_capacity
12 |
13 | # op.start_time -= buffer_push
14 |
15 | # # last chance for remaining jobs
16 | # for job in self.unassigned_jobs:
17 | #     logging.warning(f'Abnormal! No scheduling for job {job.job_id}')
18 |
--------------------------------------------------------------------------------
/lekin/solver/config.py:
--------------------------------------------------------------------------------
1 | class SolverConfig:
2 | def __init__(self, optimization_strategy, entity_selector=None, move_selector=None, termination=None):
3 | self.optimization_strategy = optimization_strategy
4 | self.entity_selector = entity_selector
5 | self.move_selector = move_selector
6 | self.termination = termination
7 |
8 |
9 | class TerminationConfig:
10 | def __init__(self, seconds_spent_limit=None, max_iterations=None):
11 | self.seconds_spent_limit = seconds_spent_limit
12 | self.max_iterations = max_iterations
13 |
14 |
15 | class Message:
16 | def __init__(self, message: str):
17 | self.message = message
18 |
19 | def __repr__(self):
20 | return f"ErrorMessage(message={self.message})"
21 |
--------------------------------------------------------------------------------
/lekin/objective/makespan.py:
--------------------------------------------------------------------------------
1 | def calculate_makespan(job_collector):
2 | for job in job_collector.job_list:
3 | op = job.operations
4 | job.makespan = op.assigned_hours[-1]
5 |
6 | if job.demand_date is not None:
7 | job.tardiness = job.makespan - job.demand_date
8 | return
9 |
10 |
11 | def calculate_changeover_time(schedule_result, job_collector):
12 | changeover_time = 0
13 | for resource in job_collector.resources:
14 | previous_end_time = 0
15 | for operation in schedule_result:
16 | if operation.resource == resource:
17 | changeover_time += max(0, operation.start_time - previous_end_time)
18 | previous_end_time = operation.end_time
19 | return changeover_time
20 |
--------------------------------------------------------------------------------
/lekin/lekin_struct/exceptions.py:
--------------------------------------------------------------------------------
1 | """
2 | Custom exceptions for the lekin package.
3 | """
4 |
5 |
6 | class LekinError(Exception):
7 | """Base exception class for lekin package."""
8 |
9 | pass
10 |
11 |
12 | class ValidationError(LekinError):
13 | """Raised when validation of input data fails."""
14 |
15 | pass
16 |
17 |
18 | class SchedulingError(LekinError):
19 | """Raised when scheduling operations fail."""
20 |
21 | pass
22 |
23 |
24 | class ResourceError(LekinError):
25 | """Raised when resource-related operations fail."""
26 |
27 | pass
28 |
29 |
30 | class OperationError(LekinError):
31 | """Raised when operation-related operations fail."""
32 |
33 | pass
34 |
35 |
36 | class RouteError(LekinError):
37 | """Raised when route-related operations fail."""
38 |
39 | pass
40 |
--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
1 | # See https://pre-commit.com for more information
2 | # See https://pre-commit.com/hooks.html for more hooks
3 | repos:
4 | - repo: https://github.com/pre-commit/pre-commit-hooks
5 | rev: v4.1.0
6 | hooks:
7 | - id: trailing-whitespace
8 | - id: end-of-file-fixer
9 | - id: check-yaml
10 | - id: check-ast
11 | - repo: https://github.com/PyCQA/flake8
12 | rev: "3.9.2"
13 | hooks:
14 | - id: flake8
15 | - repo: https://github.com/pre-commit/mirrors-isort
16 | rev: v5.10.1
17 | hooks:
18 | - id: isort
19 | - repo: https://github.com/psf/black
20 | rev: 22.3.0
21 | hooks:
22 | - id: black
23 | - repo: https://github.com/nbQA-dev/nbQA
24 | rev: 1.2.3
25 | hooks:
26 | - id: nbqa-black
27 | - id: nbqa-isort
28 | - id: nbqa-flake8
29 | - id: nbqa-check-ast
30 |
--------------------------------------------------------------------------------
/.readthedocs.yml:
--------------------------------------------------------------------------------
1 | # .readthedocs.yml
2 | # Read the Docs configuration file
3 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
4 |
5 | # Required
6 | version: 2
7 |
8 | build:
9 | os: ubuntu-22.04
10 | tools:
11 | python: "3.8"
12 |
13 | # Build documentation in the docs/ directory with Sphinx
14 | # reference: https://docs.readthedocs.io/en/stable/config-file/v2.html#sphinx
15 | sphinx:
16 | configuration: docs/source/conf.py
17 | fail_on_warning: false
18 |
19 | # Build documentation with MkDocs
20 | # mkdocs:
21 | # configuration: mkdocs.yml
22 |
23 | # Optionally build your docs in additional formats such as PDF and ePub
24 | formats:
25 | - htmlzip
26 |
27 | # Optionally set the version of Python and requirements required to build your docs
28 | python:
29 | install:
30 | - requirements: docs/requirements_docs.txt
31 |
--------------------------------------------------------------------------------
/lekin/solver/construction_heuristics/edd.py:
--------------------------------------------------------------------------------
1 | """Earliest Due Date"""
2 |
3 | import logging
4 |
5 | from lekin.solver.construction_heuristics.base import BaseScheduler
6 |
7 |
8 | class EDDScheduler(object):
9 | def __init__(self, jobs, routes):
10 | self.jobs = jobs
11 | self.routes = routes
12 |
13 | def schedule_job(self, job):
14 | # Schedule the operations of a job using EDD method
15 | current_time = 0
16 | for operation in job.route.operations:
17 | operation.start_time = max(current_time, operation.available_time)
18 | operation.end_time = operation.start_time + operation.processing_time
19 | current_time = operation.end_time
20 |
21 | def run(self):
22 | for job in self.jobs:
23 | self.schedule_job(job)
24 |
25 |
26 | class MS(object):
27 | """Variation of EDD"""
28 |
29 | def __init__(self):
30 | pass
31 |
--------------------------------------------------------------------------------
/lekin/solver/construction_heuristics/fifo.py:
--------------------------------------------------------------------------------
1 | """First In First Out"""
2 |
3 | import logging
4 |
5 | from lekin.solver.construction_heuristics.base import BaseScheduler
6 |
7 |
8 | class FCFSScheduler:
9 | """
10 |     - On initialization, record the queue of waiting tasks in front of each machine, and simulate the progression of time.
11 | """
12 |
13 | def __init__(self, jobs, routes):
14 | self.jobs = jobs
15 | self.routes = routes
16 |
17 | def init(self):
18 | """ """
19 | pass
20 |
21 | def schedule_job(self, job):
22 | # Schedule the operations of a job in FCFS order
23 | current_time = 0
24 | for operation in job.route.operations:
25 | operation.start_time = max(current_time, operation.available_time)
26 | operation.end_time = operation.start_time + operation.processing_time
27 | current_time = operation.end_time
28 |
29 | def run(self):
30 | for job in self.jobs:
31 | self.schedule_job(job)
32 |
--------------------------------------------------------------------------------
/docs/make.bat:
--------------------------------------------------------------------------------
1 | @ECHO OFF
2 |
3 | pushd %~dp0
4 |
5 | REM Command file for Sphinx documentation
6 |
7 | if "%SPHINXBUILD%" == "" (
8 | set SPHINXBUILD=sphinx-build
9 | )
10 | set SOURCEDIR=source
11 | set BUILDDIR=build
12 |
13 | if "%1" == "" goto help
14 |
15 | %SPHINXBUILD% >NUL 2>NUL
16 | if errorlevel 9009 (
17 | echo.
18 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
19 | echo.installed, then set the SPHINXBUILD environment variable to point
20 | echo.to the full path of the 'sphinx-build' executable. Alternatively you
21 | echo.may add the Sphinx directory to PATH.
22 | echo.
23 | echo.If you don't have Sphinx installed, grab it from
24 | echo.http://sphinx-doc.org/
25 | exit /b 1
26 | )
27 |
28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
29 | goto end
30 |
31 | :help
32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
33 |
34 | :end
35 | popd
36 |
--------------------------------------------------------------------------------
/lekin/solver/utils/push_dense.py:
--------------------------------------------------------------------------------
1 | def push_dense(schedule):
2 | changed = True
3 |
4 | while changed:
5 | changed = False
6 |
7 | for resource in schedule.resources:
8 | slots = sorted(resource.slots, key=lambda x: x.start_time)
9 |
10 | for i in range(len(slots) - 1):
11 | curr_slot = slots[i]
12 | next_slot = slots[i + 1]
13 |
14 | if curr_slot.end_time < next_slot.start_time:
15 | # Gap exists between operations
16 |
17 | gap = next_slot.start_time - curr_slot.end_time
18 |
19 | if next_slot.operation.can_start_early(gap):
20 | # Update slots
21 | next_slot.start_time -= gap
22 | next_slot.end_time -= gap
23 |
24 | curr_slot.end_time = next_slot.start_time
25 |
26 | changed = True
27 |
28 | return schedule
29 |
--------------------------------------------------------------------------------
/lekin/objective/tardiness.py:
--------------------------------------------------------------------------------
1 | """Tardiness total/maximum/weighted"""
2 |
3 |
4 | def calculate_tardiness(schedule_result, job):
5 | end_time = schedule_result[job.route.operations[-1]][1]
6 | return max(0, end_time - job.demand_date)
7 |
8 |
9 | def calculate_total_tardiness(schedule_result, jobs):
10 | total_tardiness = 0
11 | for job in jobs:
12 | total_tardiness += calculate_tardiness(schedule_result, job)
13 | return total_tardiness
14 |
15 |
16 | def calculate_total_late_jobs(schedule_result, jobs):
17 | total_late_jobs = 0
18 | for job in jobs:
19 | if calculate_tardiness(schedule_result, job) > 0:
20 | total_late_jobs += 1
21 | return total_late_jobs
22 |
23 |
24 | def calculate_total_late_time(schedule_result, jobs):
25 | total_late_time = 0
26 | for job in jobs:
27 | tardiness = calculate_tardiness(schedule_result, job)
28 | if tardiness > 0:
29 | total_late_time += tardiness
30 | return total_late_time
31 |
--------------------------------------------------------------------------------
/lekin/solver/construction_heuristics/cr.py:
--------------------------------------------------------------------------------
1 | """Critical ratio rule"""
2 |
3 | import logging
4 |
5 | from lekin.solver.construction_heuristics.base import BaseScheduler
6 |
7 |
8 | class CRScheduler(object):
9 | def __init__(self, jobs, routes):
10 | self.jobs = jobs
11 | self.routes = routes
12 |
13 | def calculate_critical_ratio(self, operation):
14 | time_remaining = operation.job.due_date - operation.start_time
15 | return time_remaining / operation.processing_time
16 |
17 | def schedule_job(self, job):
18 | # Schedule the operations of a job using CR method
19 | current_time = 0
20 | for operation in job.route.operations:
21 | operation.start_time = max(current_time, operation.available_time)
22 | operation.end_time = operation.start_time + operation.processing_time
23 | current_time = operation.end_time
24 |
25 | # Sort the operations based on critical ratio
26 | job.route.operations.sort(key=self.calculate_critical_ratio, reverse=True)
27 |
28 | def run(self):
29 | for job in self.jobs:
30 | self.schedule_job(job)
31 |
--------------------------------------------------------------------------------
/tests/test_solver/test_meta_heuristics/test_branch_and_bound.py:
--------------------------------------------------------------------------------
1 | from datetime import datetime, timedelta
2 | import unittest
3 |
4 | from lekin.lekin_struct import Job, Operation, Resource, Route, TimeSlot
5 | from lekin.solver.meta_heuristics.branch_and_bound import BranchAndBoundScheduler
6 |
7 | # class BranchAndBoundSchedulerTest(unittest.TestCase):
8 | # def test_schedule(self):
9 | # job1 = Job(1, datetime(2023, 1, 10), 2, 1)
10 | # job2 = Job(2, datetime(2023, 1, 20), 1, 1)
11 | #
12 | # op1 = Operation(1, timedelta(hours=2), 2, None, [1])
13 | # op2 = Operation(2, timedelta(hours=3), None, 1, [1])
14 | #
15 | # route1 = Route(1, [op1, op2])
16 | # print(route1)
17 | #
18 | # resource1 = Resource(1, [TimeSlot(datetime(2023, 1, 1), datetime(2023, 1, 3))])
19 | #
20 | # job_list = [job1, job2]
21 | # resource_list = [resource1]
22 | #
23 | # scheduler = BranchAndBoundScheduler(job_list, resource_list)
24 | # schedule = scheduler.get_schedule()
25 | # for idx, slot in enumerate(schedule):
26 | # print(f"Job {idx + 1} will start at {slot.start_time} and end at {slot.end_time}")
27 |
--------------------------------------------------------------------------------
/.github/workflows/pypi_release.yml:
--------------------------------------------------------------------------------
1 | # This workflow will upload a Python package using Twine when a release is created
2 | # For more information see: https://help.github.com/en/actions/language-and-framework-guides/using-python-with-github-actions#publishing-to-package-registries
3 |
4 | name: PyPi Release
5 |
6 | on:
7 | release:
8 | types: [created]
9 |
10 | jobs:
11 | deploy:
12 | runs-on: ubuntu-latest
13 |
14 | permissions:
15 | contents: read # for actions/checkout
16 | id-token: write # required for trusted publishing (optional)
17 |
18 | steps:
19 | - name: Checkout code
20 | uses: actions/checkout@v4
21 |
22 | - name: Set up Python
23 | uses: actions/setup-python@v5
24 | with:
25 | python-version: '3.11'
26 |
27 | - name: Install build tools
28 | run: |
29 | python -m pip install --upgrade pip
30 | pip install build twine
31 |
32 | - name: Build the package
33 | run: |
34 | python -m build
35 |
36 | - name: Publish to PyPI
37 | env:
38 | TWINE_USERNAME: __token__
39 | TWINE_PASSWORD: ${{ secrets.PYPI_API_TOKEN }}
40 | run: |
41 | twine upload dist/*
42 |
--------------------------------------------------------------------------------
/.github/workflows/conda_release.yml:
--------------------------------------------------------------------------------
1 | name: Release - Conda
2 |
3 | on:
4 | push:
5 | tags:
6 | - v*
7 | branches:
8 | - conda_*
9 |
10 | env:
11 | ANACONDA_API_TOKEN: ${{ secrets.ANACONDA_API_TOKEN }}
12 |
13 | jobs:
14 | build_and_package:
15 | runs-on: ubuntu-22.04
16 | defaults:
17 | run:
18 | shell: bash -l {0}
19 |
20 | steps:
21 | - name: Checkout repository
22 | uses: actions/checkout@v1
23 |
24 | - name: Install miniconda
25 | uses: conda-incubator/setup-miniconda@v2
26 | with:
27 | auto-update-conda: true
28 | auto-activate-base: false
29 | python-version: 3.8
30 | activate-environment: "build-transformers"
31 | channels: huggingface
32 |
33 | - name: Setup conda env
34 | run: |
35 | conda install -c defaults anaconda-client conda-build
36 |
37 | - name: Extract version
38 | run: echo "TRANSFORMERS_VERSION=`python setup.py --version`" >> $GITHUB_ENV
39 |
40 | - name: Build conda packages
41 | run: |
42 | conda info
43 | conda list
44 | conda-build .github/conda
45 |
46 | - name: Upload to Anaconda
47 | run: anaconda upload `conda-build .github/conda --output` --force
48 |
--------------------------------------------------------------------------------
/tests/test_solver/test_temp/continious_rule.py:
--------------------------------------------------------------------------------
1 | # Rule adjustment for continuous production scheduling
2 |
3 | from dataclasses import dataclass
4 |
5 |
6 | @dataclass
7 | class OP:
8 | id: int
9 | val: int
10 |
11 |
12 | op1 = OP(id=1, val=0)
13 | op2 = OP(id=2, val=1)
14 | op3 = OP(id=3, val=0)
15 | op4 = OP(id=4, val=1)
16 | op5 = OP(id=5, val=1)
17 | op6 = OP(id=6, val=0)
18 | op7 = OP(id=7, val=0)
19 | op8 = OP(id=8, val=1)
20 | op9 = OP(id=9, val=1)
21 | op10 = OP(id=10, val=0)
22 | op11 = OP(id=11, val=0)
23 | op12 = OP(id=12, val=0)
24 | op13 = OP(id=13, val=1)
25 |
26 | op_list = [op1, op2, op3, op4, op5, op6, op7, op8, op9, op10, op11, op12, op13]
27 |
28 | prefix = 4
29 | suffix = 13
30 |
31 | i = max(prefix - 1, 0)
32 | while i < suffix:
33 |
34 | print(i)
35 | insert_index = i + 1
36 | j = i + 1
37 | while j < suffix and op_list[j].val == op_list[i].val:
38 | j += 1
39 | insert_index += 1
40 |
41 | while j < suffix:
42 | if j != insert_index and op_list[j].val == op_list[i].val:
43 | op_list.insert(insert_index, op_list.pop(j))
44 | insert_index += 1
45 | j += 1
46 |
47 | i = insert_index
48 | print(i)
49 | print([op.id for op in op_list])
50 | print([op.val for op in op_list])
51 | print("-" * 30)
52 |
53 |
54 | print([op.id for op in op_list])
55 | print([op.val for op in op_list])
56 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | pip-wheel-metadata/
24 | share/python-wheels/
25 | *.egg-info/
26 | .installed.cfg
27 | *.egg
28 | MANIFEST
29 |
30 | # PyBuilder
31 | target/
32 |
33 | # Jupyter Notebook
34 | .ipynb_checkpoints
35 |
36 | # IPython
37 | profile_default/
38 | ipython_config.py
39 |
40 | # pyenv
41 | .python-version
42 |
43 | # Sphinx documentation
44 | docs/_build/
45 | docs/source/api/
46 | docs/source/CHANGELOG.md
47 |
48 | # mkdocs documentation
49 | /site
50 |
51 | # pycharm
52 | .idea
53 |
54 | # vscode
55 | .vscode
56 |
57 | # checkpoints
58 | *.ckpt
59 | *.pkl
60 | .DS_Store
61 |
62 | # Environments
63 | .env
64 | .venv
65 | env/
66 | venv/
67 | ENV/
68 | env.bak/
69 | venv.bak/
70 |
71 | # Unit test / coverage reports
72 | htmlcov/
73 | .tox/
74 | .nox/
75 | .coverage
76 | .coverage.*
77 | .cache
78 | nosetests.xml
79 | coverage.xml
80 | *.cover
81 | *.py,cover
82 | .hypothesis/
83 | .pytest_cache/
84 |
85 | # HongyingYue
86 | **/nohup.out
87 | /reference/
88 | /data/
89 | /weights/
90 | /reference/jobshoppro/*
91 | /business/*
92 | /examples/*.xlsx
93 | ~$*.xlsx
94 |
--------------------------------------------------------------------------------
/tests/test_lekin_struct/test_job.py:
--------------------------------------------------------------------------------
1 | from datetime import datetime
2 |
3 |
4 | class Job:
5 | def __init__(
6 | self,
7 | id,
8 | earliest_breach_date=None,
9 | earliest_promised_date=None,
10 | earliest_latest_completion_time=None,
11 | required_date=None,
12 | ):
13 | self.id = id
14 | self.earliest_breach_date = earliest_breach_date
15 | self.earliest_promised_date = earliest_promised_date
16 | self.earliest_latest_completion_time = earliest_latest_completion_time
17 | self.required_date = required_date
18 |
19 |
20 | a = Job(id=0, earliest_breach_date=datetime(2022, 9, 1))
21 |
22 | b = Job(id=1, earliest_promised_date=datetime(2022, 8, 1))
23 |
24 | c = Job(id=2, earliest_breach_date=datetime(2022, 9, 10))
25 |
26 | d = Job(id=3, earliest_promised_date=datetime(2022, 9, 5))
27 |
28 | e = Job(id=4, required_date=datetime(2022, 7, 5))
29 |
30 | candidates = [a, b, c, d, e, a]
31 |
32 | candidates = sorted(
33 | candidates,
34 | key=lambda x: (
35 | x.earliest_breach_date is None,
36 | x.earliest_breach_date,
37 | x.earliest_promised_date is None,
38 | x.earliest_promised_date,
39 | x.earliest_latest_completion_time is None,
40 | x.earliest_latest_completion_time,
41 | x.required_date,
42 | ),
43 | )
44 | print([i.id for i in candidates])
45 |
--------------------------------------------------------------------------------
/lekin/objective/score.py:
--------------------------------------------------------------------------------
1 | from typing import List
2 |
3 |
4 | class Score:
5 | def __init__(self, hard_score: int = 0, soft_score: int = 0):
6 | self.hard_score = hard_score # Represents hard constraints violations
7 | self.soft_score = soft_score # Represents soft constraints optimizations
8 |
9 | def __add__(self, other: "Score") -> "Score":
10 | return Score(hard_score=self.hard_score + other.hard_score, soft_score=self.soft_score + other.soft_score)
11 |
12 | def __sub__(self, other: "Score") -> "Score":
13 | return Score(hard_score=self.hard_score - other.hard_score, soft_score=self.soft_score - other.soft_score)
14 |
15 | def __lt__(self, other: "Score") -> bool:
16 | return (self.hard_score, self.soft_score) < (other.hard_score, other.soft_score)
17 |
18 | def __eq__(self, other: "Score") -> bool:
19 | return (self.hard_score, self.soft_score) == (other.hard_score, other.soft_score)
20 |
21 | def __repr__(self):
22 | return f"Score(hard_score={self.hard_score}, soft_score={self.soft_score})"
23 |
24 | def is_feasible(self) -> bool:
25 | """Check if the score is feasible (i.e., no hard constraint violations)."""
26 | return self.hard_score >= 0
27 |
28 | def total_score(self) -> int:
29 | """Compute the total score considering both hard and soft scores."""
30 | return self.hard_score + self.soft_score
31 |
--------------------------------------------------------------------------------
/lekin/lekin_struct/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | Lekin - A Flexible Job Shop Scheduling Problem (FJSP) Framework
3 |
4 | This module provides the core data structures for modeling and solving Job Shop Scheduling Problems.
5 | The framework is designed to be flexible, extensible, and easy to use for both research and practical applications.
6 |
7 | Core Concepts:
8 | - Job: Represents a manufacturing order or production task
9 | - Operation: Represents a specific manufacturing step within a job
10 | - Resource: Represents machines, workstations, or other processing units
11 | - Route: Defines the sequence of operations for a job
12 | - TimeSlot: Represents available time slots for scheduling
13 |
14 | Example:
15 | >>> from lekin.lekin_struct import Job, Operation, Resource
16 | >>> job = Job("J1", priority=1, quantity=100)
17 | >>> operation = Operation("O1", duration=30)
18 | >>> resource = Resource("M1", capacity=1)
19 | """
20 |
21 | from lekin.lekin_struct.job import Job, JobCollector
22 | from lekin.lekin_struct.operation import Operation, OperationCollector
23 | from lekin.lekin_struct.resource import Resource, ResourceCollector
24 | from lekin.lekin_struct.route import Route, RouteCollector
25 | from lekin.lekin_struct.timeslot import TimeSlot
26 |
27 | __version__ = "0.1.0"
28 | __author__ = "Lekin Contributors"
29 | __license__ = "MIT"
30 |
31 | __all__ = [
32 | "Job",
33 | "JobCollector",
34 | "Operation",
35 | "OperationCollector",
36 | "Resource",
37 | "ResourceCollector",
38 | "Route",
39 | "RouteCollector",
40 | "TimeSlot",
41 | ]
42 |
--------------------------------------------------------------------------------
/docs/source/_templates/custom-module-template.rst:
--------------------------------------------------------------------------------
1 | {{ fullname.split(".")[-1] | escape | underline}}
2 |
3 | .. automodule:: {{ fullname }}
4 |
5 | {% block attributes %}
6 | {% if attributes %}
7 | .. rubric:: Module Attributes
8 |
9 | .. autosummary::
10 | :toctree:
11 | {% for item in attributes %}
12 | {{ item }}
13 | {%- endfor %}
14 | {% endif %}
15 | {% endblock %}
16 |
17 | {% block functions %}
18 | {% if functions %}
19 | .. rubric:: {{ _('Functions') }}
20 |
21 | .. autosummary::
22 | :toctree:
23 | :template: custom-base-template.rst
24 | {% for item in functions %}
25 | {{ item }}
26 | {%- endfor %}
27 | {% endif %}
28 | {% endblock %}
29 |
30 | {% block classes %}
31 | {% if classes %}
32 | .. rubric:: {{ _('Classes') }}
33 |
34 | .. autosummary::
35 | :toctree:
36 | :template: custom-class-template.rst
37 | {% for item in classes %}
38 | {{ item }}
39 | {%- endfor %}
40 | {% endif %}
41 | {% endblock %}
42 |
43 | {% block exceptions %}
44 | {% if exceptions %}
45 | .. rubric:: {{ _('Exceptions') }}
46 |
47 | .. autosummary::
48 | :toctree:
49 | {% for item in exceptions %}
50 | {{ item }}
51 | {%- endfor %}
52 | {% endif %}
53 | {% endblock %}
54 |
55 | {% block modules %}
56 | {% if modules %}
57 | .. rubric:: Modules
58 |
59 | .. autosummary::
60 | :toctree:
61 | :template: custom-module-template.rst
62 | :recursive:
63 | {% for item in modules %}
64 | {{ item }}
65 | {%- endfor %}
66 | {% endif %}
67 | {% endblock %}
68 |
--------------------------------------------------------------------------------
/lekin/solver/construction_heuristics/atcs.py:
--------------------------------------------------------------------------------
1 | """Apparent Tardiness Cost"""
2 |
3 | import logging
4 |
5 | from lekin.solver.construction_heuristics.base import BaseScheduler
6 |
7 |
8 | class ATCScheduler(object):
9 | def __init__(self, jobs, routes):
10 | self.jobs = jobs
11 | self.routes = routes
12 |
13 | def calculate_tardiness_cost(self, operation):
14 | # Calculate the tardiness cost for an operation based on its finish time and due date
15 | if operation.end_time > operation.due_date:
16 | return operation.end_time - operation.due_date
17 | else:
18 | return 0
19 |
20 | def schedule_job(self, job):
21 | # Schedule the operations of a job using ATC method
22 | for operation in job.route.operations:
23 | operation.start_time = max(operation.available_time, operation.earliest_start_time)
24 | operation.end_time = operation.start_time + operation.processing_time
25 |
26 | # Sort the operations by their tardiness cost in descending order
27 | sorted_operations = sorted(job.route.operations, key=self.calculate_tardiness_cost, reverse=True)
28 |
29 | # Reschedule the operations based on their tardiness cost
30 | current_time = 0
31 | for operation in sorted_operations:
32 | operation.start_time = max(current_time, operation.earliest_start_time)
33 | operation.end_time = operation.start_time + operation.processing_time
34 | current_time = operation.end_time
35 |
36 | def run(self):
37 | for job in self.jobs:
38 | self.schedule_job(job)
39 |
--------------------------------------------------------------------------------
/docs/source/index.rst:
--------------------------------------------------------------------------------
1 | .. python-lekin documentation master file, created by
2 | sphinx-quickstart on Fri Sep 30 17:57:17 2022.
3 | You can adapt this file completely to your liking, but it should at least
4 | contain the root `toctree` directive.
5 |
6 | python-lekin documentation
7 | ========================================
8 | .. raw:: html
9 |
10 | GitHub
11 |
12 |
13 | **python-lekin** is a smart factory scheduling toolkit; the name comes from `Lekin `_.
14 |
15 |
16 | Quick Start for Shop-Floor Scheduling
17 | ----------------------------------------
18 |
19 | Scheduling is an allocation task: finite resources are assigned to demands. Demands therefore carry priorities, and the main constraints are capacity constraints and material constraints. For capacity constraints, the finished goods in an order are broken down into operations along their routing, and each operation has corresponding production machines. For material constraints, the finished goods are exploded into raw-material requirements via the BOM (bill of materials), and the required materials must be fully kitted before an operation can start (see the small sketch below).
20 |
21 | Put more plainly, scheduling assigns operation tasks to resources and to time. Typical objectives are minimizing total completion time, minimizing changeovers, and so on. An operation task belongs to a demand (which carries a priority), has precedence relations with its sibling operations, and relates to resources through eligible resources, resource priorities and processing durations; resources have calendars.
22 |
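A toy illustration (all item names and quantities are invented) of exploding an order through a one-level BOM and checking whether the materials are kitted:

.. code-block:: python

    bom = {"FG1": {"RM_A": 2, "RM_B": 1}}   # finished good -> raw material per unit
    order = {"item": "FG1", "qty": 10}
    stock = {"RM_A": 15, "RM_B": 12}

    required = {rm: per_unit * order["qty"] for rm, per_unit in bom[order["item"]].items()}
    kitted = all(stock.get(rm, 0) >= qty for rm, qty in required.items())
    print(required)  # {'RM_A': 20, 'RM_B': 10}
    print(kitted)    # False: RM_A is short by 5 units
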
23 | Using the subscript convention, on machine k job j is processed immediately after job i.
24 |
25 |
26 |
27 |
28 |
29 |
30 |
31 |
32 | The Flexible Job-Shop Scheduling Problem (FJSP) involves two sub-decisions (see the toy decoding below):
33 | - Machine assignment: choose a machine for each operation
34 | - Operation sequencing: order the operations
35 |
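A toy decoding of the two decision vectors; the jobs, machines and durations are invented and this is not lekin's data model:

.. code-block:: python

    machine_assignment = {("J1", 1): "M1", ("J1", 2): "M2",   # machine assignment (MS)
                          ("J2", 1): "M2", ("J2", 2): "M1"}
    operation_sequence = [("J1", 1), ("J2", 1), ("J1", 2), ("J2", 2)]  # operation sequencing (OS)
    processing_time = {("J1", 1): 3, ("J1", 2): 2, ("J2", 1): 2, ("J2", 2): 4}

    machine_ready = {"M1": 0, "M2": 0}
    job_ready = {"J1": 0, "J2": 0}
    schedule = {}
    for op in operation_sequence:          # greedy left-shift decoding
        job, _ = op
        machine = machine_assignment[op]
        start = max(machine_ready[machine], job_ready[job])
        schedule[op] = (machine, start, start + processing_time[op])
        machine_ready[machine] = job_ready[job] = start + processing_time[op]

    print(schedule)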
36 |
37 | Finite Capacity Planning
38 |
39 |
40 | .. toctree::
41 | :maxdepth: 2
42 | :caption: Contents:
43 |
44 | rules
45 | heuristics
46 | application
47 | GitHub
48 |
49 |
50 | Indices and tables
51 | =========================
52 |
53 | * :ref:`genindex`
54 | * :ref:`modindex`
55 | * :ref:`search`
56 |
--------------------------------------------------------------------------------
/setup.cfg:
--------------------------------------------------------------------------------
1 | [flake8]
2 | max-line-length = 120
3 | show-source = true
4 | ignore =
5 | # space before : (needed for how black formats slicing)
6 | E203,
7 | # line break before binary operator
8 | W503,
9 | # line break after binary operator
10 | W504,
11 | # module level import not at top of file
12 | E402,
13 | # do not assign a lambda expression, use a def
14 | E731,
15 | # ignore not easy to read variables like i l I etc.
16 | E741,
17 | # Unnecessary list literal - rewrite as a dict literal.
18 | C406,
19 | # Unnecessary dict call - rewrite as a literal.
20 | C408,
21 | # Unnecessary list passed to tuple() - rewrite as a tuple literal.
22 | C409,
23 | # found modulo formatter (incorrect picks up mod operations)
24 | S001,
25 | # unused imports
26 | F401
27 | exclude = docs/build/*.py,
28 | node_modules/*.py,
29 | .eggs/*.py,
30 | versioneer.py,
31 | venv/*,
32 | .venv/*,
33 | .git/*
34 | .history/*
35 |
36 | [isort]
37 | profile = black
38 | honor_noqa = true
39 | line_length = 120
40 | combine_as_imports = true
41 | force_sort_within_sections = true
42 | known_first_party = lekin
43 |
44 | [tool:pytest]
45 | addopts =
46 | -rsxX
47 | -vv
48 | --last-failed
49 |     --cov=lekin
50 | --cov-report=html
51 | --cov-config=setup.cfg
52 | --cov-report=term-missing:skip-covered
53 | --no-cov-on-fail
54 | -n0
55 | testpaths = tests/
56 | log_cli_level = ERROR
57 | markers =
58 |
59 | [coverage:report]
60 | ignore_errors = False
61 | show_missing = true
62 |
63 |
64 | [mypy]
65 | ignore_missing_imports = true
66 | no_implicit_optional = true
67 | check_untyped_defs = true
68 |
69 | cache_dir = .cache/mypy/
70 |
--------------------------------------------------------------------------------
/lekin/solver/construction_heuristics/lsf.py:
--------------------------------------------------------------------------------
1 | """L"""
2 |
3 | import logging
4 | from typing import Any, Callable, Dict, List, Optional, Tuple, Union
5 |
6 | from lekin.solver.construction_heuristics.base import BaseScheduler
7 |
8 |
9 | class LSTScheduler(object):
10 | def __init__(self, jobs, routes):
11 | self.jobs = jobs
12 | self.routes = routes
13 |
14 | def calculate_slack_time(self, operation, current_time):
15 | # Calculate the slack time for an operation based on its due date and current time
16 | return max(0, operation.due_date - current_time)
17 |
18 | def select_next_operation(self, available_operations, current_time):
19 | # Select the operation with the longest slack time from the available operations
20 | selected_operation = None
21 | max_slack_time = float("-inf")
22 |
23 | for operation in available_operations:
24 | slack_time = self.calculate_slack_time(operation, current_time)
25 | if slack_time > max_slack_time:
26 | max_slack_time = slack_time
27 | selected_operation = operation
28 |
29 | return selected_operation
30 |
31 | def schedule_job(self, job, start_time):
32 | # Schedule the operations of a job based on LST
33 | current_time = start_time
34 |
35 | for operation in job.route.operations:
36 | slack_time = self.calculate_slack_time(operation, current_time)
37 | print(slack_time)
38 | operation.start_time = current_time
39 | operation.end_time = current_time + operation.processing_time
40 | current_time += operation.processing_time
41 |
42 | job.completion_time = current_time
43 |
44 | def run(self):
45 | for job in self.jobs:
46 | self.schedule_job(job, 0)
47 |
--------------------------------------------------------------------------------
/docs/source/rules.rst:
--------------------------------------------------------------------------------
1 | Rule-Based Scheduling
2 | =======================
3 |
4 | There are three broad approaches: rule-based, event-based, and resource-based.
5 |
6 | SPT: Shortest Processing Time
7 | -------------------------------
8 |
9 | Tasks are ordered by the processing time of their operations, from shortest to longest.
10 | In the implementation, the constructive rule loops over operations in precedence order so that the routing constraints are still satisfied (see the sketch below).
11 |
12 |
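A minimal sketch (not the lekin implementation; names are made up) of an SPT pass that respects precedence by only considering operations whose predecessors are finished:

.. code-block:: python

    def spt_sequence(processing, preds):
        """processing: {op: time}; preds: {op: set of predecessor ops}."""
        done, sequence = set(), []
        while len(done) < len(processing):
            ready = [o for o in processing if o not in done and preds[o] <= done]
            nxt = min(ready, key=lambda o: processing[o])  # shortest processing time first
            sequence.append(nxt)
            done.add(nxt)
        return sequence

    print(spt_sequence({"A1": 3, "A2": 1, "B1": 2},
                       {"A1": set(), "A2": {"A1"}, "B1": set()}))
    # ['B1', 'A1', 'A2']
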
13 | EDD: Earliest Due Date
14 | ---------------------------
15 |
16 | Tasks are ordered by their required completion time (the promised due date), from earliest to latest.
17 |
18 | SPT-EDD Rule
19 | -----------------
20 |
21 | 1) Build the schedule that minimizes D(max) according to the EDD rule.
22 | 2) Compute the total flow time of all tasks.
23 | 3) Among the tasks whose promised due date (di) is at least the total flow time (there may be more than one), pick the one with the largest processing time, following the SPT rule, and place it last.
24 | 4) Remove the task fixed in step 3, together with anything sequenced after it, and go back to step 2 (a sketch of the procedure follows).
25 |
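A sketch of the procedure above, assuming each job is a (name, processing_time, due_date) tuple rather than lekin's data structures:

.. code-block:: python

    def spt_edd(jobs):
        remaining = sorted(jobs, key=lambda j: j[2])        # step 1: EDD order
        tail = []
        while remaining:
            total_flow = sum(j[1] for j in remaining)       # step 2: total flow time
            candidates = [j for j in remaining if j[2] >= total_flow]
            if not candidates:
                break
            last = max(candidates, key=lambda j: j[1])      # step 3: longest processing time goes last
            tail.insert(0, last)                            # step 4: fix it at the end and repeat
            remaining.remove(last)
        return remaining + tail

    print(spt_edd([("A", 4, 20), ("B", 1, 5), ("C", 3, 9)]))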
26 |
27 | Critical Path Method
28 | ----------------------
29 |
30 | The critical path determines the shortest time in which the project can be completed; there may be more than one critical path.
31 |
32 | Its basic concepts are:
33 | Earliest start time (Early start)
34 | Latest start time (Late start)
35 | Earliest finish time (Early finish)
36 | Latest finish time (Late finish)
37 | Slack time (slack)
38 |
39 | A forward pass determines each task's earliest start and earliest finish times; a backward pass determines each task's latest finish and latest start times (see the small example below).
40 |
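A small worked example of the forward and backward passes; the task names and durations are invented:

.. code-block:: python

    duration = {"A": 3, "B": 2, "C": 4, "D": 2}
    preds = {"A": [], "B": ["A"], "C": ["A"], "D": ["B", "C"]}
    order = ["A", "B", "C", "D"]           # topological order

    es, ef = {}, {}
    for t in order:                        # forward pass
        es[t] = max((ef[p] for p in preds[t]), default=0)
        ef[t] = es[t] + duration[t]

    makespan = max(ef.values())
    ls, lf = {}, {}
    for t in reversed(order):              # backward pass
        succs = [s for s in order if t in preds[s]]
        lf[t] = min((ls[s] for s in succs), default=makespan)
        ls[t] = lf[t] - duration[t]

    slack = {t: ls[t] - es[t] for t in order}
    print([t for t in order if slack[t] == 0])  # critical path: ['A', 'C', 'D']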
41 |
42 | Forward Scheduling
43 | --------------------
44 |
45 | Forward and backward scheduling, like the other rule heuristics, schedule one operation set at a time. Each time an operation of a job is scheduled, the machine state, the job state and the states of the downstream jobs are updated.
46 | In forward scheduling, the constraint passed on to the next operation is its earliest start time.
47 |
48 | .. code-block:: python
49 |
50 |     forward(operations, next_op_start_until, with_material_kitting_constraint, align_with_same_production_line, earliest_start_time, earliest_end_time)
51 |
52 |
53 | .. code-block:: python
54 |
55 | assign_op(operation, is_critical, direction: str)
56 |
57 | In forward scheduling, the resources that end up packed most tightly are usually the bottleneck resources.
58 |
59 | Backward Scheduling
60 | ---------------------
61 |
62 | The earliest start time of each MO is initialized to max(ESD, today), ensuring that it starts no earlier than today and no earlier than the start of the resource calendar.
63 | In backward scheduling, the constraint passed on to the next operation is its latest end time.
64 |
65 | Backward scheduling
66 | - reduces inventory from a business point of view (just-in-time)
67 | - tends to produce non-contiguous schedules
68 | - makes it harder to reason about continuous production runs
69 | - may have to be redone again and again when material kitting is taken into account
70 |
71 |
72 | .. code-block:: python
73 |
74 |     backward(operations, next_op_start_until, with_material_kitting_constraint, align_with_same_production_line, latest_start_time, latest_end_time)
75 |
76 |
77 |
78 | Endgame
79 | ----------------------
80 |
81 | Applying rule heuristics to scheduling means, after enough abstraction, flexibly combining several methods.
82 |
83 | Within a batch there are operations that can start now, and the remaining ones that cannot start yet.
84 | - Must the ones that can start always be started before the ones that cannot? Not necessarily.
85 | - The dependencies in the operation graph already impose an earliest-start constraint on every operation; it may not even be a concrete time, but a relation between variables.
86 |
--------------------------------------------------------------------------------
/.github/workflows/lint.yml:
--------------------------------------------------------------------------------
1 | name: Lint
2 |
3 | on:
4 | push:
5 | branches: [master, main, dev]
6 | pull_request:
7 | branches: [master, main]
8 |
9 | jobs:
10 | linter-black:
11 | name: Check code formatting with Black
12 | runs-on: ubuntu-latest
13 | steps:
14 | - name: Checkout
15 | uses: actions/checkout@v4
16 | - name: Set up Python 3.9
17 | uses: actions/setup-python@v5
18 | with:
19 | python-version: 3.9
20 | - name: Install Black
21 | run: pip install black[jupyter]
22 | - name: Run Black
23 | run: black --check .
24 |
25 | imports-check-isort:
26 | name: Check valid import formatting with isort
27 | runs-on: ubuntu-latest
28 | steps:
29 | - name: Checkout
30 | uses: actions/checkout@v4
31 | - name: Set up Python 3.9
32 | uses: actions/setup-python@v5
33 | with:
34 | python-version: 3.9
35 | - name: Install isort
36 | run: pip install isort==5.6.4
37 | - name: Run isort
38 | run: isort --check-only --diff .
39 |
40 | linter-flake8:
41 | name: Check valid formatting with flake8
42 | runs-on: ubuntu-latest
43 | timeout-minutes: 10
44 | steps:
45 | - name: Checkout
46 | uses: actions/checkout@v4
47 | - uses: actions/setup-python@v5
48 | with:
49 | python-version: 3.9
50 |
51 | - name: Install dependencies
52 | run: pip install flake8==3.9.2
53 | - name: Run checks
54 | run: flake8
55 |
56 | pre-commit-hooks:
57 | name: Check that pre-commit hooks pass
58 | runs-on: ubuntu-latest
59 | timeout-minutes: 10
60 | steps:
61 | - name: Checkout
62 | uses: actions/checkout@v4
63 | - uses: actions/setup-python@v5
64 | with:
65 | python-version: 3.9
66 | - name: Install dependencies
67 | run: pip install pre-commit
68 |
69 | - name: Run checks
70 | run: pre-commit run --all-files
71 |
--------------------------------------------------------------------------------
/.github/workflows/codeql-analysis.yml:
--------------------------------------------------------------------------------
1 | # This action runs GitHub's industry-leading static analysis engine, CodeQL, against a repository's source code to find security vulnerabilities.
2 | # https://github.com/github/codeql-action
3 |
4 | name: "CodeQL"
5 |
6 | on:
7 | schedule:
8 | - cron: '0 0 29 2 *' # Runs at 00:00 UTC on the 29th of Feb
9 |
10 | jobs:
11 | analyze:
12 | name: Analyze
13 | runs-on: ubuntu-latest
14 |
15 | strategy:
16 | fail-fast: false
17 | matrix:
18 | language: ['python']
19 | # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ]
20 |
21 | steps:
22 | - name: Checkout repository
23 | uses: actions/checkout@v3
24 |
25 | # Initializes the CodeQL tools for scanning.
26 | - name: Initialize CodeQL
27 | uses: github/codeql-action/init@v2
28 | with:
29 | languages: ${{ matrix.language }}
30 | # If you wish to specify custom queries, you can do so here or in a config file.
31 | # By default, queries listed here will override any specified in a config file.
32 | # Prefix the list here with "+" to use these queries and those in the config file.
33 | # queries: ./path/to/local/query, your-org/your-repo/queries@main
34 |
35 | # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
36 | # If this step fails, then you should remove it and run the build manually (see below)
37 | - name: Autobuild
38 | uses: github/codeql-action/autobuild@v2
39 |
40 | # ℹ️ Command-line programs to run using the OS shell.
41 | # 📚 https://git.io/JvXDl
42 |
43 | # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
44 | # and modify them (or add more) to build your code if your project
45 | # uses a compiled language
46 |
47 | #- run: |
48 | # make bootstrap
49 | # make release
50 |
51 | - name: Perform CodeQL Analysis
52 | uses: github/codeql-action/analyze@v2
53 |
--------------------------------------------------------------------------------
/lekin/utils/group_op_ds.py:
--------------------------------------------------------------------------------
1 | """
2 | When a node is newly added we need to find it quickly by id, hence the earlier dict[id, MaterialOP].
3 | 
4 | When moving a node, first mark a set of candidates,
5 | 
6 | then, for a chosen candidate, search backwards for its insertion position,
7 | 
8 | and finally rebuild the ordering.
9 | 
10 | """
11 | 
12 | from collections import OrderedDict
13 | 
14 | 
15 | class DictNode:
16 |     # Doubly-linked-list node, kept for a possible linked-list based variant (currently unused)
17 |     def __init__(self, key, value):
18 |         self.key = key
19 |         self.value = value
20 |         self.prev = None
21 |         self.next = None
22 | 
23 | 
24 | class IndexedList:
25 | def __init__(self):
26 | self.ordered_dict = OrderedDict()
27 | self.order_list = []
28 |
29 | def insert_at_index(self, index, key, value):
30 | if index < 0 or index > len(self.order_list):
31 | raise IndexError("Index out of bounds")
32 |
33 | self.ordered_dict[key] = value
34 | self.order_list.insert(index, key)
35 |
36 |     def insert_after(self, key, new_key, new_value):
37 |         if key is None:
38 |             # Insert at the beginning
39 |             self.ordered_dict[new_key] = new_value
40 |             self.ordered_dict.move_to_end(new_key, last=False)
41 |             self.order_list.insert(0, new_key)
42 |         elif key in self.ordered_dict:
43 |             # Insert right after the specified key, keeping both structures in sync
44 |             items = list(self.ordered_dict.items())
45 |             index = next(i for i, (k, _) in enumerate(items) if k == key)
46 |             items.insert(index + 1, (new_key, new_value))
47 |             self.ordered_dict = OrderedDict(items)
48 |             self.order_list.insert(self.order_list.index(key) + 1, new_key)
49 |         else:
50 |             raise KeyError(f"Key '{key}' not found in the indexed list")
51 |
52 |     def display(self):
53 |         for key in self.order_list:  # positional order, matching insert_at_index
54 |             print(f"({key}: {self.ordered_dict[key]})", end=" ")
55 |         print()
56 |
57 |
58 | # Example usage
59 | if __name__ == "__main__":
60 |     indexed_list = IndexedList()
61 |     indexed_list.insert_at_index(0, "a", 1)
62 |     indexed_list.insert_at_index(1, "b", 2)
63 |     indexed_list.insert_at_index(1, "c", 3)
64 |     indexed_list.display()  # Output: (a: 1) (c: 3) (b: 2)
65 | 
66 |     indexed_list.insert_at_index(2, "d", 4)
67 |     indexed_list.display()  # Output: (a: 1) (c: 3) (d: 4) (b: 2)
--------------------------------------------------------------------------------
/lekin/solver/meta_heuristics/hill_climbing.py:
--------------------------------------------------------------------------------
1 | """Hill climbing"""
2 |
3 | import random
4 |
5 |
6 | class HillClimbingScheduler:
7 | def __init__(self, job_collector):
8 | self.job_collector = job_collector
9 |
10 | def schedule(self, max_iterations=1000):
11 | current_solution = self.random_solution()
12 | current_score = self.evaluate_solution(current_solution)
13 |
14 | for _ in range(max_iterations):
15 | neighbors = self.get_neighbors(current_solution)
16 | if not neighbors:
17 | break
18 |
19 |             next_solution = min(neighbors, key=lambda neighbor: self.evaluate_solution(neighbor))
20 |             next_score = self.evaluate_solution(next_solution)
21 | 
22 |             if next_score < current_score:
23 | current_solution = next_solution
24 | current_score = next_score
25 | else:
26 | break
27 |
28 | return current_solution
29 |
30 | def random_solution(self):
31 | return {
32 | operation: random.randint(0, operation.get_latest_start_time())
33 | for job in self.job_collector.jobs
34 | for operation in job.route.operations
35 | }
36 |
37 | def get_neighbors(self, solution):
38 | neighbors = []
39 | for operation, start_time in solution.items():
40 |             for t in range(max(0, start_time - 1), operation.get_latest_start_time() + 1):
41 | neighbor = solution.copy()
42 | neighbor[operation] = t
43 | neighbors.append(neighbor)
44 | return neighbors
45 |
46 | def evaluate_solution(self, solution):
47 | end_times = {}
48 | for job in self.job_collector.jobs:
49 | for operation in job.route.operations:
50 | if operation in solution:
51 | start_time = solution[operation]
52 | else:
53 | start_time = operation.get_latest_start_time()
54 |
55 | end_time = start_time + operation.processing_time
56 | if operation.id not in end_times or end_time > end_times[operation.id]:
57 | end_times[operation.id] = end_time
58 |
59 | makespan = max(end_times.values())
60 | return makespan
61 |
--------------------------------------------------------------------------------
/lekin/solver/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | LeKin Scheduling Solver
3 |
4 | This module provides a flexible and extensible framework for solving scheduling problems.
5 | It supports various solving strategies including:
6 | - Continuous Time Planning (CTP)
7 | - Construction Heuristics
8 | - Meta-heuristics
9 | - Reinforcement Learning
10 | - Operation Research methods
11 | """
12 |
13 | from typing import Any, Dict, List, Optional
14 |
15 | from lekin.lekin_struct.job import Job
16 | from lekin.lekin_struct.resource import Resource
17 | from lekin.lekin_struct.route import Route
18 | from lekin.solver.core.base_solver import BaseSolver
19 | from lekin.solver.core.ctp_solver import CTPSolver
20 |
21 |
22 | def create_solver(solver_type: str, config: Optional[Dict[str, Any]] = None) -> BaseSolver:
23 | """Create a solver instance of the specified type.
24 |
25 | Args:
26 | solver_type: Type of solver to create ('ctp', 'construction', 'meta', 'rl', 'or')
27 | config: Optional configuration dictionary
28 |
29 | Returns:
30 | A solver instance
31 |
32 | Raises:
33 | ValueError: If solver_type is not supported
34 | """
35 | solvers = {
36 | "ctp": CTPSolver,
37 | # Add other solver types here as they are implemented
38 | }
39 |
40 | if solver_type not in solvers:
41 | raise ValueError(f"Unsupported solver type: {solver_type}")
42 |
43 | return solvers[solver_type](config)
44 |
45 |
46 | def solve_scheduling_problem(
47 | jobs: List[Job],
48 | routes: List[Route],
49 | resources: List[Resource],
50 | solver_type: str = "ctp",
51 | config: Optional[Dict[str, Any]] = None,
52 | ) -> Dict[str, Any]:
53 | """Solve a scheduling problem using the specified solver.
54 |
55 | Args:
56 | jobs: List of jobs to be scheduled
57 | routes: List of available routes
58 | resources: List of available resources
59 | solver_type: Type of solver to use
60 | config: Optional configuration dictionary
61 |
62 | Returns:
63 | Dictionary containing the solution and metadata
64 |
65 | Raises:
66 | ValueError: If solver_type is not supported
67 | """
68 | solver = create_solver(solver_type, config)
69 | return solver.solve(jobs, routes, resources)
70 |
71 |
72 | __all__ = ["create_solver", "solve_scheduling_problem", "BaseSolver", "CTPSolver"]
73 |
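74 | 
75 | # Illustrative usage (sketch). The config keys below are hypothetical examples,
76 | # not documented lekin options:
77 | #
78 | #     solver = create_solver("ctp", config={"time_limit_seconds": 30})
79 | #     result = solve_scheduling_problem(jobs, routes, resources, solver_type="ctp")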
--------------------------------------------------------------------------------
/lekin/dashboard/gantt.py:
--------------------------------------------------------------------------------
1 | """
2 | Gantt
3 | """
4 |
5 | import logging
6 | from typing import Any, Callable, Dict, Generator, List, Optional, Tuple, Type, Union
7 |
8 | from matplotlib import ticker
9 | import matplotlib.patches as patches
10 | import matplotlib.pyplot as plt
11 | import pandas as pd
12 |
13 | logging.getLogger("matplotlib.font_manager").disabled = True
14 |
15 |
16 | def get_scheduling_res_from_all_jobs(job_collector):
17 | ops = []
18 | for job in job_collector.job_list:
19 | ops += job.operations
20 |
21 | scheduling_res = []
22 | for op in ops:
23 | scheduling_res.append(
24 | [
25 | op.operation_id,
26 | op.parent_job_id,
27 | op.quantity,
28 | op.assigned_resource.resource_id,
29 | min(op.assigned_hours),
30 | max(op.assigned_hours),
31 | ]
32 | )
33 | scheduling_res = pd.DataFrame(scheduling_res, columns=["Operation", "Job", "Quantity", "Resource", "Start", "End"])
34 | scheduling_res["Duration"] = scheduling_res["End"] - scheduling_res["Start"] # + 1
35 | return scheduling_res
36 |
37 |
38 | def plot_gantt_chart(job_collector, scheduling_res):
39 | color_dict = job_collector.generate_color_list_for_jobs()
40 |
41 | # gantt
42 | resource_list = []
43 | for resource, group in scheduling_res.groupby("Resource"):
44 | resource_list.append(resource)
45 | start_tuple = []
46 | color_tuple = []
47 | for _, op in group.iterrows():
48 | start_tuple.append(op[["Start", "Duration"]].tolist())
49 | color_tuple.append(color_dict.get(op["Job"]))
50 |
51 | plt.gca().broken_barh(start_tuple, ((resource + 1) * 10, 9), facecolors=color_tuple)
52 |
53 | # legend
54 | legends_colors = []
55 | for job in job_collector.job_list:
56 | legends_colors.append(patches.Patch(color=color_dict.get(job.job_id), label=f"job{job.job_id}"))
57 | plt.legend(handles=legends_colors, fontsize=8)
58 |
59 | # resource tick
60 | resources = resource_list # list(reversed(resource_list))
61 | resource_ticks = [15]
62 | for i in range(len(resources)):
63 | resource_ticks.append(resource_ticks[i] + 10) # machine increase + 10
64 | plt.yticks(resource_ticks[1:], resources)
65 |
66 | plt.grid(True)
67 | plt.xlabel("Time")
68 | plt.ylabel("Resources")
69 | plt.title("Gantt Chart - Scheduling Result")
70 | plt.show()
71 |
--------------------------------------------------------------------------------
/lekin/solver/construction_heuristics/mix.py:
--------------------------------------------------------------------------------
1 | import heapq
2 |
3 |
4 | class JobScheduler:
5 |     def __init__(self, available_slots, jobs, routes, operations, resources):
6 |         self.available_slots = available_slots
7 |         heapq.heapify(self.available_slots)  # Convert the list into a min heap
8 |         self.jobs = jobs
9 |         self.routes = routes
10 |         self.operations = operations
11 |         self.resources = resources
12 |         self.schedule = None  # built up by the scheduling passes; returned by optimize_schedule()
12 |
13 | def backward_schedule(self):
14 | # Implement your initial backward scheduling pass here
15 | pass
16 |
17 | def assign_resource(self, operation, available_resources):
18 | # Implement resource assignment logic based on availability and scoring
19 | pass
20 |
21 | def analyze_schedule_density(self):
22 | # Implement schedule density analysis
23 | pass
24 |
25 | def identify_bottleneck_resources(self):
26 | # Identify bottleneck resources limiting density
27 | pass
28 |
29 | def push_operations_closer(self, bottleneck_resources):
30 | # Push operations closer on bottleneck resources
31 | pass
32 |
33 | def rescore_operations(self):
34 | # Rescore operations based on priority and slack time
35 | pass
36 |
37 | def reassign_operations(self):
38 | # Reassign operations to reduce gaps
39 | pass
40 |
41 | def reevaluate_routes(self, critical_jobs):
42 | # Re-evaluate routes for critical jobs
43 | pass
44 |
45 | def reschedule_operations(self, critical_jobs):
46 | # Reschedule operations on preferred resources for critical jobs
47 | pass
48 |
49 | def push_dense(self):
50 | # Iteratively push operations closer system-wide
51 | # while True:
52 | # self.backward_schedule()
53 | # density = self.analyze_schedule_density()
54 | # if density is not improved:
55 | # break
56 | pass
57 |
58 | def final_tweaking(self):
59 | # Fine-tune schedules of critical jobs and leverage flexibilities
60 | pass
61 |
62 | def optimize_schedule(self):
63 | self.push_dense()
64 | critical_jobs = self.identify_critical_jobs()
65 | self.reevaluate_routes(critical_jobs)
66 | self.reschedule_operations(critical_jobs)
67 | self.final_tweaking()
68 | return self.schedule
69 |
70 | def identify_critical_jobs(self):
71 | # Identify critical jobs on the schedule
72 | pass
73 |
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [tool.black]
2 | line-length = 120
3 | include = '\.pyi?$'
4 | exclude = '''
5 | (
6 | /(
7 | \.eggs # exclude a few common directories in the
8 | | \.git # root of the project
9 | | \.hg
10 | | \.mypy_cache
11 | | \.tox
12 | | \.venv
13 | | _build
14 | | buck-out
15 | | build
16 | | dist
17 | )/
18 | | docs/build/
19 | | node_modules/
20 |   | venv/
21 | | .venv/
22 | )
23 | '''
24 |
25 | [tool.nbqa.mutate]
26 | isort = 1
27 | black = 1
28 |
29 | [tool.poetry]
30 | name = "lekin"
31 | readme = "README.md" # Markdown files are supported
32 | version = "0.0.2" # is being replaced automatically
33 |
34 | authors = ["Hongying Yue"]
35 | classifiers = [
36 | "Intended Audience :: Developers",
37 | "Intended Audience :: Science/Research",
38 | "Programming Language :: Python :: 3",
39 | "Programming Language :: Python :: 3.8",
40 | "Programming Language :: Python :: 3.9",
41 | "Programming Language :: Python :: 3.10",
42 | "Topic :: Scientific/Engineering",
43 | "Topic :: Scientific/Engineering :: Mathematics",
44 | "Topic :: Scientific/Engineering :: Artificial Intelligence",
45 | "Topic :: Software Development",
46 | "Topic :: Software Development :: Libraries",
47 | "Topic :: Software Development :: Libraries :: Python Modules",
48 | "License :: OSI Approved :: MIT License"]
49 | description = "Flexible job shop scheduler in Python"
50 | repository = "https://github.com/HongyingYue/python-lekin"
51 | documentation = "https://python-lekin.readthedocs.io"
52 | homepage = "https://python-lekin.readthedocs.io"
53 |
54 | [tool.poetry.dependencies]
55 | python = ">=3.7"
56 | #pandas = "^1.1.0"
57 | matplotlib = "*"
58 |
59 | [tool.poetry.dev-dependencies]
60 | # checks and make tools
61 | pre-commit = "^2.20.0"
62 |
63 | invoke = "*"
64 | flake8 = "*"
65 | mypy = "*"
66 | pylint = "*"
67 | isort = "*"
68 | coverage = "*"
69 |
70 | # jupyter notebook
71 | ipykernel = "*"
72 | black = { version = "*", allow-prereleases = true, extras = ["jupyter"] }
73 |
74 | # documentation
75 | sphinx = "*"
76 | pydata-sphinx-theme = "*"
77 | nbsphinx = "*"
78 | # pandoc = "*"
79 | recommonmark = "*"
80 | ipywidgets = "^8.0.1"
81 |
82 | [tool.poetry-dynamic-versioning]
83 | enable = true
84 | vcs = "git"
85 | dirty = false
86 | style = "semver" # semantic versioning
87 |
88 | [build-system] # make the package pip installable
89 | requires = ["poetry-core>=1.0.7", "poetry-dynamic-versioning>=0.13.1"]
90 | build-backend = "poetry.core.masonry.api"
91 |
--------------------------------------------------------------------------------
/lekin/solver/meta_heuristics/shifting_bottle_neck.py:
--------------------------------------------------------------------------------
1 | """Shifting bottle neck meta_heuristics"""
2 |
3 | import copy
4 |
5 |
6 | class ShiftingBottleneckScheduler:
7 | def __init__(self, job_collector):
8 | self.job_collector = job_collector
9 |
10 | def schedule(self):
11 | best_solution = self.generate_initial_solution()
12 | best_cost = self.calculate_cost(best_solution)
13 |
14 | for _ in range(1000): # Number of iterations
15 | current_solution = copy.deepcopy(best_solution)
16 | bottleneck_job, bottleneck_op = self.find_bottleneck_operation(current_solution)
17 |
18 | # Move bottleneck operation to different time slots
19 | for time_slot in range(self.job_collector.max_time):
20 | current_solution[bottleneck_job.id][bottleneck_op.id] = time_slot
21 | current_cost = self.calculate_cost(current_solution)
22 |
23 | if current_cost < best_cost:
24 |                     best_solution = copy.deepcopy(current_solution)  # snapshot; current_solution keeps being mutated
25 | best_cost = current_cost
26 |
27 | return best_solution
28 |
29 | def generate_initial_solution(self):
30 | # Randomly assign operations to time slots
31 | solution = {}
32 | for job in self.job_collector.jobs:
33 | solution[job.id] = {op.id: 0 for op in job.route.operations}
34 | return solution
35 |
36 | def find_bottleneck_operation(self, solution):
37 |         # Find the operation that finishes latest in the current schedule (the bottleneck)
38 | max_processing_time = 0
39 | bottleneck_job = None
40 | bottleneck_op = None
41 |
42 | for job in self.job_collector.jobs:
43 | for operation in job.route.operations:
44 | processing_time = operation.processing_time
45 | start_time = solution[job.id][operation.id]
46 | end_time = start_time + processing_time
47 |
48 | if end_time > max_processing_time:
49 | max_processing_time = end_time
50 | bottleneck_job = job
51 | bottleneck_op = operation
52 |
53 | return bottleneck_job, bottleneck_op
54 |
55 | def calculate_cost(self, solution):
56 | # Calculate the makespan of the schedule
57 | max_end_time = 0
58 | for job in self.job_collector.jobs:
59 | end_time = max(
60 | [solution[job.id][operation.id] + operation.processing_time for operation in job.route.operations]
61 | )
62 | max_end_time = max(max_end_time, end_time)
63 | return max_end_time
64 |
--------------------------------------------------------------------------------
/lekin/solver/solver.py:
--------------------------------------------------------------------------------
1 | import copy
2 | import random
3 | import time
4 |
5 |
6 | def check_constraints(job_assignments):
7 |     for i, assignment1 in enumerate(job_assignments):
8 |         for j, assignment2 in enumerate(job_assignments):
9 |             if i < j:
10 |                 # Check that both assignments have been given a timeslot before comparing them
11 |                 if assignment1.timeslot is None or assignment2.timeslot is None:
12 |                     return False
13 |                 # Check resource conflict: overlapping timeslots on the same resource
14 |                 if assignment1.resource == assignment2.resource:
15 |                     if not (
16 |                         assignment1.timeslot.end_time <= assignment2.timeslot.start_time
17 |                         or assignment2.timeslot.end_time <= assignment1.timeslot.start_time
18 |                     ):
19 |                         return False
20 |     return True
21 |
22 |
23 | class LekinSolver(object):
24 | def __init__(self, config):
25 | self.config = config
26 | self.best_solution = None
27 |
28 | def solve(self, schedule):
29 | start_time = time.time()
30 | current_solution = copy.deepcopy(schedule)
31 | self.best_solution = current_solution
32 | tabu_list = []
33 | iterations = 0
34 |
35 | while not self._is_termination_reached(start_time, iterations):
36 | neighbors = self.config.move_selector.generate_neighbors(current_solution)
37 | feasible_neighbors = [neighbor for neighbor in neighbors if check_constraints(neighbor.job_assignments)]
38 |
39 | if not feasible_neighbors:
40 | continue
41 |
42 | feasible_neighbors.sort(key=self.config.entity_selector.evaluate)
43 | current_solution = feasible_neighbors[0]
44 | current_cost = self.config.entity_selector.evaluate(current_solution)
45 | best_cost = self.config.entity_selector.evaluate(self.best_solution)
46 |
47 | if current_cost < best_cost:
48 | self.best_solution = current_solution
49 |
50 | tabu_list.append(current_solution)
51 | if len(tabu_list) > self.config.entity_selector.tabu_tenure:
52 | tabu_list.pop(0)
53 |
54 | iterations += 1
55 |
56 | return self.best_solution
57 |
58 | def _is_termination_reached(self, start_time, iterations):
59 | if self.config.termination.seconds_spent_limit:
60 | if time.time() - start_time > self.config.termination.seconds_spent_limit:
61 | return True
62 | if self.config.termination.max_iterations:
63 | if iterations >= self.config.termination.max_iterations:
64 | return True
65 | return False
66 |
--------------------------------------------------------------------------------
/README_zh_CN.md:
--------------------------------------------------------------------------------
1 | [license-image]: https://img.shields.io/badge/License-Apache%202.0-blue.svg
2 | [license-url]: https://opensource.org/licenses/Apache-2.0
3 | [pypi-image]: https://badge.fury.io/py/lekin.svg
4 | [pypi-url]: https://pypi.python.org/pypi/lekin
5 | [pepy-image]: https://pepy.tech/badge/lekin/month
6 | [pepy-url]: https://pepy.tech/project/lekin
7 | [build-image]: https://github.com/HongyingYue/python-lekin/actions/workflows/test.yml/badge.svg?branch=main
8 | [build-url]: https://github.com/HongyingYue/python-lekin/actions/workflows/test.yml?query=branch%3Amain
9 | [lint-image]: https://github.com/HongyingYue/python-lekin/actions/workflows/lint.yml/badge.svg?branch=main
10 | [lint-url]: https://github.com/HongyingYue/python-lekin/actions/workflows/lint.yml?query=branch%3Amain
11 | [docs-image]: https://readthedocs.org/projects/python-lekin/badge/?version=latest
12 | [docs-url]: https://python-lekin.readthedocs.io/en/latest/
13 |
14 |
15 |
16 |
17 |
18 | [![LICENSE][license-image]][license-url]
19 | [![PyPI Version][pypi-image]][pypi-url]
20 | [![Download][pepy-image]][pepy-url]
21 | [![Build Status][build-image]][build-url]
22 | [![Lint Status][lint-image]][lint-url]
23 | [![Docs Status][docs-image]][docs-url]
24 |
25 | **[文档](https://python-lekin.readthedocs.io)** | **[教程](https://python-lekin.readthedocs.io/en/latest/tutorials.html)** | **[发布日志](https://python-lekin.readthedocs.io/en/latest/CHANGELOG.html)** | **[English](https://github.com/HongyingYue/python-lekin/blob/main/README.md)**
26 |
27 | **python-lekin**是一个APS智能排产调度工具。考虑实际约束的前提下,实现动态调整计划排程,高效响应客户订单承诺。
28 |
29 | - 支持工艺路线约束
30 | - 支持产能约束
31 | - 支持物料齐套约束
32 | - 支持顺排、倒排等排产方法
33 | - 支持遗传算法排产
34 | - 支持强化学习排产
35 |
36 | # **开发中- 目前请不要使用包,可用代码跑和学习!**
37 |
38 | ## 快速入门
39 |
40 | [](https://colab.research.google.com/drive/1zHqYZFZNvLE7aoDcBUh7TCK7oHfXQkLi?usp=sharing)
41 |
42 |
43 | ### 安装
44 |
45 | ``` shell
46 | $ pip install lekin
47 | ```
48 |
49 | ### 使用
50 |
51 | ``` python
52 | from lekin import Heuristics, Genetics
53 | from lekin import Scheduler
54 |
55 | solver = Heuristics()
56 | scheduler = Scheduler(solver)
57 | scheduler.solve(jobs, machines)
58 |
59 | scheduler.draw()
60 | ```
61 |
62 | ## 示例
63 | 在实际APS系统开发中,
64 |
65 | - 按工艺路线拆分工序
66 | - 按BOM拆分物料
67 |
68 | ### 数据准备
69 |
70 | - Job
71 | - Task
72 | - Machine
73 | - Route
74 |
75 |
76 | ## 引用
77 |
78 | ```
79 | @misc{python-lekin2022,
80 | author = {Hongying Yue},
81 | title = {python lekin},
82 | year = {2022},
83 | publisher = {GitHub},
84 | journal = {GitHub repository},
85 | howpublished = {\url{https://github.com/hongyingyue/python-lekin}},
86 | }
87 | ```
88 |
--------------------------------------------------------------------------------
/lekin/solver/meta_heuristics/branch_and_bound.py:
--------------------------------------------------------------------------------
1 | from datetime import datetime, timedelta
2 | from itertools import permutations
3 |
4 | from lekin.lekin_struct import TimeSlot
5 |
6 |
7 | class BranchAndBoundScheduler:
8 | def __init__(self, job_list, resource_list):
9 | self.job_list = job_list
10 | self.resource_list = resource_list
11 | self.best_schedule = None
12 | self.best_cost = float("inf")
13 |
14 |     def find_available_time_slots(self, operation, resource, current_time):
15 |         # Collect slots that start late enough (processing_time is assumed to be in hours)
16 |         available_time_slots = []
17 |         for slot in resource.available_time_slots:
18 |             if slot.start_time >= current_time + timedelta(hours=operation.processing_time):
19 |                 available_time_slots.append(slot)
20 |         return available_time_slots
21 |
22 |     def assign_operation(self, job, operation, resource, start_time):
23 |         # Assign the operation to the resource at start_time and return its end time
24 |         end_time = start_time + timedelta(hours=operation.processing_time)
25 |         resource.available_time_slots.append(TimeSlot(end_time, float("inf")))
26 |         return end_time
27 |
28 | def calculate_cost(self, schedule):
29 | # Implement the logic to calculate the cost of the current schedule
30 | # For example, you can calculate makespan or any other relevant metric
31 | makespan = max(slot.end_time for slot in schedule)
32 | return makespan
33 |
34 | def branch_and_bound(self, current_job_idx, current_time, schedule):
35 | # Implement the Branch and Bound algorithm for scheduling
36 | if current_job_idx == len(self.job_list):
37 | cost = self.calculate_cost(schedule)
38 | if cost < self.best_cost:
39 | self.best_cost = cost
40 | self.best_schedule = schedule.copy()
41 | return
42 |
43 | job = self.job_list[current_job_idx]
44 | route = self.resource_list[job.assigned_route_id]
45 |
46 | for resource_id in route.operations[0].resource_ids:
47 | resource = self.resource_list[resource_id]
48 | time_slots = self.find_available_time_slots(route.operations[0], resource, current_time)
49 | for slot in time_slots:
50 | new_schedule = schedule + [slot]
51 | new_time = self.assign_operation(job, route.operations[0], resource, slot.start_time)
52 | self.branch_and_bound(current_job_idx + 1, new_time, new_schedule)
53 |
54 | def get_schedule(self):
55 | # Implement the main function to get the final schedule using Branch and Bound
56 | self.branch_and_bound(0, datetime(2023, 1, 1), [])
57 | return self.best_schedule
58 |
--------------------------------------------------------------------------------
/lekin/solver/operation_research/ortool.py:
--------------------------------------------------------------------------------
1 | from ortools.sat.python import cp_model
2 |
3 |
4 | class ORToolsScheduler:
5 | def __init__(self, job_collector):
6 | self.job_collector = job_collector
7 | self.model = cp_model.CpModel()
8 | self.vars = {}
9 |
10 | def schedule(self):
11 | self.create_variables()
12 | self.add_constraints()
13 | self.add_objective()
14 | solver = cp_model.CpSolver()
15 | status = solver.Solve(self.model)
16 |         if status in (cp_model.OPTIMAL, cp_model.FEASIBLE):
17 | return self.get_schedule(solver)
18 | else:
19 | return None
20 |
21 | def create_variables(self):
22 | for job in self.job_collector.jobs:
23 | for operation in job.route.operations:
24 | self.vars[operation.id] = self.model.NewIntVar(
25 | 0, self.job_collector.max_time, f"Operation_{operation.id}_Start"
26 | )
27 |
28 | def add_constraints(self):
29 | for job in self.job_collector.jobs:
30 | for i, operation in enumerate(job.route.operations):
31 | # Constraint: Each operation starts after the end of its parent operation
32 | if i > 0:
33 | parent_operation = job.route.operations[i - 1]
34 | self.model.Add(
35 | self.vars[operation.id] >= self.vars[parent_operation.id] + parent_operation.processing_time
36 | )
37 |
38 | # Constraint: Each operation must be finished before the job's demand date
39 | self.model.Add(self.vars[operation.id] + operation.processing_time <= job.demand_date)
40 |
41 | for resource in self.job_collector.resources:
42 | for timeslot in resource.timeslots:
43 | for job in self.job_collector.jobs:
44 | for operation in job.route.operations:
45 | # Constraint: The operation must start within the resource's available timeslots
46 | self.model.Add(self.vars[operation.id] >= timeslot.start_time).OnlyEnforceIf(timeslot.is_used)
47 | self.model.Add(
48 | self.vars[operation.id] <= timeslot.end_time - operation.processing_time
49 | ).OnlyEnforceIf(timeslot.is_used)
50 |
51 | def add_objective(self):
52 | objective_var = self.model.NewIntVar(0, self.job_collector.max_time, "Makespan")
53 | self.model.AddMaxEquality(
54 | objective_var,
55 | [
56 | self.vars[operation.id] + operation.processing_time
57 | for job in self.job_collector.jobs
58 | for operation in job.route.operations
59 | ],
60 | )
61 | self.model.Minimize(objective_var)
62 |
63 | def get_schedule(self, solver):
64 | schedule = {}
65 | for job in self.job_collector.jobs:
66 | for operation in job.route.operations:
67 | start_time = solver.Value(self.vars[operation.id])
68 | schedule[(job.id, operation.id)] = start_time
69 | return schedule
70 |
--------------------------------------------------------------------------------
/lekin/solver/utils/time_slot.py:
--------------------------------------------------------------------------------
1 | """
2 | Time slot management for scheduling operations.
3 | """
4 |
5 | from dataclasses import dataclass
6 | from datetime import datetime, timedelta
7 | from typing import List, Optional
8 |
9 |
10 | @dataclass
11 | class TimeSlot:
12 | """Represents a time slot for scheduling operations."""
13 |
14 | start_time: datetime
15 | end_time: datetime
16 |
17 | @property
18 | def duration(self) -> float:
19 | """Get the duration of the time slot in hours."""
20 | return (self.end_time - self.start_time).total_seconds() / 3600
21 |
22 | def overlaps_with(self, other: "TimeSlot") -> bool:
23 | """Check if this time slot overlaps with another.
24 |
25 | Args:
26 | other: Another time slot to check against
27 |
28 | Returns:
29 | True if time slots overlap, False otherwise
30 | """
31 | return self.start_time < other.end_time and self.end_time > other.start_time
32 |
33 | def contains(self, other: "TimeSlot") -> bool:
34 | """Check if this time slot completely contains another.
35 |
36 | Args:
37 | other: Another time slot to check against
38 |
39 | Returns:
40 | True if this time slot contains the other, False otherwise
41 | """
42 | return self.start_time <= other.start_time and self.end_time >= other.end_time
43 |
44 | def split(self, split_time: datetime) -> List["TimeSlot"]:
45 | """Split the time slot at a given time.
46 |
47 | Args:
48 | split_time: The time at which to split the slot
49 |
50 | Returns:
51 | List of two time slots if split is valid, empty list otherwise
52 | """
53 | if not (self.start_time < split_time < self.end_time):
54 | return []
55 |
56 | return [TimeSlot(self.start_time, split_time), TimeSlot(split_time, self.end_time)]
57 |
58 | def merge(self, other: "TimeSlot") -> Optional["TimeSlot"]:
59 | """Merge this time slot with another if they are adjacent or overlapping.
60 |
61 | Args:
62 | other: Another time slot to merge with
63 |
64 | Returns:
65 | Merged time slot if merge is possible, None otherwise
66 | """
67 | if not (self.overlaps_with(other) or self.end_time == other.start_time or self.start_time == other.end_time):
68 | return None
69 |
70 | return TimeSlot(min(self.start_time, other.start_time), max(self.end_time, other.end_time))
71 |
72 | def subtract(self, other: "TimeSlot") -> List["TimeSlot"]:
73 | """Subtract another time slot from this one.
74 |
75 | Args:
76 | other: Time slot to subtract
77 |
78 | Returns:
79 | List of remaining time slots
80 | """
81 | if not self.overlaps_with(other):
82 | return [self]
83 |
84 | result = []
85 |
86 | if self.start_time < other.start_time:
87 | result.append(TimeSlot(self.start_time, other.start_time))
88 |
89 | if self.end_time > other.end_time:
90 | result.append(TimeSlot(other.end_time, self.end_time))
91 |
92 | return result
93 |
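94 | 
95 | # Quick demonstration of the TimeSlot algebra defined above; the dates are arbitrary.
96 | if __name__ == "__main__":
97 |     morning = TimeSlot(datetime(2024, 1, 1, 8), datetime(2024, 1, 1, 12))
98 |     meeting = TimeSlot(datetime(2024, 1, 1, 10), datetime(2024, 1, 1, 11))
99 | 
100 |     print(morning.duration)  # 4.0 (hours)
101 |     print(morning.overlaps_with(meeting))  # True
102 |     print(morning.contains(meeting))  # True
103 |     # Carving the meeting out of the morning leaves 08:00-10:00 and 11:00-12:00
104 |     for free in morning.subtract(meeting):
105 |         print(free.start_time.time(), "-", free.end_time.time())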
--------------------------------------------------------------------------------
/lekin/solver/meta_heuristics/critical_path.py:
--------------------------------------------------------------------------------
1 | """Critical path"""
2 |
3 | import copy
4 | from typing import Any, Callable, Dict, Generator, List, Optional, Tuple, Type, Union
5 |
6 |
7 | class CriticalPathScheduler:
8 | def __init__(self, job_collector):
9 | self.job_collector = job_collector
10 |
11 | def schedule(self):
12 | # Generate the forward and backward pass times
13 | forward_pass_times = self.forward_pass()
14 | backward_pass_times = self.backward_pass()
15 |
16 | # Find the critical path
17 | critical_path = self.find_critical_path(forward_pass_times, backward_pass_times)
18 |
19 | # Assign start times for each operation in the critical path
20 | critical_path_schedule = {}
21 | current_time = 0
22 | for job_id, operation_id in critical_path:
23 | operation = self.job_collector.get_operation_by_id(job_id, operation_id)
24 | critical_path_schedule[operation] = current_time
25 | current_time += operation.processing_time
26 |
27 | return critical_path_schedule
28 |
29 | def forward_pass(self):
30 | forward_pass_times: Dict[str, Dict[str, float]] = {}
31 | for job in self.job_collector.jobs:
32 | forward_pass_times[job.id] = {}
33 | for operation in job.route.operations:
34 | if operation.id == 0:
35 | forward_pass_times[job.id][operation.id] = 0
36 | else:
37 | predecessors = operation.get_predecessors()
38 | max_predecessor_time = max(
39 | [forward_pass_times[job.id][pred.id] + pred.processing_time for pred in predecessors]
40 | )
41 | forward_pass_times[job.id][operation.id] = max_predecessor_time
42 |
43 | return forward_pass_times
44 |
45 | def backward_pass(self):
46 | backward_pass_times: Dict[str, Dict[str, float]] = {}
47 | for job in self.job_collector.jobs:
48 | backward_pass_times[job.id] = {}
49 | for operation in reversed(job.route.operations):
50 | if operation.id == len(job.route.operations) - 1:
51 | backward_pass_times[job.id][operation.id] = operation.processing_time
52 | else:
53 | successors = operation.get_successors()
54 | min_successor_time = min(
55 | [backward_pass_times[job.id][succ.id] + succ.processing_time for succ in successors]
56 | )
57 | backward_pass_times[job.id][operation.id] = min_successor_time
58 |
59 | return backward_pass_times
60 |
61 | def find_critical_path(self, forward_pass_times, backward_pass_times):
62 | critical_path = []
63 | for job in self.job_collector.jobs:
64 | for operation in job.route.operations:
65 | start_time = forward_pass_times[job.id][operation.id]
66 | end_time = backward_pass_times[job.id][operation.id]
67 | if start_time + operation.processing_time == end_time:
68 | critical_path.append((job.id, operation.id))
69 |
70 | return critical_path
71 |
--------------------------------------------------------------------------------
/.github/workflows/test.yml:
--------------------------------------------------------------------------------
1 | # This workflow will install Python dependencies, run tests and lint with a variety of Python versions
2 | # For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions
3 |
4 | name: Test
5 |
6 | on:
7 | push:
8 | branches: [main, dev]
9 | pull_request:
10 | branches: [main, dev]
11 |
12 | jobs:
13 | build:
14 | runs-on: ${{ matrix.os }}
15 | strategy:
16 | matrix:
17 | os: [ubuntu-latest, macos-13]
18 | python-version: [3.8, '3.12']
19 |
20 | steps:
21 | - uses: actions/checkout@v4
22 |
23 | - name: Set up Python ${{ matrix.python-version }}
24 | uses: actions/setup-python@v5
25 | with:
26 | python-version: ${{ matrix.python-version }}
27 |
28 | - name: Get full Python version
29 | id: full-python-version
30 | shell: bash
31 | run: echo "version=$(python -c "import sys; print('-'.join(str(v) for v in sys.version_info))")" >> $GITHUB_OUTPUT
32 |
33 | - name: Cache pip packages
34 | uses: actions/cache@v4
35 | with:
36 | path: ~/.cache/pip
37 | key: ${{ runner.os }}-pip-${{ hashFiles('**/requirements.txt') }}
38 | restore-keys: |
39 | ${{ runner.os }}-pip-
40 |
41 | - name: Upgrade pip
42 | shell: bash
43 | run: python -m pip install --upgrade pip
44 |
45 | - name: Install dependencies
46 | shell: bash
47 | run: |
48 | pip install --no-cache-dir -r requirements.txt
49 |           pip install --no-cache-dir coverage pytest "codecov-cli>=0.4.1"
50 |
51 | - name: Run unittest
52 | shell: bash
53 | run: coverage run -m unittest discover -s ./tests -p 'test_*.py'
54 |
55 | - name: Statistics
56 | if: success()
57 | run: |
58 | coverage report -i
59 | coverage xml -i
60 |
61 | - name: Upload coverage reports to Codecov
62 | uses: codecov/codecov-action@v4.0.1
63 | if: always()
64 | continue-on-error: true
65 | with:
66 | token: ${{ secrets.CODECOV_TOKEN }}
67 | file: coverage.xml
68 | flags: unittest
69 | name: coverage-report
70 | fail_ci_if_error: false
71 |
72 | docs:
73 | name: Test docs build
74 | runs-on: ubuntu-latest
75 |
76 | steps:
77 | - name: Check out Git repository
78 | uses: actions/checkout@v4
79 |
80 | - name: Set up Python
81 | uses: actions/setup-python@v5
82 | with:
83 | python-version: 3.11
84 |
85 | - name: Cache pip
86 | uses: actions/cache@v4
87 | with:
88 | path: ~/.cache/pip
89 | key: ${{ runner.os }}-pip-${{ hashFiles('docs/requirements_docs.txt') }}
90 | restore-keys: |
91 | ${{ runner.os }}-pip-
92 |
93 | - name: Install dependencies
94 | run: |
95 | sudo apt-get update && sudo apt-get install -y pandoc
96 | python -m pip install --upgrade pip
97 | pip install "sphinx<8.0.0" "nbsphinx<0.9.0" -r docs/requirements_docs.txt
98 | shell: bash
99 |
100 | - name: Build sphinx documentation
101 | run: |
102 | cd docs
103 | make clean
104 | make html --debug --jobs 2 SPHINXOPTS=""
105 |
106 | - name: Upload built docs
107 | uses: actions/upload-artifact@v4
108 | with:
109 | name: docs-results
110 | path: docs/build/html/
111 | if: success()
112 |
--------------------------------------------------------------------------------
/lekin/solver/meta_heuristics/nsga3.py:
--------------------------------------------------------------------------------
1 | """
2 | https://blog.csdn.net/weixin_44624036/article/details/133893810
3 | """
4 |
5 | import random
6 | from typing import Any, Callable, Dict, Generator, List, Optional, Tuple, Type, Union
7 |
8 |
9 | class NSGA3Scheduler:
10 | def __init__(self, jobs, routes, num_generations, population_size):
11 | self.jobs = jobs
12 | self.routes = routes
13 | self.num_generations = num_generations
14 | self.population_size = population_size
15 | self.population = []
16 | self.fronts = []
17 |
18 | def initialize_population(self):
19 | # Generate an initial random population
20 | for _ in range(self.population_size):
21 | solution = self.generate_random_solution()
22 | self.population.append(solution)
23 |
24 | def generate_random_solution(self):
25 | # Generate a random solution representing start times for operations
26 | solution: Dict = {}
27 | for job in self.jobs:
28 | solution[job] = {}
29 | for operation in job.route.operations:
30 | solution[job][operation] = random.randint(0, 100) # Random start time
31 | return solution
32 |
33 | def evaluate_objectives(self, solution):
34 | # Evaluate the objectives for a given solution
35 | makespan = self.calculate_makespan(solution)
36 | total_tardiness = self.calculate_total_tardiness(solution)
37 | resource_utilization = self.calculate_resource_utilization(solution)
38 | return makespan, total_tardiness, resource_utilization
39 |
40 | def calculate_makespan(self, solution):
41 | # Calculate the makespan for a given solution
42 | # Implementation specific to your job shop scheduling problem
43 | pass
44 |
45 | def calculate_total_tardiness(self, solution):
46 | # Calculate the total tardiness for a given solution
47 | # Implementation specific to your job shop scheduling problem
48 | pass
49 |
50 | def calculate_resource_utilization(self, solution):
51 | # Calculate the resource utilization for a given solution
52 | # Implementation specific to your job shop scheduling problem
53 | pass
54 |
55 | def run(self):
56 | self.initialize_population()
57 | for generation in range(self.num_generations):
58 | self.fast_nondominated_sort()
59 | self.crowding_distance()
60 | self.selection()
61 | self.crossover()
62 | self.mutation()
63 |
64 | def fast_nondominated_sort(self):
65 | # Implement NSGA-III's fast non-dominated sorting
66 | # Categorize solutions into different fronts based on their dominance relationships
67 | pass
68 |
69 |     def crowding_distance(self):
70 |         # Diversity-preservation step. Note that NSGA-III replaces NSGA-II's crowding
71 |         # distance with reference-point based niching; that association would go here.
72 |         pass
73 |
74 | def selection(self):
75 | # Implement NSGA-III's selection mechanism
76 | # Select the best solutions based on their non-dominated ranks and crowding distances
77 | pass
78 |
79 | def crossover(self):
80 | # Implement NSGA-III's crossover operator
81 | # Perform crossover to create offspring solutions
82 | pass
83 |
84 | def mutation(self):
85 | # Implement NSGA-III's mutation operator
86 | # Perform mutation to introduce diversity in the population
87 | pass
88 |
89 | def get_pareto_front(self):
90 | # Get the Pareto front solutions after the algorithm has run
91 | return self.fronts[0]
92 |
--------------------------------------------------------------------------------
/lekin/solver/core/base_solver.py:
--------------------------------------------------------------------------------
1 | """
2 | Base solver interface for the LeKin scheduling system.
3 | """
4 |
5 | from abc import ABC, abstractmethod
6 | import logging
7 | from typing import Any, Dict, Optional
8 |
9 | from lekin.solver.core.problem import Problem
10 | from lekin.solver.core.solution import Solution
11 |
12 | logger = logging.getLogger(__name__)
13 |
14 |
15 | class BaseSolver(ABC):
16 | """Abstract base class for all scheduling solvers."""
17 |
18 | def __init__(self, config: Optional[Dict[str, Any]] = None):
19 | """Initialize the solver with optional configuration.
20 |
21 | Args:
22 | config: Optional configuration dictionary for solver parameters
23 | """
24 | self.config = config or {}
25 | self._validate_config()
26 |
27 | @abstractmethod
28 | def solve(self, problem: Problem) -> Solution:
29 | """Solve the scheduling problem.
30 |
31 | Args:
32 | problem: The scheduling problem to solve
33 |
34 | Returns:
35 | A solution to the problem
36 |
37 | Raises:
38 | ValueError: If the problem is invalid
39 | RuntimeError: If solving fails
40 | """
41 | pass
42 |
43 | @abstractmethod
44 | def validate_solution(self, solution: Solution) -> bool:
45 | """Validate if a solution meets all constraints.
46 |
47 | Args:
48 | solution: The solution to validate
49 |
50 | Returns:
51 | True if solution is valid, False otherwise
52 | """
53 | pass
54 |
55 | def _validate_config(self) -> None:
56 | """Validate the solver configuration.
57 |
58 | Raises:
59 | ValueError: If configuration is invalid
60 | """
61 | pass
62 |
63 | def get_solution_metrics(self, solution: Solution) -> Dict[str, float]:
64 | """Calculate key performance metrics for a solution.
65 |
66 | Args:
67 | solution: The solution to evaluate
68 |
69 | Returns:
70 | Dictionary of metric names and values
71 | """
72 | return {
73 | "makespan": solution.get_makespan(),
74 | "resource_utilization": sum(
75 | solution.get_resource_utilization(r.resource_id) for r in solution.problem.resources
76 | )
77 | / len(solution.problem.resources),
78 | "tardiness": solution.get_tardiness(),
79 | }
80 |
81 | def log_solution_metrics(self, solution: Solution) -> None:
82 | """Log the performance metrics of a solution.
83 |
84 | Args:
85 | solution: The solution to log metrics for
86 | """
87 | metrics = self.get_solution_metrics(solution)
88 | logger.info("Solution metrics:")
89 | for metric, value in metrics.items():
90 | logger.info(f" {metric}: {value:.2f}")
91 |
92 | def save_solution(self, solution: Solution, filepath: str) -> None:
93 | """Save a solution to a file.
94 |
95 | Args:
96 | solution: The solution to save
97 | filepath: Path to save the solution to
98 | """
99 | import json
100 |
101 | with open(filepath, "w") as f:
102 | json.dump(solution.to_dict(), f, indent=2)
103 |
104 | @classmethod
105 | def load_solution(cls, filepath: str) -> Solution:
106 | """Load a solution from a file.
107 |
108 | Args:
109 | filepath: Path to load the solution from
110 |
111 | Returns:
112 | The loaded solution
113 | """
114 | import json
115 |
116 | with open(filepath, "r") as f:
117 | return Solution.from_dict(json.load(f))
118 |
--------------------------------------------------------------------------------
/lekin/solver/constraints/base.py:
--------------------------------------------------------------------------------
1 | """
2 | Base constraint system for scheduling problems.
3 | """
4 |
5 | from abc import ABC, abstractmethod
6 | import logging
7 | from typing import Any, Dict, List, Optional
8 |
9 | from lekin.solver.core.solution import Solution
10 |
11 | logger = logging.getLogger(__name__)
12 |
13 |
14 | class BaseConstraint(ABC):
15 | """Base class for all scheduling constraints."""
16 |
17 | def __init__(self, config: Optional[Dict[str, Any]] = None):
18 | """Initialize the constraint.
19 |
20 | Args:
21 | config: Optional configuration dictionary
22 | """
23 | self.config = config or {}
24 | self._validate_config()
25 |
26 | @abstractmethod
27 | def check(self, solution: Solution) -> bool:
28 | """Check if the solution satisfies this constraint.
29 |
30 | Args:
31 | solution: The solution to check
32 |
33 | Returns:
34 | True if constraint is satisfied, False otherwise
35 | """
36 | pass
37 |
38 | def _validate_config(self) -> None:
39 | """Validate the constraint configuration.
40 |
41 | Raises:
42 | ValueError: If configuration is invalid
43 | """
44 | pass
45 |
46 | def get_violations(self, solution: Solution) -> List[Dict[str, Any]]:
47 | """Get detailed information about constraint violations.
48 |
49 | Args:
50 | solution: The solution to check
51 |
52 | Returns:
53 | List of violation details
54 | """
55 | return []
56 |
57 |
58 | class ConstraintManager:
59 | """Manages a collection of constraints."""
60 |
61 | def __init__(self):
62 | """Initialize the constraint manager."""
63 | self.constraints: List[BaseConstraint] = []
64 |
65 | def add_constraint(self, constraint: BaseConstraint) -> None:
66 | """Add a constraint to the manager.
67 |
68 | Args:
69 | constraint: The constraint to add
70 | """
71 | self.constraints.append(constraint)
72 |
73 | def check_all(self, solution: Solution) -> bool:
74 | """Check if a solution satisfies all constraints.
75 |
76 | Args:
77 | solution: The solution to check
78 |
79 | Returns:
80 | True if all constraints are satisfied, False otherwise
81 | """
82 | try:
83 | return all(constraint.check(solution) for constraint in self.constraints)
84 | except Exception as e:
85 | logger.error(f"Error checking constraints: {str(e)}")
86 | return False
87 |
88 | def get_all_violations(self, solution: Solution) -> List[Dict[str, Any]]:
89 | """Get all constraint violations in a solution.
90 |
91 | Args:
92 | solution: The solution to check
93 |
94 | Returns:
95 | List of all constraint violations
96 | """
97 | violations = []
98 | for constraint in self.constraints:
99 | if not constraint.check(solution):
100 | violations.extend(constraint.get_violations(solution))
101 | return violations
102 |
103 | def log_violations(self, solution: Solution) -> None:
104 | """Log all constraint violations in a solution.
105 |
106 | Args:
107 | solution: The solution to check
108 | """
109 | violations = self.get_all_violations(solution)
110 | if violations:
111 | logger.warning(f"Found {len(violations)} constraint violations:")
112 | for violation in violations:
113 | logger.warning(f" - {violation}")
114 | else:
115 | logger.info("No constraint violations found")
116 |
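117 | 
118 | # Minimal sketch of a concrete constraint. It relies only on Solution.get_makespan(),
119 | # which the solvers already use for their metrics; the "max_makespan" config key is an
120 | # assumption made for this example, not an existing lekin option.
121 | class MakespanLimitConstraint(BaseConstraint):
122 |     """Reject solutions whose makespan exceeds a configured limit."""
123 | 
124 |     def check(self, solution: Solution) -> bool:
125 |         limit = self.config.get("max_makespan", float("inf"))
126 |         return solution.get_makespan() <= limit
127 | 
128 |     def get_violations(self, solution: Solution) -> List[Dict[str, Any]]:
129 |         if self.check(solution):
130 |             return []
131 |         return [{"constraint": "makespan_limit", "makespan": solution.get_makespan()}]
132 | 
133 | 
134 | # Usage sketch:
135 | #   manager = ConstraintManager()
136 | #   manager.add_constraint(MakespanLimitConstraint({"max_makespan": 480}))
137 | #   feasible = manager.check_all(candidate_solution)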
--------------------------------------------------------------------------------
/examples/jobshop_example.py:
--------------------------------------------------------------------------------
1 | """Demo"""
2 |
3 | import copy
4 |
5 | from lekin.solver.config import SolverConfig, TerminationConfig
6 | from lekin.solver.solver import LekinSolver
7 |
8 |
9 | def PlanningEntity(cls):
10 | cls._is_planning_entity = True
11 | return cls
12 |
13 |
14 | def PlanningSolution(cls):
15 | cls._is_planning_solution = True
16 | return cls
17 |
18 |
19 | @PlanningEntity
20 | class Job:
21 | def __init__(self, id, name):
22 | self.id = id
23 | self.name = name
24 |
25 |
26 | @PlanningEntity
27 | class Resource:
28 | def __init__(self, id, name):
29 | self.id = id
30 | self.name = name
31 |
32 |
33 | @PlanningEntity
34 | class Timeslot:
35 | def __init__(self, id, start_time, end_time):
36 | self.id = id
37 | self.start_time = start_time
38 | self.end_time = end_time
39 |
40 |
41 | @PlanningEntity
42 | class JobAssignment:
43 | def __init__(self, job, resource=None, timeslot=None):
44 | self.job = job
45 | self.resource = resource
46 | self.timeslot = timeslot
47 |
48 |
49 | @PlanningSolution
50 | class JobSchedule:
51 | def __init__(self, job_assignments, resources, timeslots):
52 | self.job_assignments = job_assignments
53 | self.resources = resources
54 | self.timeslots = timeslots
55 |
56 |
57 | class TabuSearchEntitySelector:
58 | def __init__(self, tabu_tenure=10):
59 | self.tabu_tenure = tabu_tenure
60 |
61 | def evaluate(self, job_schedule):
62 | # Objective: minimize the total usage of timeslots (example)
63 | return sum(
64 | [
65 | assignment.timeslot.end_time - assignment.timeslot.start_time
66 | for assignment in job_schedule.job_assignments
67 | if assignment.timeslot
68 | ]
69 | )
70 |
71 |
72 | class NeighborMoveSelector:
73 | def generate_neighbors(self, job_schedule):
74 | neighbors = []
75 | for assignment in job_schedule.job_assignments:
76 | for resource in job_schedule.resources:
77 | for timeslot in job_schedule.timeslots:
78 | new_assignment = copy.deepcopy(assignment)
79 | new_assignment.resource = resource
80 | new_assignment.timeslot = timeslot
81 | new_job_assignments = copy.deepcopy(job_schedule.job_assignments)
82 | new_job_assignments[job_schedule.job_assignments.index(assignment)] = new_assignment
83 | neighbors.append(JobSchedule(new_job_assignments, job_schedule.resources, job_schedule.timeslots))
84 | return neighbors
85 |
86 |
87 | if __name__ == "__main__":
88 | jobs = [Job(1, "Job1"), Job(2, "Job2")]
89 | resources = [Resource(1, "Resource1"), Resource(2, "Resource2")]
90 | timeslots = [Timeslot(1, 8, 10), Timeslot(2, 10, 12)]
91 |
92 | job_assignments = [JobAssignment(job) for job in jobs]
93 | job_schedule = JobSchedule(job_assignments, resources, timeslots)
94 |
95 | entity_selector = TabuSearchEntitySelector(tabu_tenure=10)
96 | move_selector = NeighborMoveSelector()
97 | termination_config = TerminationConfig(seconds_spent_limit=10, max_iterations=100)
98 |
99 | solver_config = SolverConfig(
100 | entity_selector=entity_selector, move_selector=move_selector, termination=termination_config
101 | )
102 | solver = LekinSolver(solver_config)
103 |
104 | solution = solver.solve(job_schedule)
105 |
106 | if solution:
107 | for assignment in solution.job_assignments:
108 | if assignment.resource:
109 | print(
110 | f"Job {assignment.job.name} assigned to Resource {assignment.resource.name} at "
111 | f"Timeslot {assignment.timeslot.id}"
112 | )
113 |
--------------------------------------------------------------------------------
/examples/data/k1.json:
--------------------------------------------------------------------------------
1 | {
2 | "itineraries": [
3 | {
4 | "itineraryName": "Itinerary 1",
5 | "tasksList": [
6 | {
7 | "taskName": "Task 1",
8 | "taskMachine": {
9 | "machineName": "M1"
10 | },
11 | "taskDuration": 10.0
12 | },
13 | {
14 | "taskName": "Task 2",
15 | "taskMachine": {
16 | "machineName": "M2"
17 | },
18 | "taskDuration": 5.0
19 | },
20 | {
21 | "taskName": "Task 3",
22 | "taskMachine": {
23 | "machineName": "M3"
24 | },
25 | "taskDuration": 35.0
26 | }
27 | ]
28 | },
29 | {
30 | "itineraryName": "Itinerary 2",
31 | "tasksList": [
32 | {
33 | "taskName": "Task 1",
34 | "taskMachine": {
35 | "machineName": "M2"
36 | },
37 | "taskDuration": 25.0
38 | },
39 | {
40 | "taskName": "Task 2",
41 | "taskMachine": {
42 | "machineName": "M1"
43 | },
44 | "taskDuration": 5.0
45 | },
46 | {
47 | "taskName": "Task 3",
48 | "taskMachine": {
49 | "machineName": "M3"
50 | },
51 | "taskDuration": 30.0
52 | },
53 | {
54 | "taskName": "Task 4",
55 | "taskMachine": {
56 | "machineName": "M4"
57 | },
58 | "taskDuration": 15.0
59 | }
60 | ]
61 | },
62 | {
63 | "itineraryName": "Itinerary 3",
64 | "tasksList": [
65 | {
66 | "taskName": "Task 1",
67 | "taskMachine": {
68 | "machineName": "M2"
69 | },
70 | "taskDuration": 5.0
71 | },
72 | {
73 | "taskName": "Task 2",
74 | "taskMachine": {
75 | "machineName": "M4"
76 | },
77 | "taskDuration": 10.0
78 | }
79 | ]
80 | },
81 | {
82 | "itineraryName": "Itinerary 4",
83 | "tasksList": [
84 | {
85 | "taskName": "Task 1",
86 | "taskMachine": {
87 | "machineName": "M2"
88 | },
89 | "taskDuration": 15.0
90 | },
91 | {
92 | "taskName": "Task 2",
93 | "taskMachine": {
94 | "machineName": "M3"
95 | },
96 | "taskDuration": 10.0
97 | },
98 | {
99 | "taskName": "Task 3",
100 | "taskMachine": {
101 | "machineName": "M4"
102 | },
103 | "taskDuration": 20.0
104 | },
105 | {
106 | "taskName": "Task 4",
107 | "taskMachine": {
108 | "machineName": "M1"
109 | },
110 | "taskDuration": 10.0
111 | }
112 | ]
113 | }
114 | ],
115 | "machines": [
116 | {
117 | "machineName": "M1"
118 | },
119 | {
120 | "machineName": "M2"
121 | },
122 | {
123 | "machineName": "M3"
124 | },
125 | {
126 | "machineName": "M4"
127 | }
128 | ]
129 | }
130 |
--------------------------------------------------------------------------------
/lekin/solver/meta_heuristics/variable_neighborhood_search.py:
--------------------------------------------------------------------------------
1 | """Variable neighborhood search"""
2 |
3 | import math
4 | import random
4 |
5 |
6 | class VNSScheduler:
7 | def __init__(self, jobs, routes, max_iterations, neighborhood_size):
8 | self.jobs = jobs
9 | self.routes = routes
10 | self.max_iterations = max_iterations
11 | self.neighborhood_size = neighborhood_size
12 |
13 | def initialize_solution(self):
14 | # Generate an initial random solution representing start times for operations
15 | solution = {}
16 | for job in self.jobs:
17 | solution[job] = {}
18 | for operation in job.route.operations:
19 | solution[job][operation] = random.randint(0, 100) # Random start time
20 | return solution
21 |
22 | def evaluate_objectives(self, solution):
23 | # Evaluate the objectives for a given solution
24 | makespan = self.calculate_makespan(solution)
25 | total_tardiness = self.calculate_total_tardiness(solution)
26 | resource_utilization = self.calculate_resource_utilization(solution)
27 | return makespan, total_tardiness, resource_utilization
28 |
29 | def calculate_makespan(self, solution):
30 | # Calculate the makespan for a given solution
31 | # Implementation specific to your job shop scheduling problem
32 | pass
33 |
34 | def calculate_total_tardiness(self, solution):
35 | # Calculate the total tardiness for a given solution
36 | # Implementation specific to your job shop scheduling problem
37 | pass
38 |
39 | def calculate_resource_utilization(self, solution):
40 | # Calculate the resource utilization for a given solution
41 | # Implementation specific to your job shop scheduling problem
42 | pass
43 |
44 | def generate_neighbor(self, current_solution):
45 | # Generate a neighbor solution by perturbing the current solution
46 | # You can use different perturbation techniques like swap, insert, etc.
47 | # based on your specific problem requirements
48 | neighbor_solution = current_solution.copy()
49 | # Implement your perturbation here
50 | return neighbor_solution
51 |
52 | def accept_neighbor(self, current_solution, neighbor_solution, temperature):
53 | # Decide whether to accept the neighbor solution based on acceptance criteria
54 | # For VNS, you can use simulated annealing-like acceptance probability
55 | # based on the difference in objective values and the current temperature
56 | current_objectives = self.evaluate_objectives(current_solution)
57 | neighbor_objectives = self.evaluate_objectives(neighbor_solution)
58 | current_cost = self.calculate_cost(current_objectives)
59 | neighbor_cost = self.calculate_cost(neighbor_objectives)
60 |
61 | if neighbor_cost < current_cost:
62 | return True
63 | else:
64 |             acceptance_prob = min(1.0, math.exp(-(neighbor_cost - current_cost) / temperature))
65 | return random.random() < acceptance_prob
66 |
67 | def calculate_cost(self, objectives):
68 | # Calculate the cost for a set of objectives
69 | # You can define a weighted sum, weighted sum of ranks, or other measures
70 | # based on your specific problem requirements and preferences
71 | # For example, you can use a weighted sum of makespan and total tardiness
72 | weight_makespan = 1
73 | weight_tardiness = 1
74 | return weight_makespan * objectives[0] + weight_tardiness * objectives[1]
75 |
76 | def run(self):
77 | current_solution = self.initialize_solution()
78 | temperature = 100 # Initial temperature for simulated annealing
79 | iteration = 0
80 |
81 | while iteration < self.max_iterations:
82 | for _ in range(self.neighborhood_size):
83 | neighbor_solution = self.generate_neighbor(current_solution)
84 | if self.accept_neighbor(current_solution, neighbor_solution, temperature):
85 | current_solution = neighbor_solution
86 | temperature *= 0.95 # Reduce temperature for simulated annealing
87 | iteration += 1
88 |
89 | return current_solution
90 |
--------------------------------------------------------------------------------
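The three `calculate_*` methods above are intentionally left as problem-specific stubs, so `run()` cannot execute as-is. Below is a minimal sketch of one way to subclass the scheduler; the `SimpleNamespace` jobs and operations are throwaway stand-ins (not lekin's real structs), and only the makespan term of the built-in cost is made concrete.

```python
from types import SimpleNamespace
import random

from lekin.solver.meta_heuristics.variable_neighborhood_search import VNSScheduler


def make_job(name, durations=(5, 3)):
    # Toy stand-in exposing job.route.operations with processing_time, as the base class expects.
    ops = [SimpleNamespace(processing_time=p) for p in durations]
    return SimpleNamespace(name=name, route=SimpleNamespace(operations=ops))


class ToyVNSScheduler(VNSScheduler):
    def calculate_makespan(self, solution):
        # Latest finish time over every scheduled operation.
        return max(start + op.processing_time for ops in solution.values() for op, start in ops.items())

    def calculate_total_tardiness(self, solution):
        return 0  # no due dates in this toy example

    def calculate_resource_utilization(self, solution):
        return 0.0  # not used by calculate_cost

    def generate_neighbor(self, current_solution):
        # Perturb one operation's start time; copy the nested dicts so the incumbent stays intact.
        neighbor = {job: dict(ops) for job, ops in current_solution.items()}
        job = random.choice(list(neighbor))
        op = random.choice(list(neighbor[job]))
        neighbor[job][op] = max(0, neighbor[job][op] + random.randint(-10, 10))
        return neighbor


jobs = [make_job(f"J{i}") for i in range(3)]
scheduler = ToyVNSScheduler(jobs, routes=None, max_iterations=50, neighborhood_size=5)
best = scheduler.run()
print(scheduler.calculate_makespan(best))
```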
/lekin/solver/core/problem.py:
--------------------------------------------------------------------------------
1 | """
2 | Core problem representation for scheduling.
3 | """
4 |
5 | from dataclasses import dataclass
6 | from datetime import datetime
7 | from typing import Any, Dict, List, Optional
8 |
9 | from lekin.lekin_struct.job import Job
10 | from lekin.lekin_struct.resource import Resource
11 | from lekin.lekin_struct.route import Route
12 |
13 |
14 | @dataclass
15 | class Problem:
16 | """Represents a scheduling problem instance."""
17 |
18 | jobs: List[Job]
19 | routes: List[Route]
20 | resources: List[Resource]
21 | config: Optional[Dict[str, Any]] = None
22 |
23 | def validate(self) -> bool:
24 | """Validate the problem instance.
25 |
26 | Returns:
27 | True if problem is valid, False otherwise
28 | """
29 | try:
30 | # Check if all required components are present
31 | if not self.jobs or not self.routes or not self.resources:
32 | return False
33 |
34 | # Validate job-route assignments
35 | for job in self.jobs:
36 | if not any(route.route_id == job.assigned_route_id for route in self.routes):
37 | return False
38 |
39 | # Validate resource capabilities
40 | for route in self.routes:
41 | for operation in route.operations_sequence:
42 | if not any(
43 | resource.resource_id in [r.resource_id for r in operation.available_resource]
44 | for resource in self.resources
45 | ):
46 | return False
47 |
48 | return True
49 |
50 | except Exception:
51 | return False
52 |
53 | def get_job_by_id(self, job_id: str) -> Optional[Job]:
54 | """Get a job by its ID.
55 |
56 | Args:
57 | job_id: The ID of the job to find
58 |
59 | Returns:
60 | The job if found, None otherwise
61 | """
62 | return next((job for job in self.jobs if job.job_id == job_id), None)
63 |
64 | def get_route_by_id(self, route_id: str) -> Optional[Route]:
65 | """Get a route by its ID.
66 |
67 | Args:
68 | route_id: The ID of the route to find
69 |
70 | Returns:
71 | The route if found, None otherwise
72 | """
73 | return next((route for route in self.routes if route.route_id == route_id), None)
74 |
75 | def get_resource_by_id(self, resource_id: str) -> Optional[Resource]:
76 | """Get a resource by its ID.
77 |
78 | Args:
79 | resource_id: The ID of the resource to find
80 |
81 | Returns:
82 | The resource if found, None otherwise
83 | """
84 | return next((resource for resource in self.resources if resource.resource_id == resource_id), None)
85 |
86 | def get_compatible_resources(self, operation) -> List[Resource]:
87 | """Get all resources compatible with an operation.
88 |
89 | Args:
90 | operation: The operation to find compatible resources for
91 |
92 | Returns:
93 | List of compatible resources
94 | """
95 | return [
96 | resource
97 | for resource in self.resources
98 | if resource.resource_id in [r.resource_id for r in operation.available_resource]
99 | ]
100 |
101 | def get_operation_sequence(self, job: Job) -> List[Any]:
102 | """Get the sequence of operations for a job.
103 |
104 | Args:
105 | job: The job to get operations for
106 |
107 | Returns:
108 | List of operations in sequence
109 | """
110 | route = self.get_route_by_id(job.assigned_route_id)
111 | if not route:
112 | return []
113 | return route.operations_sequence
114 |
115 | def get_time_window(self, job: Job) -> tuple[datetime, datetime]:
116 | """Get the time window for a job.
117 |
118 | Args:
119 | job: The job to get time window for
120 |
121 | Returns:
122 | Tuple of (earliest_start, latest_end)
123 | """
124 |         # Default window: from the job's release date to its demand (due) date
125 | return job.release_date, job.demand_date
126 |
--------------------------------------------------------------------------------
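A minimal sketch of building and validating a `Problem`, reusing the constructor arguments that appear in examples/rule_example.py; the exact keyword names are taken from that example and are otherwise an assumption about the struct classes.

```python
from lekin.lekin_struct import Job, Operation, Resource, Route
from lekin.solver.core.problem import Problem

m1 = Resource(resource_id=1, resource_name="M1")
m2 = Resource(resource_id=2, resource_name="M2")

route = Route(route_id=1)
route.operations_sequence = [
    Operation(
        operation_id="1",
        operation_name="Task 1",
        quantity=1,
        processing_time=5.0,
        parent_job_id=1,
        available_resource=[m1],
    ),
    Operation(
        operation_id="2",
        operation_name="Task 2",
        quantity=1,
        processing_time=10.0,
        parent_job_id=1,
        available_resource=[m2],
    ),
]

job = Job(job_id=1, assigned_route_id=1)
problem = Problem(jobs=[job], routes=[route], resources=[m1, m2])

print(problem.validate())                   # expected: True (job's route exists, every operation has a known resource)
print(problem.get_operation_sequence(job))  # the two operations above, in order
```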
/examples/rule_example.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import json
3 | import logging
4 |
5 | from lekin.dashboard.gantt import get_scheduling_res_from_all_jobs, plot_gantt_chart
6 | from lekin.lekin_struct import Job, JobCollector, Operation, Resource, ResourceCollector, Route, RouteCollector
7 | from lekin.solver.construction_heuristics import BackwardScheduler, ForwardScheduler
8 |
9 | logging.basicConfig(format="%(levelname)s:%(message)s", level=logging.DEBUG)
10 |
11 |
12 | def prepare_data(file_path="./data/k1.json"):
13 | with open(file_path, "r", encoding="utf8") as file: # read file from path
14 | data = json.loads(file.read())
15 |
16 | job_collector = JobCollector()
17 | route_collector = RouteCollector()
18 | resource_collector = ResourceCollector()
19 |
20 | if list(data.keys()) == ["itineraries", "machines"]:
21 |         resources = data["machines"]  # if the first-level structure is correct, split it into resources and routes
22 | routes = data["itineraries"]
23 |
24 | # parse the resource
25 | for re in resources:
26 | re_name = re["machineName"]
27 | re_id = int(re_name.replace("M", ""))
28 | resource = Resource(resource_id=re_id, resource_name=re_name)
29 | resource.available_hours = list(range(1, 200))
30 | resource_collector.add_resource_dict(resource)
31 |
32 | print([i.resource_id for i in resource_collector.get_all_resources()])
33 | print(resource_collector.get_all_resources()[0].available_hours)
34 |
35 | # parse the job and route
36 | for ro in routes:
37 | ro_id = int(ro["itineraryName"].replace("Itinerary ", ""))
38 | route = Route(route_id=ro_id)
39 | operations_sequence = []
40 | for ta in ro["tasksList"]:
41 | op_name = ta["taskName"]
42 | op_id = ta["taskName"].replace("Task ", "")
43 | op_pt = ta["taskDuration"]
44 |
45 | op_tm = []
46 | if isinstance(ta["taskMachine"], list):
47 | for re in ta["taskMachine"]:
48 | re_name = re["machineName"]
49 | re_id = int(re_name.replace("M", ""))
50 | op_tm.append(resource_collector.get_resource_by_id(re_id))
51 | else:
52 | re_name = ta["taskMachine"]["machineName"]
53 | re_id = int(re_name.replace("M", ""))
54 | op_tm.append(resource_collector.get_resource_by_id(re_id))
55 |
56 | operations_sequence.append(
57 | Operation(
58 | operation_id=op_id,
59 | operation_name=op_name,
60 | quantity=1,
61 | processing_time=op_pt,
62 | parent_job_id=ro_id, # route defines job here
63 | available_resource=op_tm,
64 | )
65 | )
66 |
67 | route.operations_sequence = operations_sequence
68 | route_collector.add_route(route)
69 |
70 | job_collector.add_job(Job(job_id=ro_id, assigned_route_id=ro_id))
71 |
72 | # print(resources)
73 | # print(routes)
74 | return job_collector, resource_collector, route_collector
75 |
76 |
77 | def run_scheduling(job_collector, resource_collector, route_collector, use_model="forward"):
78 | if use_model == "forward":
79 | scheduler = ForwardScheduler(job_collector, resource_collector, route_collector)
80 | elif use_model == "backward":
81 | scheduler = BackwardScheduler(job_collector, resource_collector, route_collector)
82 | else:
83 |         raise ValueError(f"Unknown use_model: {use_model}")
84 |
85 | scheduler.run()
86 | return
87 |
88 |
89 | if __name__ == "__main__":
90 | parser = argparse.ArgumentParser()
91 | parser.add_argument("--use_model", type=str, default="backward")
92 | args = parser.parse_args()
93 |
94 | job_collector, resource_collector, route_collector = prepare_data(file_path="./data/k1.json")
95 | run_scheduling(job_collector, resource_collector, route_collector, use_model=args.use_model)
96 |
97 | scheduling_res = get_scheduling_res_from_all_jobs(job_collector)
98 | print(scheduling_res)
99 | plot_gantt_chart(job_collector, scheduling_res)
100 |
--------------------------------------------------------------------------------
/docs/source/_static/logo.svg:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
--------------------------------------------------------------------------------
/lekin/solver/construction_heuristics/spt.py:
--------------------------------------------------------------------------------
1 | """Shortest Processing Time"""
2 |
3 | from collections import OrderedDict
4 | import logging
5 | from typing import Any, Callable, Dict, List, Optional, Tuple, Union
6 |
7 | from lekin.solver.construction_heuristics.base import BaseScheduler
8 |
9 |
10 | class SPTScheduler(object):
11 | def __init__(self):
12 |         self.time = {}  # global queue of decision time points
13 |         self.waiting_operations = {}  # per-machine queue of waiting operations
14 |         self.current_time_on_machines = {}  # next free time on each machine
15 |         self.jobs_list_to_export = []
16 | def setup(self, job_list: List, machine_list: List):
17 | for machine in machine_list:
18 | # init for machine start time
19 | self.current_time_on_machines[machine.name] = 0
20 |
21 | # init for waiting list of machines
22 |             self.waiting_operations[machine.name] = []
23 | for job in job_list:
24 | if job.operation_id == 1 and machine.name in job.machine:
25 | if len(job.machine) == 1:
26 | self.waiting_operations[machine.name].append(job)
27 |
28 | self.waiting_operations[machine.name].sort(key=lambda j: j.duration)
29 | return
30 |
31 | def solve(self, job_list: List, machine_list: List):
32 | self.setup(job_list, machine_list)
33 |
34 | self.time[0] = self.waiting_operations
35 |
36 | for key_mach, operations in self.waiting_operations.items():
37 | # for each waiting task in front of machine, set time to 0
38 | if len(operations):
39 | operations[0].start_time = 0
40 | operations[0].completed = True
41 | operations[0].assigned_machine = key_mach
42 |
43 | self.jobs_list_to_export.append(operations[0])
44 | self.current_time_on_machines[key_mach] = operations[0].get_end_time()
45 | self.time[self.current_time_on_machines[key_mach]] = {}
46 |
47 | while len(self.jobs_list_to_export) != len(job_list):
48 | for t, operations in self.time.items():
49 | operations = self.get_waiting_operations(
50 | job_list, float(t), machine_list, self.current_time_on_machines
51 | )
52 |
53 | for key_mach, tasks in operations.items():
54 | if len(tasks):
55 | if float(t) < self.current_time_on_machines[key_mach]:
56 | continue
57 |
58 | tasks[0].start_time = float(t)
59 | tasks[0].completed = True
60 | tasks[0].assigned_machine = key_mach
61 |
62 | self.jobs_list_to_export.append(tasks[0])
63 | self.current_time_on_machines[key_mach] = tasks[0].get_end_time()
64 | self.time[self.current_time_on_machines[key_mach]] = {}
65 |
66 | del self.time[t]
67 | break
68 | self.time = OrderedDict(self.time)
69 | return self.jobs_list_to_export
70 |
71 | def get_waiting_operations(self, job_list, time, machine_list, current_time_on_machines):
72 | incoming_operations = {}
73 |
74 | for mach in machine_list:
75 | assigned_jobs_for_machine = []
76 | for job in job_list:
77 | if job.completed is False and mach.name in job.machine:
78 | if len(job.machine) == 1:
79 | assigned_jobs_for_machine.append(job)
80 |
81 | incoming_operations[mach.name] = []
82 | for j in assigned_jobs_for_machine:
83 | if j.id_operation == 1:
84 | incoming_operations[mach.name].append(j)
85 | else:
86 | previous_task = [
87 | job
88 | for job in job_list
89 | if job.route_id == j.route_id
90 | and job.id_operation == (j.id_operation - 1)
91 | and job.end_time <= time
92 | ]
93 | if len(previous_task):
94 | if previous_task[0].completed:
95 | incoming_operations[mach.name].append(j)
96 |
97 | incoming_operations[mach.name].sort(key=lambda j: j.duration)
98 | return incoming_operations
99 |
100 | def run(self):
101 | return
102 |
--------------------------------------------------------------------------------
/lekin/lekin_struct/execution_mode.py:
--------------------------------------------------------------------------------
1 | """
2 | ExecutionMode module for representing different ways to execute operations.
3 |
4 | This module provides the ExecutionMode class for managing different execution modes
5 | of operations in the scheduling process. Each mode represents a specific way to
6 | execute an operation with its own duration and resource requirements.
7 | """
8 |
9 | from dataclasses import dataclass, field
10 | from typing import Any, Dict, List, Optional
11 |
12 |
13 | @dataclass
14 | class ExecutionMode:
15 | """Represents a specific way to execute an operation.
16 |
17 | An execution mode defines how an operation can be performed, including
18 | its duration and resource requirements. Different modes may represent
19 | different processing speeds, resource combinations, or quality levels.
20 |
21 | Attributes:
22 | id (str): Unique identifier for the execution mode
23 | job_id (str): ID of the job this mode belongs to
24 | duration (int): Time required to complete the operation in this mode
25 | resource_requirements (List[Dict[str, Any]]): List of required resources
26 | cost (float): Cost of executing in this mode
27 | quality_level (int): Quality level of the execution (higher is better)
28 | metadata (Dict[str, Any]): Additional metadata for the mode
29 | """
30 |
31 | id: str
32 | job_id: str
33 | duration: int
34 | resource_requirements: List[Dict[str, Any]] = field(default_factory=list)
35 | cost: float = 0.0
36 | quality_level: int = 1
37 | metadata: Dict[str, Any] = field(default_factory=dict)
38 |
39 | def __post_init__(self):
40 | """Validate execution mode attributes after initialization."""
41 | if not isinstance(self.id, str) or not self.id:
42 | raise ValueError("id must be a non-empty string")
43 | if not isinstance(self.job_id, str) or not self.job_id:
44 | raise ValueError("job_id must be a non-empty string")
45 | if self.duration <= 0:
46 | raise ValueError("duration must be positive")
47 | if self.cost < 0:
48 | raise ValueError("cost must be non-negative")
49 | if self.quality_level < 1:
50 | raise ValueError("quality_level must be at least 1")
51 |
52 | def add_resource_requirement(self, resource_id: str, quantity: int = 1, setup_time: float = 0.0) -> None:
53 | """Add a resource requirement to this execution mode.
54 |
55 | Args:
56 | resource_id: ID of the required resource
57 | quantity: Number of units required
58 | setup_time: Setup time required for this resource
59 |
60 | Raises:
61 | ValueError: If quantity is not positive or setup_time is negative
62 | """
63 | if quantity <= 0:
64 | raise ValueError("quantity must be positive")
65 | if setup_time < 0:
66 | raise ValueError("setup_time must be non-negative")
67 |
68 | requirement = {"resource_id": resource_id, "quantity": quantity, "setup_time": setup_time}
69 | self.resource_requirements.append(requirement)
70 |
71 | def get_total_setup_time(self) -> float:
72 | """Calculate the total setup time for all resources.
73 |
74 | Returns:
75 | float: Total setup time required
76 | """
77 | return sum(req.get("setup_time", 0.0) for req in self.resource_requirements)
78 |
79 | def get_total_cost(self) -> float:
80 | """Calculate the total cost of execution.
81 |
82 | Returns:
83 | float: Total cost including resource costs and mode cost
84 | """
85 | resource_costs = sum(req.get("cost", 0.0) for req in self.resource_requirements)
86 | return self.cost + resource_costs
87 |
88 | def is_valid(self) -> bool:
89 | """Check if this execution mode is valid.
90 |
91 | Returns:
92 | bool: True if the mode is valid, False otherwise
93 | """
94 |         return bool(self.id and self.job_id and self.duration > 0 and len(self.resource_requirements) > 0)
95 |
96 | def __repr__(self) -> str:
97 | """Return a string representation of the execution mode."""
98 | return f"ExecutionMode(id={self.id}, " f"job_id={self.job_id}, " f"duration={self.duration})"
99 |
100 | def __str__(self) -> str:
101 | """Return a human-readable string representation of the execution mode."""
102 | return f"Execution Mode {self.id}: " f"Duration {self.duration}, " f"Quality Level {self.quality_level}"
103 |
--------------------------------------------------------------------------------
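A short usage sketch for `ExecutionMode`; the mode IDs, durations, and resource names below are made up for illustration only.

```python
from lekin.lekin_struct.execution_mode import ExecutionMode

fast_mode = ExecutionMode(id="op10-fast", job_id="J1", duration=4, cost=120.0, quality_level=2)
fast_mode.add_resource_requirement("M1", quantity=1, setup_time=0.5)
fast_mode.add_resource_requirement("TOOL-A", quantity=2, setup_time=0.0)

print(fast_mode.get_total_setup_time())  # 0.5
print(fast_mode.get_total_cost())        # 120.0 (these requirements carry no "cost" key)
print(fast_mode.is_valid())              # True: non-empty ids, positive duration, at least one requirement
print(fast_mode)                         # Execution Mode op10-fast: Duration 4, Quality Level 2
```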
/lekin/scheduler.py:
--------------------------------------------------------------------------------
1 | """Flexible job shop scheduler with support for multiple scheduling types and objectives.
2 |
3 | This module provides a flexible scheduling framework that supports:
4 | - Different scheduling types (job shop, flow shop, open shop)
5 | - Multiple objectives (makespan, tardiness, etc.)
6 | - Various solving methods (heuristics, meta-heuristics, exact methods)
7 | - Visualization and evaluation capabilities
8 | """
9 |
10 | from abc import ABC, abstractmethod
11 | from collections import OrderedDict
12 | import logging
13 | from typing import Any, Callable, Dict, List, Optional, Tuple, Union
14 |
15 | from lekin.datasets.check_data import check_data
16 | from lekin.solver import BaseSolver
17 |
18 | logger = logging.getLogger(__name__)
19 |
20 |
21 | class Scheduler(ABC):
22 | """Base class for all schedulers.
23 |
24 | This class provides a common interface for different types of schedulers
25 | and implements shared functionality.
26 | """
27 |
28 | def __init__(
29 | self,
30 | objective: "BaseObjective",
31 | solver: BaseSolver,
32 | max_operations: int,
33 | scheduling_type: str = "job_shop",
34 | **kwargs,
35 | ):
36 | """Initialize the scheduler.
37 |
38 | Args:
39 | objective: The objective function to optimize
40 | solver: The solver to use for finding solutions
41 | max_operations: Maximum number of operations per job
42 | scheduling_type: Type of scheduling problem ("job_shop", "flow_shop", "open_shop")
43 | **kwargs: Additional configuration parameters
44 | """
45 | self.objective = objective
46 | self.solver = solver
47 | self.max_operations = max_operations
48 | self.scheduling_type = scheduling_type
49 | self.config = kwargs
50 | self.solution = None
51 | self.metrics = {}
52 |
53 | # Validate scheduling type
54 | valid_types = ["job_shop", "flow_shop", "open_shop"]
55 | if scheduling_type not in valid_types:
56 | raise ValueError(f"Scheduling type must be one of {valid_types}")
57 |
58 | logger.info(f"Initialized {scheduling_type} scheduler with {solver.__class__.__name__}")
59 |
60 | def run(self, jobs: List[Dict], machines: List[Dict]) -> Dict:
61 | """Run the scheduling algorithm.
62 |
63 | Args:
64 | jobs: List of jobs with their operations and requirements
65 | machines: List of available machines and their capabilities
66 |
67 | Returns:
68 | Dict containing the scheduling solution
69 | """
70 | # Validate input data
71 | check_data(jobs, machines)
72 |
73 | # Solve the scheduling problem
74 | self.solution = self.solver.solve(jobs, machines)
75 |
76 | # Evaluate the solution
77 | self.evaluate()
78 |
79 | return self.solution
80 |
81 | def evaluate(self) -> Dict[str, float]:
82 | """Evaluate the current solution using the objective function.
83 |
84 | Returns:
85 | Dict containing evaluation metrics
86 | """
87 | if self.solution is None:
88 | raise ValueError("No solution available. Run the scheduler first.")
89 |
90 | self.metrics = self.objective.evaluate(self.solution)
91 | logger.info(f"Solution evaluation: {self.metrics}")
92 | return self.metrics
93 |
94 | def plot(self, save_path: Optional[str] = None) -> None:
95 | """Plot the current solution.
96 |
97 | Args:
98 | save_path: Optional path to save the plot
99 | """
100 | if self.solution is None:
101 | raise ValueError("No solution available. Run the scheduler first.")
102 |
103 | self.solver.plot(self.solution, save_path=save_path)
104 |
105 | @abstractmethod
106 | def validate_solution(self, solution: Dict) -> bool:
107 | """Validate if a solution satisfies all constraints.
108 |
109 | Args:
110 | solution: The solution to validate
111 |
112 | Returns:
113 | bool indicating if the solution is valid
114 | """
115 | pass
116 |
117 | def get_metrics(self) -> Dict[str, float]:
118 | """Get the current evaluation metrics.
119 |
120 | Returns:
121 | Dict containing the metrics
122 | """
123 | return self.metrics.copy()
124 |
125 | def reset(self) -> None:
126 | """Reset the scheduler state."""
127 | self.solution = None
128 | self.metrics = {}
129 |
--------------------------------------------------------------------------------
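Because `Scheduler` is abstract, only `validate_solution` has to be supplied to get a concrete scheduler. A hedged sketch is shown below; the objective and solver instances are whatever implementations you use and are not constructed here.

```python
from typing import Dict

from lekin.scheduler import Scheduler


class JobShopScheduler(Scheduler):
    def validate_solution(self, solution: Dict) -> bool:
        # Accept any non-empty solution dict; a real check would verify precedence
        # and resource-capacity constraints.
        return bool(solution)


# Usage sketch (my_objective, my_solver, jobs, machines are assumed to exist):
# scheduler = JobShopScheduler(objective=my_objective, solver=my_solver, max_operations=20)
# result = scheduler.run(jobs, machines)
# print(scheduler.get_metrics())
```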
/README.md:
--------------------------------------------------------------------------------
1 | [license-image]: https://img.shields.io/badge/License-Apache%202.0-blue.svg
2 | [license-url]: https://opensource.org/licenses/Apache-2.0
3 | [pypi-image]: https://badge.fury.io/py/lekin.svg
4 | [pypi-url]: https://pypi.python.org/pypi/lekin
5 | [pepy-image]: https://pepy.tech/badge/lekin
6 | [pepy-url]: https://pepy.tech/project/lekin
7 | [build-image]: https://github.com/HongyingYue/python-lekin/actions/workflows/test.yml/badge.svg?branch=main
8 | [build-url]: https://github.com/HongyingYue/python-lekin/actions/workflows/test.yml?query=branch%3Amain
9 | [lint-image]: https://github.com/HongyingYue/python-lekin/actions/workflows/lint.yml/badge.svg?branch=main
10 | [lint-url]: https://github.com/HongyingYue/python-lekin/actions/workflows/lint.yml?query=branch%3Amain
11 | [docs-image]: https://readthedocs.org/projects/python-lekin/badge/?version=latest
12 | [docs-url]: https://python-lekin.readthedocs.io/en/latest/
13 | [coverage-image]: https://codecov.io/gh/HongyingYue/python-lekin/branch/main/graph/badge.svg
14 | [coverage-url]: https://codecov.io/github/HongyingYue/python-lekin?branch=main
15 |
16 |
17 |
18 |
19 |
20 | [![LICENSE][license-image]][license-url]
21 | [![PyPI Version][pypi-image]][pypi-url]
22 | [![Download][pepy-image]][pepy-url]
23 | [![Build Status][build-image]][build-url]
24 | [![Lint Status][lint-image]][lint-url]
25 | [![Docs Status][docs-image]][docs-url]
26 | [![Code Coverage][coverage-image]][coverage-url]
27 |
28 | **[Documentation](https://python-lekin.readthedocs.io)** | **[Tutorials](https://python-lekin.readthedocs.io/en/latest/tutorials.html)** | **[Release Notes](https://python-lekin.readthedocs.io/en/latest/CHANGELOG.html)** | **[中文](https://github.com/hongyingyuen/python-lekin/blob/main/README_zh_CN.md)**
29 |
30 | **python-lekin** is a Flexible Job Shop Scheduler Library, named after [Lekin](https://web-static.stern.nyu.edu/om/software/lekin/).
31 | As a core function of **APS (advanced planning and scheduling)**, it helps manufacturers allocate materials and production capacity to balance demand against available capacity.
32 |
33 | - Changeover Optimization
34 | - Ready for demo, research and maybe production
35 |
36 | # **DEVELOPING - NOT FINISHED AND DON'T USE IT NOW!**
37 |
38 |
39 | ## Features
40 |
41 | - Multiple solving strategies:
42 | - Continuous Time Planning (CTP)
43 | - Construction Heuristics
44 | - Meta-heuristics (Genetic Algorithm, Simulated Annealing)
45 | - Reinforcement Learning
46 | - Operation Research methods
47 | - Extensible architecture for custom solvers
48 | - Comprehensive constraint handling
49 | - Performance metrics and visualization
50 | - Parallel solving capabilities
51 | - Solution validation and verification
52 |
53 |
54 | ## Tutorial
55 |
56 | [Open in Colab](https://colab.research.google.com/drive/1zHqYZFZNvLE7aoDcBUh7TCK7oHfXQkLi?usp=sharing)
57 |
58 |
59 | **Installation**
60 |
61 | ``` shell
62 | pip install lekin
63 | ```
64 |
65 | **Usage**
66 |
67 | ``` python
68 | from lekin import Heuristics, Rule
69 | from lekin import Scheduler
70 |
71 | solver = Rule('SPT')
72 | scheduler = Scheduler(solver)
73 | scheduler.solve(job_list, machine_list)
74 |
75 | scheduler.draw()
76 | ```
77 |
78 | ## Examples
79 |
80 | In the real world, lekin integrates with an MES to deploy production plans on the shop floor. Integration with an ERP system is also needed to exchange information on demand, inventory, and production.
81 |
82 | - Exhaustive search
83 | - branch and bound
84 |
85 | - Construction heuristics
86 | - [SPT]()
87 | - [critical path]()
88 |
89 | - Meta heuristics
90 | - [local search]()
91 | - [hill climbing]()
92 | - [tabu search]()
93 | - [evolutionary algorithms]()
94 | - [genetic algorithms]()
95 |
96 | - Operations research
97 | - [or-tools]()
98 |
99 | - Reinforcement learning
100 |
101 | Meta-heuristics seeded with a construction-heuristic initial solution
102 | are the recommended choice (see the pipeline sketch following this README).
103 |
104 |
105 | ### Adding New Constraints
106 |
107 | ```python
108 | from lekin.solver.constraints import BaseConstraint
109 |
110 | class MyCustomConstraint(BaseConstraint):
111 | def check(self, solution):
112 | # Implement your constraint checking logic
113 | pass
114 | ```
115 |
116 |
117 |
118 | ## Citation
119 | ```
120 | @misc{python-lekin2022,
121 | author = {Hongying Yue},
122 | title = {python lekin},
123 | year = {2022},
124 | publisher = {GitHub},
125 | journal = {GitHub repository},
126 | howpublished = {\url{https://github.com/hongyingyue/python-lekin}},
127 | }
128 | ```
129 |
--------------------------------------------------------------------------------
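A hedged sketch of the recommended pipeline: build a feasible schedule with a construction heuristic, then hand it to a meta-heuristic as the starting point. It only uses classes that appear in this repository; whether `GeneticScheduler` actually consumes the seeded schedule is an assumption (its optimizer is still a stub), and the data path is illustrative.

```python
from examples.rule_example import prepare_data
from lekin.solver.construction_heuristics import ForwardScheduler
from lekin.solver.meta_heuristics.genetic import GeneticScheduler

# Path is relative to the repository root; adjust to your working directory.
job_collector, resource_collector, route_collector = prepare_data(file_path="./examples/data/k1.json")

# 1) Construction heuristic: quickly produce a feasible assignment in place.
ForwardScheduler(job_collector, resource_collector, route_collector).run()

# 2) Meta-heuristic: refine that assignment. Passing the collector as `initial_schedule`
#    is an assumption based on GeneticScheduler's constructor signature.
genetic = GeneticScheduler(
    job_collector,
    resource_collector,
    route_collector,
    initial_schedule=job_collector,
)
```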
/docs/source/application.rst:
--------------------------------------------------------------------------------
1 | Application
2 | ============
3 | 
4 | Basic concepts
5 | ----------------
6 | Production leveling: heijunka
7 | 
8 | 
9 | 
10 | Combinatorial optimization basics
11 | -----------------------------------
12 | 
13 | - Bin Packing (BP)
14 | - Knapsack Problem (KP)
15 | - Job-shop Scheduling Problem (JSP)
16 | - Integer Programming (IP)
17 | 
18 | - Traveling Salesman Problem (TSP)
19 | - Vehicle Routing Problem (VRP)
20 | - Graph Coloring (GC)
21 | - Graph Matching (GM)
22 | 
23 | 
24 | - Exact algorithms: Branch and Bound, Dynamic Programming
25 | 
26 | - Approximate and heuristic algorithms
27 |     - greedy search, local search, linear programming and relaxation, sequencing rules
28 |     - simulated annealing, tabu search, evolutionary algorithms, ant colony optimization, particle swarm optimization, iterated local search, variable neighborhood search
29 | 
30 | 
31 | 
32 | Data
33 | ----------------
34 | 
35 | MRP: Material Requirements Planning
36 | 
37 | BOM: Bill Of Materials
38 | 
39 | Route
40 | 
41 | 
42 | Data preparation
43 | - master demand data
44 | - process routes
45 | - resource calendars
46 | 
47 | 
48 | Functionality
49 | ------------------------
50 | 
51 | A complete APS system contains the following modules.
52 | - demand center
53 | - scheduling center
54 |     - scheduling workbench
55 | - material center
56 | 
57 | 
58 | Modeling
59 | ----------
60 | 
61 | Activities represent operations with time and resource requirements
62 | Resources have calendars defining availability
63 | Demand represents customer orders to fulfill
64 | 
65 | 
66 | Process
67 | ----------
68 | 
69 | A real APS deployment must distinguish between individual workshops and operations in detail, and handle the multi-plant parts of the BOM.
70 | - This is largely orthogonal to the APS core: it only needs to expand the last level of operations and the last level of the BOM, compute against the actual resource constraints, and the rest is a difference in presentation
71 | 
72 | First solve the shop scheduling problem with m operations and n machines, then map the real problem onto that shop problem (a minimal dispatch-loop sketch in Python follows this file).
73 | 
74 | 1. Derive product priorities from a rule (total product work time, earliest start time of operation B, and so on)
75 | 2. Initialize task states: operation A of every product is ready to start, all other operations are not
76 | 3. Process the tasks of operation A in priority order, updating each task's state and the state of its immediate successor
77 | 4. Sort machines by idle time and take the machine k that can start earliest
78 | 5. Retrieve the tasks compatible with machine k's idle start time and the current task states, and store them as task list R
79 | 6. If task list R is empty, set k = k + 1 and return to step 5; otherwise continue
80 | 7. Sort by earliest possible processing time, pick the task that can start earliest, and process it, updating the machine state, the task state, and the states of the following operations:
81 |     - determine the task's start and end times
82 |     - update the machine's release time
83 |     - update the current task's state, start time, and completion time
84 |     - update the earliest start time of the current task's successors (not needed if it is the product's last operation)
85 | 8. If all tasks are finished, stop; otherwise return to step 4.
86 | 
87 | Resolving conflicts is itself a forward-scheduling pass: all tasks placed on the resource are re-sequenced forward in order
88 | 
89 | 
90 | Shop process 1 - backward plus forward scheduling
91 | --------------------------------------------------------
92 | 
93 | First dispatch within the frozen horizon, by lock period
94 | - Key issues: part of a grouped operation lies in the frozen horizon; a job's operations are not a complete operation set
95 | - Only assign resources and calendars; because the set is incomplete, schedule forward.
96 | - Check material-kitting constraints; if material is not fully kitted, schedule by kitting availability and raise an alert
97 | 
98 | 
99 | Then schedule the locked tasks
100 | 
101 | Backward scheduling only; if backward scheduling is infeasible, return an error. Work in priority order
102 | - Non-critical operations: place them earlier by lead_time (lead_time when backward, lag_time when forward) and takt, ignoring kitting time.
103 | - Critical operations: if a resource is locked, sequence according to that resource. Kitting time is ignored because locked tasks exist to protect priority; missing material is chased manually through shortage reports
104 | - Shared operations: a shared operation takes the earliest time window among the jobs that share it.
105 |     - Jobs that were already scheduled must push their predecessor operations earlier from this new anchor
106 |     - The ordering of jobs that share the operation is adjusted when appropriate to minimize such changes
107 | - In the frozen horizon with unsatisfied material constraints: schedule at the earliest kitting time, together with the already scheduled operations
108 | 
109 | 
110 | Then schedule the remaining tasks by priority
111 | 
112 | Backward scheduling first
113 | - Non-critical operations before the first critical operation are placed by lead time first and updated again later.
114 | - Critical operations are scheduled backward; non-critical operations (which appear when tasks are split) are pushed earlier by lead time
115 |     - In backward scheduling the time constraints are latest times, while material constraints are earliest start times. If there is not enough time to schedule, that operation and all later operations switch to forward rescheduling
116 |     - An alternative is to schedule straight back to the first operation and then forward-schedule the rest by earliest start time, possibly even deferring that to the compaction pass
117 |     - When a shared task is reached and the previously placed shared task is later, the remaining operations also switch to forward rescheduling
118 | - The non-critical operations left over from the first step are pulled later
119 | - If the resource calendar is insufficient at any step, move on to the next stage
120 | 
121 | If backward scheduling fails, schedule forward
122 | - Start from each resource's earliest available date (choosing the resource that can start earliest); non-critical operations are placed by lead time and updated again later
123 | - Critical operations are scheduled forward, handling shared tasks that violate their time constraints
124 | - The non-critical operations left over from the first step are pulled earlier
125 | - If the resource calendar is insufficient at any step, return an error
126 | Jobs whose critical operations are not configured at all are scheduled backward with infinite capacity
127 | 
128 | 
129 | Compaction of scheduled tasks
130 | - All tasks are forward-scheduled, similar to scheduling by resource.
131 | - Per the critical-operation setting "may it be moved": the first critical operation on each resource may move earlier, updating the movable state and start-time constraints of its successors
132 | - Iterate until stable
133 | 
134 | 
135 | Retry unscheduled tasks
136 | - After compaction, try again to schedule the tasks that could not be placed before
137 | 
138 | 
139 | Scheduling of outsourced work
140 | - Outsourcing is assigned per supplier and placed on that supplier's calendar
141 | - Based on the capacity of the critical operations, schedule the outsourced portions in priority order
142 | 
143 | 
144 | Shop process 2 - backward plus forward scheduling, variant 2
145 | -----------------------------------------------------------------
146 | Still schedule the locked tasks first
147 | 
148 | Schedule all tasks backward or forward by due date and earliest start date, ignoring the resource constraints themselves [the open question this raises: how to choose resource priority]
149 | 
150 | 
151 | When scheduling forward, follow job priority [explicit assignment > priority]
152 | - Each job starts its first operation at that operation's earliest start date
153 | 
154 | 
155 | Shop process 3 - incremental scheduling by resource
156 | --------------------------------------------------------
157 | Input: scheduling tasks (MOs plus planned orders)
158 | Output: the assigned resource and schedule for each operation
159 | 1. Separate main work orders from component work orders and link sub-components to their parents
160 | 2. Separate the critical and non-critical operations of the main work orders
161 | 3. Initialize the resource queues of critical operations that were scheduled before and whose resources still exist
162 | 4. For new planned orders, or when a resource no longer exists, reassign the tasks and finish initializing the per-resource task queues
163 | 5. Re-sequence the task queue on each resource
164 | 6. Push and pull the non-critical operations of the main work orders
165 | 7. Pull the component work orders and their operations earlier
166 | 
167 | 
168 | Shop process 4
169 | ----------------
170 | Multi-operation scheduling
171 | - Preserve the previous scheduling order on each resource as much as possible, to keep results stable
172 | -
173 | 
174 | 
175 | Visualization
176 | --------------
177 | - planned load of each resource on the timeline
178 | - operations per order on the timeline
179 | 
180 | 
181 | Rescheduling from the visualization
182 | ----------------------------------------
183 | Input: resources and the task-queue order on each resource
184 | Output:
185 | 1. Initialize the tasks into the incremental scheduling queue
186 | 
187 | 
188 | Capacity ramp-up
189 | -----------------
190 | 
191 | When a Material_op starts, parse the ramp-up configuration into a map keyed by hours or by quantity, or into a ramp-up curve configuration
192 | 
193 | When computing an OP's time, the OP may be a single work order, or one large cross-work-order operation of a planned order / MRP.
194 | Each OP additionally records its time inside the MaterialOP; that time is looked up in the map to find the extra time consumed, which becomes the post-ramp-up time
195 | 
196 | Each op also records the hours of its ramp-up phase and its hourly capacity
197 | 
198 | When computing the final detailed schedule, split each op's duration and quantity across the shifts between its start and end times
199 | 
200 | [Correction: quantities cannot be produced only when results are generated. By then each op has been split at the resource by per-shift capacity, so the detailed ramp-up is no longer known; determine the quantities during generation, and only correct the last piece at the end]
201 | 
202 | 
203 | Shop scheduling
204 | ------------------
205 | 0. Define the model
206 |     - Multi-Mode Resource Constrained Project Scheduling Problem (MRCPSP)
207 | 
208 | 1. Prepare the environment
209 | 
210 | 2. Define the domain model
211 | Entity
212 | - Job
213 | - Resource
214 | - Timeslot
215 | 
216 | Planning entity
217 | - JobAssignment
218 | 
219 | Solution
220 | - JobSchedule
221 | 
222 | 3. Constraints
223 |     - OptaPlanner's Constraint Streams API is a declarative API
224 |     - soft constraints express objectives, hard constraints express feasibility
225 | 
226 | 4. Solver
227 |
--------------------------------------------------------------------------------
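A minimal Python sketch of the m-operation x n-machine dispatch loop described in the "Process" section above (steps 1-8). All names here are illustrative and independent of lekin's APIs.

```python
from dataclasses import dataclass
from typing import List


@dataclass
class Task:
    product: str
    seq: int          # position of the operation within its product (0-based)
    machine: str
    duration: float
    start: float = 0.0
    end: float = 0.0


def dispatch(tasks: List[Task]) -> List[Task]:
    machine_free = {t.machine: 0.0 for t in tasks}      # step 4: machine release times
    done_until = {}                                      # product -> finish time of its last completed op
    done_count = {t.product: 0 for t in tasks}           # how many ops of each product are finished
    # step 1: product priority = total work content (largest first)
    priority = {p: -sum(t.duration for t in tasks if t.product == p) for p in done_count}
    pending = sorted(tasks, key=lambda t: (priority[t.product], t.seq))

    scheduled = []
    while pending:                                       # step 8: loop until every task is placed
        # steps 2-3: a task is ready when all earlier ops of its product are done
        ready = [t for t in pending if t.seq == done_count[t.product]]
        # steps 5-7: among ready tasks, pick the one that can start earliest
        best = min(ready, key=lambda t: max(machine_free[t.machine], done_until.get(t.product, 0.0)))
        best.start = max(machine_free[best.machine], done_until.get(best.product, 0.0))
        best.end = best.start + best.duration
        machine_free[best.machine] = best.end            # update the machine's release time
        done_until[best.product] = best.end              # update the successor's earliest start
        done_count[best.product] += 1
        pending.remove(best)
        scheduled.append(best)
    return scheduled


if __name__ == "__main__":
    demo = [
        Task("P1", 0, "M1", 5), Task("P1", 1, "M2", 3),
        Task("P2", 0, "M2", 4), Task("P2", 1, "M1", 6),
    ]
    for t in dispatch(demo):
        print(t.product, t.seq, t.machine, t.start, t.end)
```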
/lekin/objective/__init__.py:
--------------------------------------------------------------------------------
1 | """Job Shop Scheduling Optimization Objectives Module.
2 |
3 | This module provides a collection of objective functions and classes for evaluating
4 | job shop scheduling solutions. It includes various metrics such as makespan,
5 | tardiness, and resource utilization.
6 | """
7 |
8 | from abc import ABC, abstractmethod
9 | from dataclasses import dataclass
10 | from typing import Any, Dict, List, Optional
11 |
12 | from lekin.objective.makespan import calculate_makespan
13 | from lekin.objective.tardiness import (
14 | calculate_tardiness,
15 | calculate_total_late_jobs,
16 | calculate_total_late_time,
17 | calculate_total_tardiness,
18 | )
19 |
20 |
21 | @dataclass
22 | class ObjectiveResult:
23 | """Container for objective function results."""
24 |
25 | value: float
26 | details: Dict[str, Any]
27 | is_feasible: bool = True
28 |
29 |
30 | class SchedulingObjective(ABC):
31 | """Abstract base class for all scheduling objectives."""
32 |
33 | @abstractmethod
34 | def evaluate(self, schedule_result: Dict, job_collector: Any) -> ObjectiveResult:
35 | """Evaluate the objective function for a given schedule.
36 |
37 | Args:
38 | schedule_result: The schedule to evaluate
39 | job_collector: Collection of jobs and resources
40 |
41 | Returns:
42 | ObjectiveResult containing the evaluation results
43 | """
44 | pass
45 |
46 |
47 | class MakespanObjective(SchedulingObjective):
48 | """Objective for minimizing the maximum completion time of all jobs."""
49 |
50 | def evaluate(self, schedule_result: Dict, job_collector: Any) -> ObjectiveResult:
51 | calculate_makespan(job_collector)
52 | max_makespan = max(job.makespan for job in job_collector.job_list)
53 | return ObjectiveResult(
54 | value=max_makespan,
55 | details={"job_makespans": {job.id: job.makespan for job in job_collector.job_list}},
56 | is_feasible=True,
57 | )
58 |
59 |
60 | class TardinessObjective(SchedulingObjective):
61 | """Objective for minimizing job tardiness."""
62 |
63 | def __init__(self, objective_type: str = "total"):
64 | """Initialize tardiness objective.
65 |
66 | Args:
67 | objective_type: Type of tardiness metric ('total', 'max', 'weighted')
68 | """
69 | self.objective_type = objective_type
70 |
71 | def evaluate(self, schedule_result: Dict, job_collector: Any) -> ObjectiveResult:
72 | if self.objective_type == "total":
73 | value = calculate_total_tardiness(schedule_result, job_collector.job_list)
74 | elif self.objective_type == "late_jobs":
75 | value = calculate_total_late_jobs(schedule_result, job_collector.job_list)
76 | elif self.objective_type == "late_time":
77 | value = calculate_total_late_time(schedule_result, job_collector.job_list)
78 | else:
79 | raise ValueError(f"Unknown tardiness objective type: {self.objective_type}")
80 |
81 | return ObjectiveResult(value=value, details={"tardiness_type": self.objective_type}, is_feasible=True)
82 |
83 |
84 | class ResourceUtilizationObjective(SchedulingObjective):
85 | """Objective for maximizing resource utilization."""
86 |
87 | def evaluate(self, schedule_result: Dict, job_collector: Any) -> ObjectiveResult:
88 | total_time = 0
89 | busy_time = 0
90 |
91 | for resource in job_collector.resources:
92 | for operation in schedule_result:
93 | if operation.resource == resource:
94 | busy_time += operation.duration
95 | total_time = max(total_time, operation.end_time)
96 |
97 | utilization = busy_time / (total_time * len(job_collector.resources)) if total_time > 0 else 0
98 |
99 | return ObjectiveResult(
100 | value=utilization,
101 | details={"total_time": total_time, "busy_time": busy_time, "resource_count": len(job_collector.resources)},
102 | is_feasible=True,
103 | )
104 |
105 |
106 | class CompositeObjective(SchedulingObjective):
107 | """Combines multiple objectives with weights."""
108 |
109 | def __init__(self, objectives: List[tuple[SchedulingObjective, float]]):
110 | """Initialize composite objective.
111 |
112 | Args:
113 | objectives: List of (objective, weight) tuples
114 | """
115 | self.objectives = objectives
116 |
117 | def evaluate(self, schedule_result: Dict, job_collector: Any) -> ObjectiveResult:
118 | total_value = 0
119 | details = {}
120 | is_feasible = True
121 |
122 | for objective, weight in self.objectives:
123 | result = objective.evaluate(schedule_result, job_collector)
124 | total_value += weight * result.value
125 | details[objective.__class__.__name__] = result.details
126 | is_feasible = is_feasible and result.is_feasible
127 |
128 | return ObjectiveResult(value=total_value, details=details, is_feasible=is_feasible)
129 |
--------------------------------------------------------------------------------
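A hedged usage sketch for composing objectives with weights; `schedule_result` and `job_collector` are whatever your solver produced, so the evaluation call is left commented.

```python
from lekin.objective import CompositeObjective, MakespanObjective, TardinessObjective

objective = CompositeObjective(
    [
        (MakespanObjective(), 1.0),          # makespan at full weight
        (TardinessObjective("total"), 0.5),  # total tardiness at half weight
    ]
)

# result = objective.evaluate(schedule_result, job_collector)
# print(result.value, result.is_feasible, result.details)
```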
/docs/source/conf.py:
--------------------------------------------------------------------------------
1 | # Configuration file for the Sphinx documentation builder.
2 | #
3 | # This file only contains a selection of the most common options. For a full
4 | # list see the documentation:
5 | # https://www.sphinx-doc.org/en/master/usage/configuration.html
6 |
7 | # -- Path setup --------------------------------------------------------------
8 |
9 | # If extensions (or modules to document with autodoc) are in another directory,
10 | # add these directories to sys.path here. If the directory is relative to the
11 | # documentation root, use os.path.abspath to make it absolute, like shown here.
12 |
13 | import os
14 | from pathlib import Path
15 | import shutil
16 | import sys
17 |
18 | from sphinx.application import Sphinx
19 | from sphinx.ext.autosummary import Autosummary
20 | from sphinx.pycode import ModuleAnalyzer
21 |
22 | SOURCE_PATH = Path(os.path.dirname(__file__)) # noqa # docs source
23 | PROJECT_PATH = SOURCE_PATH.joinpath("../..") # noqa # project root
24 |
25 | sys.path.insert(0, str(PROJECT_PATH)) # noqa
26 |
27 | import lekin # isort:skip
28 |
29 | # -- Project information -----------------------------------------------------
30 |
31 | project = "python-lekin"
32 | copyright = "2025, Hongying Yue"
33 | author = "Hongying Yue"
34 |
35 |
36 | # -- General configuration ---------------------------------------------------
37 |
38 | # Add any Sphinx extension module names here, as strings. They can be
39 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
40 | # ones.
41 | extensions = [
42 | "nbsphinx",
43 | "recommonmark",
44 | "sphinx_markdown_tables",
45 | "sphinx.ext.autodoc",
46 | "sphinx.ext.autosummary",
47 | "sphinx.ext.doctest",
48 | "sphinx.ext.intersphinx",
49 | "sphinx.ext.mathjax",
50 | "sphinx.ext.viewcode",
51 | "sphinx.ext.githubpages",
52 | "sphinx.ext.napoleon",
53 | ]
54 |
55 | # Add any paths that contain templates here, relative to this directory.
56 | templates_path = ["_templates"]
57 |
58 |
59 | # List of patterns, relative to source directory, that match files and
60 | # directories to ignore when looking for source files.
61 | # This pattern also affects html_static_path and html_extra_path.
62 | exclude_patterns = []
63 |
64 |
65 | # -- Options for HTML output -------------------------------------------------
66 |
67 | # The theme to use for HTML and HTML Help pages. See the documentation for
68 | # a list of builtin themes.
69 | #
70 | html_theme = "pydata_sphinx_theme"
71 | html_logo = "_static/logo.svg"
72 | html_favicon = "_static/logo.svg"
73 |
74 |
75 | # Add any paths that contain custom static files (such as style sheets) here,
76 | # relative to this directory. They are copied after the builtin static files,
77 | # so a file named "default.css" will overwrite the builtin "default.css".
78 | html_static_path = ["_static"]
79 |
80 |
81 | # setup configuration
82 | def skip(app, what, name, obj, skip, options):
83 | """
84 |     Skip __init__ members when building autodoc pages
85 | """
86 | if name == "__init__":
87 | return True
88 | return skip
89 |
90 |
91 | apidoc_output_folder = SOURCE_PATH.joinpath("api")
92 | PACKAGES = [lekin.__name__]
93 |
94 |
95 | def get_by_name(string: str):
96 | """
97 | Import by name and return imported module/function/class
98 | Args:
99 | string (str): module/function/class to import, e.g. 'pandas.read_csv' will return read_csv function as
100 | defined by pandas
101 | Returns:
102 | imported object
103 | """
104 | class_name = string.split(".")[-1]
105 | module_name = ".".join(string.split(".")[:-1])
106 |
107 | if module_name == "":
108 | return getattr(sys.modules[__name__], class_name)
109 |
110 | mod = __import__(module_name, fromlist=[class_name])
111 | return getattr(mod, class_name)
112 |
113 |
114 | class ModuleAutoSummary(Autosummary):
115 | def get_items(self, names):
116 | new_names = []
117 | for name in names:
118 | mod = sys.modules[name]
119 | mod_items = getattr(mod, "__all__", mod.__dict__)
120 | for t in mod_items:
121 | if "." not in t and not t.startswith("_"):
122 | obj = get_by_name(f"{name}.{t}")
123 | if hasattr(obj, "__module__"):
124 | mod_name = obj.__module__
125 | t = f"{mod_name}.{t}"
126 |                     if t.startswith("lekin"):
127 | new_names.append(t)
128 | new_items = super().get_items(sorted(new_names))
129 | return new_items
130 |
131 |
132 | def setup(app: Sphinx):
133 | app.add_css_file("custom.css")
134 | app.connect("autodoc-skip-member", skip)
135 | app.add_directive("moduleautosummary", ModuleAutoSummary)
136 | app.add_js_file("https://buttons.github.io/buttons.js", **{"async": "async"})
137 |
138 |
139 | # autosummary
140 | # autosummary_generate = True
141 | # shutil.rmtree(SOURCE_PATH.joinpath("api"), ignore_errors=True)
142 |
143 |
144 | # copy changelog
145 | # shutil.copy(
146 | # "../../CHANGELOG.md",
147 | # "CHANGELOG.md",
148 | # )
149 |
--------------------------------------------------------------------------------
/lekin/solver/construction_heuristics/forward.py:
--------------------------------------------------------------------------------
1 | """Earliest Possible Start Time
2 | Forward scheduler
3 | 正排"""
4 |
5 | import logging
6 | import math
7 |
8 | from lekin.solver.construction_heuristics.base import BaseScheduler
9 |
10 |
11 | class ForwardScheduler(BaseScheduler):
12 | def __init__(self, job_collector, resource_collector, route_collector=None, **kwargs):
13 | super().__init__(job_collector, resource_collector, **kwargs)
14 | self.job_collector = job_collector
15 | self.resource_collector = resource_collector
16 | self.route_collector = route_collector
17 |
18 | for key, value in kwargs.items():
19 | setattr(self, key, value)
20 |
21 | def run(self):
22 | for i, job in enumerate(self.job_collector.job_list):
23 | self.scheduling_job(job, self.resource_collector, self.route_collector)
24 | logging.info("First Scheduling Done")
25 |
26 | for i, job in enumerate(self.job_collector.job_list):
27 | self.rescheduling_job_to_resolve_conflict(job)
28 | logging.info("ReScheduling Done")
29 | return
30 |
31 | def scheduling_job(self, job, resource_collector, route_collector):
32 | logging.info(f"\nAssign Job {job.job_id}")
33 |
34 | if route_collector is not None:
35 | route_id = job.assigned_route_id
36 | route = None
37 | for r in route_collector:
38 | if r.route_id == route_id:
39 | route = r
40 | break
41 |             if not route:
42 |                 logging.warning(f"Route '{job.assigned_route_id}' not found for Job '{job.job_id}'. Skipping job.")
43 |                 return
44 |             job.operations = route.operations_sequence
45 |
46 | op_earliest_start = 0
47 | for operation in job.operations:
48 | logging.info(f"\tAssign Operation {operation.operation_id} of Job {job.job_id}")
49 | chosen_resource, chosen_timeslot_hour = self.find_best_resource_and_timeslot_for_operation(
50 | operation, op_earliest_start
51 | )
52 |
53 | if chosen_resource and chosen_timeslot_hour:
54 | logging.info(
55 | f"\tOperation {operation.operation_id} assigned in: resource"
56 | f" {chosen_resource.resource_id}, {min(chosen_timeslot_hour)} -"
57 | f" {max(chosen_timeslot_hour)}"
58 | )
59 |
60 | # assign
61 | operation.assigned_resource = chosen_resource
62 | operation.assigned_hours = chosen_timeslot_hour
63 | chosen_resource.assigned_operations.append(operation)
64 | chosen_resource.assigned_hours += chosen_timeslot_hour
65 |
66 | op_earliest_start = chosen_timeslot_hour[-1] + 1
67 | return
68 |
69 | def find_best_resource_and_timeslot_for_operation(self, operation, op_earliest_start, **kwargs):
70 | available_resource = operation.available_resource
71 |
72 | earliest_index = 0
73 | resource_earliest_time = float("inf")
74 | for i, resource in enumerate(available_resource):
75 | resource_time = resource.get_earliest_available_time(duration=operation.processing_time)
76 |
77 | if resource_time < resource_earliest_time:
78 | earliest_index = i
79 | resource_earliest_time = resource_time
80 |
81 | chosen_resource = available_resource[earliest_index]
82 | earliest_time = int(max(op_earliest_start, resource_earliest_time))
83 | chosen_hours = list(range(earliest_time, earliest_time + math.ceil(operation.processing_time)))
84 | return chosen_resource, chosen_hours
85 |
86 | def rescheduling_job_to_resolve_conflict(self, job):
87 | op_earliest_start = 0
88 | for operation in job.operations:
89 | logging.info(f"Rescheduling {job.job_id}/ {operation.operation_id}")
90 | assigned_resource = operation.assigned_resource
91 | if operation.assigned_hours[0] < op_earliest_start:
92 | delta = op_earliest_start - operation.assigned_hours[0]
93 | operation.assigned_hours = [i + delta for i in operation.assigned_hours]
94 |
95 | pivot_assigned_hours = operation.assigned_hours
96 | op_earliest_start = pivot_assigned_hours[-1] + 1
97 |
98 | ops_in_same_resource = assigned_resource.assigned_operations
99 | ops_in_same_resource.sort(key=lambda x: x.assigned_hours[0], reverse=False)
100 | logging.info([i.parent_job_id for i in ops_in_same_resource])
101 | for op in ops_in_same_resource:
102 | if op != operation:
103 | op_start = op.assigned_hours[0]
104 | if set(pivot_assigned_hours).intersection(set(op.assigned_hours)):
105 | logging.info(
106 | f"\tRescheduling {job.job_id}/ {op.operation_id} in {assigned_resource.resource_id}"
107 | )
108 | op.assigned_hours = [i + pivot_assigned_hours[-1] - op_start + 1 for i in op.assigned_hours]
109 | return
110 |
--------------------------------------------------------------------------------
/lekin/solver/reinforcement_learning/q_learning.py:
--------------------------------------------------------------------------------
1 | """
2 | Q-Learning based solver for job shop scheduling problems.
3 | """
4 |
5 | import logging
6 | from typing import Any, Dict, List, Optional
7 |
8 | import numpy as np
9 |
10 | from lekin.lekin_struct.job import Job
11 | from lekin.lekin_struct.operation import Operation
12 | from lekin.lekin_struct.resource import Resource
13 | from lekin.lekin_struct.route import Route
14 | from lekin.solver.core.base_solver import BaseSolver
15 | from lekin.solver.core.solution import Solution
16 | from lekin.solver.reinforcement_learning.agent import DQNAgent
17 | from lekin.solver.reinforcement_learning.environment import SchedulingEnvironment
18 |
19 | logger = logging.getLogger(__name__)
20 |
21 |
22 | class QLearningSolver(BaseSolver):
23 | """Q-Learning based solver for job shop scheduling problems."""
24 |
25 | def __init__(self, config: Optional[Dict[str, Any]] = None):
26 | """Initialize the Q-Learning solver.
27 |
28 | Args:
29 | config: Optional configuration dictionary with the following keys:
30 | - learning_rate: float, learning rate for Q-learning (default: 0.001)
31 | - gamma: float, discount factor (default: 0.99)
32 | - epsilon: float, exploration rate (default: 1.0)
33 | - epsilon_decay: float, decay rate for exploration (default: 0.995)
34 | - epsilon_min: float, minimum exploration rate (default: 0.01)
35 | - batch_size: int, size of training batches (default: 64)
36 | - buffer_size: int, size of replay buffer (default: 10000)
37 | - target_update: int, frequency of target network updates (default: 10)
38 | - hidden_size: int, size of hidden layers (default: 128)
39 | - num_episodes: int, number of training episodes (default: 1000)
40 | - model_path: str, path to save/load the trained model (default: None)
41 | """
42 | super().__init__(config)
43 | self.agent = DQNAgent(config)
44 | self.num_episodes = self.config.get("num_episodes", 1000)
45 | self.model_path = self.config.get("model_path")
46 |
47 | def solve(self, jobs: List[Job], routes: List[Route], resources: List[Resource]) -> Solution:
48 | """Solve the scheduling problem using Q-Learning.
49 |
50 | Args:
51 | jobs: List of jobs to be scheduled
52 | routes: List of available routes
53 | resources: List of available resources
54 |
55 | Returns:
56 | A solution to the scheduling problem
57 | """
58 | # Create environment
59 | env = SchedulingEnvironment(jobs, routes, resources)
60 |
61 | # Initialize agent networks if not loaded from file
62 | if not self.model_path:
63 | input_size = 10 # Size of state vector
64 | output_size = len(jobs) # Number of possible actions
65 | self.agent.initialize_networks(input_size, output_size)
66 |
67 | # Train the agent
68 | logger.info("Starting training...")
69 | self.agent.train(env, self.num_episodes)
70 |
71 | # Save the trained model if path is provided
72 | if self.model_path:
73 | self.agent.save(self.model_path)
74 | else:
75 | # Load pre-trained model
76 | self.agent.load(self.model_path)
77 |
78 | # Use the trained agent to find the best solution
79 | best_solution = None
80 | best_makespan = float("inf")
81 |
82 | # Run multiple episodes to find the best solution
83 | for _ in range(10):
84 | state = env.reset()
85 | done = False
86 |
87 | while not done:
88 | action = self.agent.select_action(state, env.get_valid_actions())
89 | state, _, done, _ = env.step(action)
90 |
91 | solution = env.solution
92 | makespan = solution.get_makespan()
93 |
94 | if makespan < best_makespan:
95 | best_makespan = makespan
96 | best_solution = solution
97 |
98 | logger.info(f"Trial makespan: {makespan:.2f}")
99 |
100 | if not best_solution:
101 | raise RuntimeError("Failed to find a valid solution")
102 |
103 | return best_solution
104 |
105 | def validate_solution(self, solution: Solution) -> bool:
106 | """Validate if a solution meets all constraints.
107 |
108 | Args:
109 | solution: The solution to validate
110 |
111 | Returns:
112 | True if solution is valid, False otherwise
113 | """
114 | if not solution.assignments:
115 | return False
116 |
117 | # Check for resource conflicts
118 | resource_assignments = {}
119 | for assignment in solution.assignments:
120 | if assignment.resource_id not in resource_assignments:
121 | resource_assignments[assignment.resource_id] = []
122 | resource_assignments[assignment.resource_id].append(assignment)
123 |
124 | # Check for overlapping assignments on the same resource
125 | for resource_id, assignments in resource_assignments.items():
126 | assignments.sort(key=lambda x: x.start_time)
127 | for i in range(len(assignments) - 1):
128 | if assignments[i].end_time > assignments[i + 1].start_time:
129 | return False
130 |
131 | return True
132 |
--------------------------------------------------------------------------------
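A hedged sketch of configuring `QLearningSolver` with the keys documented in its constructor docstring. The jobs/routes/resources lists come from your own data preparation, so the solve call is left commented.

```python
from lekin.solver.reinforcement_learning.q_learning import QLearningSolver

solver = QLearningSolver(
    config={
        "learning_rate": 1e-3,
        "gamma": 0.99,
        "epsilon": 1.0,
        "epsilon_decay": 0.995,
        "num_episodes": 200,  # fewer episodes for a quick trial run
        "model_path": None,   # train from scratch instead of loading a checkpoint
    }
)

# solution = solver.solve(jobs, routes, resources)
# assert solver.validate_solution(solution)
# print(solution.get_makespan())
```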
/lekin/solver/core/solution.py:
--------------------------------------------------------------------------------
1 | """
2 | Core solution representation for scheduling.
3 | """
4 |
5 | from dataclasses import dataclass, field
6 | from datetime import datetime
7 | from typing import Any, Dict, List, Optional
8 |
9 |
10 | @dataclass
11 | class OperationAssignment:
12 | """Represents the assignment of an operation to a resource and time slot."""
13 |
14 | operation_id: str
15 | job_id: str
16 | resource_id: str
17 | start_time: datetime
18 | end_time: datetime
19 | sequence_number: int = 0
20 |
21 | @property
22 | def duration(self) -> float:
23 | """Get the duration of the operation in hours."""
24 | return (self.end_time - self.start_time).total_seconds() / 3600
25 |
26 |
27 | @dataclass
28 | class Solution:
29 | """Represents a scheduling solution."""
30 |
31 | problem_id: str
32 | solver_type: str
33 | assignments: List[OperationAssignment] = field(default_factory=list)
34 | metadata: Dict[str, Any] = field(default_factory=dict)
35 |
36 | def add_assignment(self, assignment: OperationAssignment) -> None:
37 | """Add an operation assignment to the solution.
38 |
39 | Args:
40 | assignment: The operation assignment to add
41 | """
42 | self.assignments.append(assignment)
43 |
44 | def get_assignments_by_job(self, job_id: str) -> List[OperationAssignment]:
45 | """Get all assignments for a specific job.
46 |
47 | Args:
48 | job_id: The ID of the job
49 |
50 | Returns:
51 | List of operation assignments for the job
52 | """
53 | return [a for a in self.assignments if a.job_id == job_id]
54 |
55 | def get_assignments_by_resource(self, resource_id: str) -> List[OperationAssignment]:
56 | """Get all assignments for a specific resource.
57 |
58 | Args:
59 | resource_id: The ID of the resource
60 |
61 | Returns:
62 | List of operation assignments for the resource
63 | """
64 | return [a for a in self.assignments if a.resource_id == resource_id]
65 |
66 | def get_resource_utilization(self, resource_id: str) -> float:
67 | """Calculate the utilization of a resource.
68 |
69 | Args:
70 | resource_id: The ID of the resource
71 |
72 | Returns:
73 | Resource utilization as a float between 0 and 1
74 | """
75 | resource_assignments = self.get_assignments_by_resource(resource_id)
76 | if not resource_assignments:
77 | return 0.0
78 |
79 | total_duration = sum(a.duration for a in resource_assignments)
80 | time_span = max(a.end_time for a in resource_assignments) - min(a.start_time for a in resource_assignments)
81 |         return total_duration / (time_span.total_seconds() / 3600)  # busy hours divided by the occupied span in hours
82 |
83 | def get_makespan(self) -> float:
84 | """Calculate the makespan of the solution.
85 |
86 | Returns:
87 | Makespan in hours
88 | """
89 | if not self.assignments:
90 | return 0.0
91 |
92 | return (
93 | max(a.end_time for a in self.assignments) - min(a.start_time for a in self.assignments)
94 | ).total_seconds() / 3600
95 |
96 | def get_tardiness(self) -> float:
97 | """Calculate the total tardiness of the solution.
98 |
99 | Returns:
100 | Total tardiness in hours
101 | """
102 | # Implement your tardiness calculation logic here
103 | return 0.0
104 |
105 | def to_dict(self) -> Dict[str, Any]:
106 | """Convert the solution to a dictionary representation.
107 |
108 | Returns:
109 | Dictionary representation of the solution
110 | """
111 | return {
112 | "problem_id": self.problem_id,
113 | "solver_type": self.solver_type,
114 | "assignments": [
115 | {
116 | "operation_id": a.operation_id,
117 | "job_id": a.job_id,
118 | "resource_id": a.resource_id,
119 | "start_time": a.start_time.isoformat(),
120 | "end_time": a.end_time.isoformat(),
121 | "sequence_number": a.sequence_number,
122 | }
123 | for a in self.assignments
124 | ],
125 | "metadata": self.metadata,
126 | }
127 |
128 | @classmethod
129 | def from_dict(cls, data: Dict[str, Any]) -> "Solution":
130 | """Create a solution from a dictionary representation.
131 |
132 | Args:
133 | data: Dictionary representation of the solution
134 |
135 | Returns:
136 | Solution instance
137 | """
138 | solution = cls(
139 | problem_id=data["problem_id"], solver_type=data["solver_type"], metadata=data.get("metadata", {})
140 | )
141 |
142 | for assignment_data in data["assignments"]:
143 | solution.add_assignment(
144 | OperationAssignment(
145 | operation_id=assignment_data["operation_id"],
146 | job_id=assignment_data["job_id"],
147 | resource_id=assignment_data["resource_id"],
148 | start_time=datetime.fromisoformat(assignment_data["start_time"]),
149 | end_time=datetime.fromisoformat(assignment_data["end_time"]),
150 | sequence_number=assignment_data.get("sequence_number", 0),
151 | )
152 | )
153 |
154 | return solution
155 |
--------------------------------------------------------------------------------
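A short usage sketch for `Solution` and `OperationAssignment`; the IDs and timestamps below are made up for illustration.

```python
from datetime import datetime

from lekin.solver.core.solution import OperationAssignment, Solution

solution = Solution(problem_id="demo", solver_type="forward")
solution.add_assignment(
    OperationAssignment(
        operation_id="1",
        job_id="J1",
        resource_id="M1",
        start_time=datetime(2024, 1, 1, 8, 0),
        end_time=datetime(2024, 1, 1, 13, 0),
    )
)
solution.add_assignment(
    OperationAssignment(
        operation_id="2",
        job_id="J1",
        resource_id="M2",
        start_time=datetime(2024, 1, 1, 13, 0),
        end_time=datetime(2024, 1, 1, 16, 0),
        sequence_number=1,
    )
)

print(solution.get_makespan())                  # 8.0 hours (08:00 -> 16:00)
print(solution.get_resource_utilization("M1"))  # 1.0: M1 is busy for its whole occupied span
round_trip = Solution.from_dict(solution.to_dict())  # serialization round-trip
```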
/lekin/solver/meta_heuristics/genetic.py:
--------------------------------------------------------------------------------
1 | """Genetic scheduler"""
2 |
3 | import copy
4 | import random
5 |
6 | from lekin.lekin_struct import JobCollector, ResourceCollector, RouteCollector
7 |
8 |
9 | class GeneticScheduler:
10 | def __init__(
11 | self,
12 | job_collector: JobCollector,
13 | resource_collector: ResourceCollector,
14 | route_collector: RouteCollector = None,
15 | initial_schedule=None,
16 | **kwargs,
17 | ):
18 | self.job_collector = job_collector
19 | self.initial_schedule = initial_schedule
20 | self.optimizer = GeneticOPT()
21 |
22 | def parse_gene_permutation_to_solution(self):
23 | return
24 |
25 |
26 | class GeneticOPT(object):
27 | def __init__(
28 | self,
29 | population_size=50,
30 | generations=1000,
31 | crossover_rate=0.8,
32 | mutation_rate=0.2,
33 | ):
34 | self.population_size = population_size
35 | self.generations = generations
36 | self.crossover_rate = crossover_rate
37 | self.mutation_rate = mutation_rate
38 |
39 | def run(self):
40 | population = self.initialize_population()
41 |
42 | for generation in range(self.generations):
43 | selected_individuals = self.selection(population)
44 | new_population = []
45 |
46 | while len(new_population) < self.population_size:
47 | parent1 = random.choice(selected_individuals)
48 | parent2 = random.choice(selected_individuals)
49 |
50 | if random.random() < self.crossover_rate:
51 | offspring1, offspring2 = self.crossover(parent1, parent2)
52 | else:
53 | offspring1, offspring2 = parent1, parent2
54 |
55 | if random.random() < self.mutation_rate:
56 | offspring1 = self.mutation(offspring1)
57 | if random.random() < self.mutation_rate:
58 | offspring2 = self.mutation(offspring2)
59 |
60 | new_population.append(offspring1)
61 | new_population.append(offspring2)
62 |
63 | population = new_population
64 |
65 | # Find the best solution in the final population
66 | best_solution = min(population, key=lambda chromosome: self.fitness(chromosome)[0])
67 |
68 | # Return the best schedule
69 | return self.job_collector.create_schedule_from_operations(best_solution)
70 |
71 | def initialize_population(self):
72 | population = []
73 | for _ in range(self.population_size):
74 | # Shuffle the operations for each job to create a random chromosome
75 | chromosome = copy.deepcopy(self.job_collector.get_operations())
76 | for job_operations in chromosome.values():
77 | random.shuffle(job_operations)
78 | population.append(chromosome)
79 | return population
80 |
81 | def _init_ms(self):
82 | # ms_sequence: A list of resource IDs representing the machine sequence
83 | return
84 |
85 | def _init_os(self, jobs, resources):
86 | # os_sequence: A list of operation IDs representing the operation sequence.
87 | os_sequence = []
88 | ms_sequence = []
89 |
90 | all_operations = [op for job in jobs for group_op in job.group_operations for op in group_op.operations]
91 | random.shuffle(all_operations)
92 | for op in all_operations:
93 | os_sequence.append(op.operation_id)
94 |
95 | # Randomly assign a resource to each operation
96 | assigned_resource = random.choice(resources).resource_id
97 | ms_sequence.append(assigned_resource)
98 |
99 | return os_sequence, ms_sequence
100 |
101 | def fitness(self, chromosome):
102 | # Calculate the fitness of a chromosome based on the scheduling criteria (e.g., makespan, tardiness)
103 | # The lower the fitness value, the better the solution
104 | # Return a tuple with the fitness value and the schedule
105 | schedule = self.job_collector.create_schedule_from_operations(chromosome)
106 | fitness_value = 0
107 | return fitness_value, schedule
108 |
109 | def selection(self, population):
110 | # Select the best individuals based on their fitness values
111 | # You can use tournament selection, rank-based selection, or other methods
112 | # Return the selected individuals
113 | selected_individuals = 0
114 | return selected_individuals
115 |
116 | def crossover(self, parent1, parent2):
117 | # Perform crossover (recombination) between two parents to create two offspring
118 | # You can use one-point crossover, two-point crossover, or other methods
119 | # Return the two offspring
120 | offspring1, offspring2 = 0, 0
121 | return offspring1, offspring2
122 |
123 | def mutation(self, chromosome):
124 | # Introduce random changes in the chromosome to add diversity
125 | # You can swap two operations for a randomly selected job
126 | # Return the mutated chromosome
127 | mutated_chromosome = 0
128 | return mutated_chromosome
129 |
130 | # def decode(self):
131 | # scheduling_result = SchedulingResult()
132 | # resource_availability = {
133 | # res.resource_id: 0 for res in resources
134 | # } # Tracks next available time for each resource
135 | #
136 | # for op_id, res_id in zip(os_sequence, ms_sequence):
137 | # op = operations[op_id]
138 | # resource_ready_time = resource_availability[res_id]
139 | # start_time = max(resource_ready_time, op.earliest_start)
140 | # end_time = start_time + op.processing_time
141 | #
142 | # # Update the schedule and resource availability
143 | # scheduling_result.schedule[op_id] = (res_id, start_time, end_time)
144 | # resource_availability[res_id] = end_time
145 | #
146 | # return scheduling_result
147 |
--------------------------------------------------------------------------------
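
The `selection`, `crossover`, and `mutation` hooks above are still placeholders. Below is one possible filling, sketched under the assumption that a chromosome has the layout produced by `initialize_population` (a dict mapping each job to a shuffled list of its operations); the operator choices (tournament selection, job-level uniform crossover, swap mutation) are illustrative defaults, not the library's fixed design. Inside `GeneticOPT`, the ranking key would be something like `lambda c: self.fitness(c)[0]`.

import copy
import random


def tournament_selection(population, fitness_key, tournament_size=3):
    """Keep the fittest of a small random sample, once per slot in the new mating pool."""
    return [
        min(random.sample(population, min(tournament_size, len(population))), key=fitness_key)
        for _ in range(len(population))
    ]


def job_level_crossover(parent1, parent2):
    """Swap whole per-job operation sequences between parents, so offspring stay valid permutations."""
    child1, child2 = copy.deepcopy(parent1), copy.deepcopy(parent2)
    for job_key in child1:
        if random.random() < 0.5:
            child1[job_key], child2[job_key] = copy.deepcopy(parent2[job_key]), copy.deepcopy(parent1[job_key])
    return child1, child2


def swap_mutation(chromosome):
    """Swap two operations inside one randomly chosen job to add diversity."""
    mutated = copy.deepcopy(chromosome)
    job_operations = random.choice(list(mutated.values()))
    if len(job_operations) >= 2:
        i, j = random.sample(range(len(job_operations)), 2)
        job_operations[i], job_operations[j] = job_operations[j], job_operations[i]
    return mutated
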
/lekin/solver/construction_heuristics/backward.py:
--------------------------------------------------------------------------------
1 | """Latest Possible Start Time
2 | Backward scheduler: operations are placed as late as possible, working backward
3 | from each job's latest allowed end time."""
4 |
5 | import logging
6 | import math
7 |
8 | from lekin.lekin_struct.job import Job, JobCollector
9 | from lekin.lekin_struct.operation import Operation
10 | from lekin.lekin_struct.resource import ResourceCollector
11 | from lekin.lekin_struct.route import RouteCollector
12 | from lekin.lekin_struct.timeslot import TimeSlot
13 | from lekin.solver.construction_heuristics.base import BaseScheduler
14 |
15 |
16 | class BackwardScheduler(object):
17 | def __init__(
18 | self,
19 | job_collector: JobCollector,
20 | resource_collector: ResourceCollector,
21 | route_collector: RouteCollector = None,
22 | **kwargs,
23 | ) -> None:
24 | self.job_collector = job_collector
25 | self.resource_collector = resource_collector
26 | self.route_collector = route_collector
27 |
28 | for key, value in kwargs.items():
29 | setattr(self, key, value)
30 |
31 | def run(self) -> None:
32 | for i, job in enumerate(self.job_collector.job_list):
33 | self.scheduling_job(job, self.resource_collector, self.route_collector)
34 | logging.info("First Scheduling Done")
35 | return
36 |
37 | def scheduling_job(self, job: Job, resource_collector, route_collector: RouteCollector) -> None:
38 | logging.info(f"\nAssign Job {job.job_id}")
39 |
40 | if route_collector is not None:
41 | route_id = job.assigned_route_id
42 | route = None
43 | for r in route_collector:
44 | if r.route_id == route_id:
45 | route = r
46 | break
47 | if not route:
48 |                 logging.warning(f"Route with ID '{job.assigned_route_id}' not found for Job ID '{job.job_id}'. Skipping job.")
49 |                 return
50 | job.operations = route.operations_sequence
51 |
52 | op_earliest_start = 0 # forward constraint
53 | op_latest_end = 150 # backward constraint
54 | for operation in job.operations[::-1]: # inverse
55 | logging.info(f"\tAssign Operation {operation.operation_id} of Job {job.job_id}")
56 | chosen_resource, chosen_timeslot_hour = self.find_best_resource_and_timeslot_for_operation(
57 | operation, op_latest_end, op_earliest_start
58 | )
59 |
60 | if chosen_resource and chosen_timeslot_hour:
61 | logging.info(
62 | f"\tOperation {operation.operation_id} assigned in: resource"
63 | f" {chosen_resource.resource_id}, {min(chosen_timeslot_hour)} -"
64 | f" {max(chosen_timeslot_hour)}"
65 | )
66 |
67 | # assign
68 | operation.assigned_resource = chosen_resource
69 | operation.assigned_hours = chosen_timeslot_hour
70 | chosen_resource.assigned_operations.append(operation)
71 | chosen_resource.assigned_hours += chosen_timeslot_hour
72 |
73 | # op_earliest_start = chosen_timeslot_hour[-1] + 1
74 | op_latest_end = chosen_timeslot_hour[0] - 1
75 | return
76 |
77 | def find_best_resource_and_timeslot_for_operation(
78 | self, operation: Operation, op_latest_end=None, op_earliest_start=None, **kwargs
79 | ):
80 | available_resource = operation.available_resource
81 |
82 |         latest_index = 0  # default to the first resource so the indexing below is always valid
83 |         resource_latest_time = float("-inf")
84 | for i, resource in enumerate(available_resource):
85 | resource_time = resource.get_latest_available_time(duration=operation.processing_time, end=op_latest_end)
86 |
87 | if resource_time > resource_latest_time:
88 | latest_index = i
89 | resource_latest_time = resource_time
90 |
91 | chosen_resource = available_resource[latest_index]
92 | latest_time = int(min(op_latest_end, resource_latest_time))
93 |         chosen_hours = list(range(latest_time - math.ceil(operation.processing_time), latest_time))
94 | return chosen_resource, chosen_hours
95 |
96 | def assign_operation(self, operation: Operation, start_time, end_time, resources):
97 | timeslot = TimeSlot(start_time, end_time)
98 | self.timeslots.append(timeslot)
99 | for resource in resources:
100 | # Add timeslot to resource's schedule
101 | resource.schedule.append(timeslot)
102 | # Link operation to scheduled timeslot
103 | operation.scheduled_timeslot = timeslot
104 |
105 | def select_resources(self, job: Job, operation: Operation):
106 | available_slots = self.find_available_timeslots(job, operation)
107 |
108 | selected_resources = []
109 | for slot in available_slots:
110 | resources = slot.available_resources()
111 | resource = self.optimize_resource_selection(resources, operation)
112 | selected_resources.append((slot, resource))
113 | return selected_resources
114 |
115 | def find_available_timeslots(self, job, operation):
116 | # Search timeslots and filter based on:
117 | # - operation duration
118 | # - predecessor timeslots
119 | # - resource requirements
120 |
121 | slots = []
122 | # for ts in job.schedule.timeslots:
123 | # if ts.end - ts.start >= operation.duration:
124 | # if all(pred in job.predecessors(ts)):
125 | # if ts.meets_resource_needs(operation):
126 | # slots.append(ts)
127 | return slots
128 |
129 | def optimize_resource_selection(self, resources, operation):
130 | # Score and prioritize resources based on:
131 | # - Capacity
132 | # - Changeover time
133 | # - Utilization
134 |
135 | scored = []
136 | for resource in resources:
137 | score = 0
138 | if resource.capacity >= operation.required_capacity:
139 | score += 1
140 | if resource.type in operation.preferred_resources:
141 | score += 1
142 | # Prioritize resources with less adjacent timeslots
143 | score -= len(resource.adjacent_timeslots(operation))
144 | scored.append((score, resource))
145 | best = max(scored, key=lambda x: x[0])
146 | return best[1]
147 |
--------------------------------------------------------------------------------
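
A small worked example of the backward time-window arithmetic in `find_best_resource_and_timeslot_for_operation`, using hypothetical numbers (integer hour indices, a 3-hour operation, a latest allowed end of hour 150):

import math

op_latest_end = 150          # latest hour the current operation may finish (hypothetical)
processing_time = 3          # operation duration in hours (hypothetical)
resource_latest_time = 148   # latest finishing hour the chosen resource can offer (hypothetical)

latest_time = int(min(op_latest_end, resource_latest_time))                        # 148
chosen_hours = list(range(latest_time - math.ceil(processing_time), latest_time))
print(chosen_hours)          # [145, 146, 147] -> hour slots occupied by the operation

# The preceding operation of the same job is then bounded by the start of this one:
op_latest_end = chosen_hours[0] - 1                                                # 144
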
/lekin/lekin_struct/allocation.py:
--------------------------------------------------------------------------------
1 | """
2 | Allocation module for representing operation assignments in job shop scheduling.
3 |
4 | This module provides the Allocation class for managing the assignment of operations
5 | to resources and time slots in the scheduling process.
6 | """
7 |
8 | from dataclasses import dataclass, field
9 | from datetime import datetime
10 | from typing import TYPE_CHECKING, Any, List, Optional
11 |
12 | if TYPE_CHECKING:
13 | from .execution_mode import ExecutionMode
14 | from .operation import Operation
15 |
16 |
17 | @dataclass
18 | class Allocation:
19 | """Represents an assignment of an operation to a resource and time slot.
20 |
21 | An allocation defines when and how an operation will be executed, including
22 | its execution mode, timing constraints, and dependencies on other allocations.
23 |
24 | Attributes:
25 | id (str): Unique identifier for the allocation
26 | operation (Operation): The operation being allocated
27 | execution_mode (Optional[ExecutionMode]): The chosen execution mode
28 | delay (Optional[int]): Delay before starting the operation
29 | predecessors (List[Allocation]): Allocations that must complete before this one
30 | successors (List[Allocation]): Allocations that must start after this one
31 | start_date (Optional[int]): Calculated start date of the operation
32 | end_date (Optional[int]): Calculated end date of the operation
33 | busy_dates (List[int]): List of dates when the operation is being processed
34 | status (str): Current status of the allocation
35 | assigned_resource (Optional[str]): ID of the resource assigned to this allocation
36 | """
37 |
38 | id: str
39 | operation: "Operation"
40 | execution_mode: Optional["ExecutionMode"] = None
41 | delay: Optional[int] = None
42 | predecessors: List["Allocation"] = field(default_factory=list)
43 | successors: List["Allocation"] = field(default_factory=list)
44 | start_date: Optional[int] = None
45 | end_date: Optional[int] = None
46 | busy_dates: List[int] = field(default_factory=list)
47 | status: str = "pending"
48 | assigned_resource: Optional[str] = None
49 |
50 | def __post_init__(self):
51 | """Validate allocation attributes after initialization."""
52 | if not isinstance(self.id, str) or not self.id:
53 | raise ValueError("id must be a non-empty string")
54 | if not hasattr(self.operation, "id"):
55 | raise ValueError("operation must be a valid Operation instance")
56 |
57 | def set_execution_mode(self, mode: "ExecutionMode") -> None:
58 | """Set the execution mode for this allocation.
59 |
60 | Args:
61 | mode: The execution mode to set
62 |
63 | Raises:
64 | ValueError: If mode is not a valid ExecutionMode instance
65 | """
66 | if not hasattr(mode, "duration"):
67 | raise ValueError("mode must be a valid ExecutionMode instance")
68 | self.execution_mode = mode
69 | self.invalidate_computed_variables()
70 |
71 | def set_delay(self, delay: int) -> None:
72 | """Set the delay before starting the operation.
73 |
74 | Args:
75 | delay: The delay value to set
76 |
77 | Raises:
78 | ValueError: If delay is negative
79 | """
80 | if delay < 0:
81 | raise ValueError("delay must be non-negative")
82 | self.delay = delay
83 | self.invalidate_computed_variables()
84 |
85 | def invalidate_computed_variables(self) -> None:
86 | """Invalidate all computed timing variables."""
87 | self.start_date = None
88 | self.end_date = None
89 | self.busy_dates = []
90 | self.status = "pending"
91 |
92 | def compute_dates(self) -> None:
93 | """Compute the start date, end date, and busy dates for this allocation.
94 |
95 | This method calculates the timing information based on the execution mode
96 | and delay. It should be called after setting the execution mode and delay.
97 | """
98 | if self.execution_mode and self.delay is not None:
99 | self.start_date = self.delay
100 | self.end_date = self.start_date + self.execution_mode.duration
101 | self.busy_dates = list(range(self.start_date, self.end_date))
102 | self.status = "scheduled"
103 |
104 | def add_predecessor(self, allocation: "Allocation") -> None:
105 | """Add a predecessor allocation.
106 |
107 | Args:
108 | allocation: The predecessor allocation to add
109 |
110 | Raises:
111 | ValueError: If allocation is not a valid Allocation instance
112 | """
113 | if not isinstance(allocation, Allocation):
114 | raise ValueError("allocation must be an Allocation instance")
115 | if allocation not in self.predecessors:
116 | self.predecessors.append(allocation)
117 | if self not in allocation.successors:
118 | allocation.successors.append(self)
119 |
120 | def add_successor(self, allocation: "Allocation") -> None:
121 | """Add a successor allocation.
122 |
123 | Args:
124 | allocation: The successor allocation to add
125 |
126 | Raises:
127 | ValueError: If allocation is not a valid Allocation instance
128 | """
129 | if not isinstance(allocation, Allocation):
130 | raise ValueError("allocation must be an Allocation instance")
131 | if allocation not in self.successors:
132 | self.successors.append(allocation)
133 | if self not in allocation.predecessors:
134 | allocation.predecessors.append(self)
135 |
136 | def is_valid(self) -> bool:
137 | """Check if this allocation is valid.
138 |
139 | Returns:
140 | bool: True if the allocation is valid, False otherwise
141 | """
142 | return (
143 | self.execution_mode is not None
144 | and self.delay is not None
145 | and self.start_date is not None
146 | and self.end_date is not None
147 | )
148 |
149 | def __repr__(self) -> str:
150 | """Return a string representation of the allocation."""
151 | return f"Allocation(id={self.id}, " f"operation={self.operation.id}, " f"status={self.status})"
152 |
153 | def __str__(self) -> str:
154 | """Return a human-readable string representation of the allocation."""
155 | return f"Allocation {self.id}: " f"Operation {self.operation.id} " f"({self.status})"
156 |
--------------------------------------------------------------------------------
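
A minimal sketch of the `Allocation` lifecycle. It uses `SimpleNamespace` stand-ins for `Operation` and `ExecutionMode`, providing only the attributes `Allocation` actually touches (`id` and `duration`); in real use the corresponding `lekin_struct` classes would be passed instead, and the IDs and durations below are made up.

from types import SimpleNamespace

from lekin.lekin_struct.allocation import Allocation

op_a = SimpleNamespace(id="OP-A")
op_b = SimpleNamespace(id="OP-B")

alloc_a = Allocation(id="A1", operation=op_a)
alloc_b = Allocation(id="A2", operation=op_b)
alloc_b.add_predecessor(alloc_a)  # also registers alloc_b as a successor of alloc_a

alloc_a.set_execution_mode(SimpleNamespace(duration=4))
alloc_a.set_delay(0)
alloc_a.compute_dates()              # start_date=0, end_date=4, busy_dates=[0, 1, 2, 3]

alloc_b.set_execution_mode(SimpleNamespace(duration=2))
alloc_b.set_delay(alloc_a.end_date)  # naive chaining: start after the predecessor ends
alloc_b.compute_dates()              # start_date=4, end_date=6

print(alloc_a.is_valid(), alloc_b.is_valid())  # True True
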
/lekin/lekin_struct/relation.py:
--------------------------------------------------------------------------------
1 | """
2 | Operation Relations Module
3 |
4 | This module defines the relationships between operations in a job shop scheduling problem.
5 | These relationships determine how operations are scheduled relative to each other.
6 |
7 | The module supports various types of relationships:
8 | - ES (End-Start): Predecessor must finish before successor can start
9 | - ES-Split: Successor starts after all split operations of predecessor complete
10 | - SS (Start-Start): Operations can start simultaneously
11 | - SS-Split: Successor starts with the last split operation of predecessor
12 | - EE (End-End): Operations must finish together
13 | - EE-Split: Successor's end time depends on predecessor's split operations
14 | """
15 |
16 | from dataclasses import dataclass
17 | from datetime import datetime, timedelta
18 | from enum import Enum
19 | from typing import Any, Dict, List, Optional
20 |
21 |
22 | class RelationType(Enum):
23 | """Types of relationships between operations."""
24 |
25 | ES = "ES" # End-Start: Predecessor must finish before successor can start
26 | ES_SPLIT = "ES-Split" # Successor starts after all split operations complete
27 | SS = "SS" # Start-Start: Operations can start simultaneously
28 | SS_SPLIT = "SS-Split" # Successor starts with last split operation
29 | EE = "EE" # End-End: Operations must finish together
30 | EE_SPLIT = "EE-Split" # Successor's end time depends on split operations
31 | EE_SS = "EE-SS" # Special case where successor must start with predecessor
32 |
33 |
34 | @dataclass
35 | class OperationRelation:
36 | """Represents a relationship between two operations.
37 |
38 | Attributes:
39 | relation_type (RelationType): Type of relationship between operations
40 | predecessor_id (str): ID of the predecessor operation
41 | successor_id (str): ID of the successor operation
42 | lag_time (float): Minimum time required between operations
43 | lead_time (float): Maximum time allowed between operations
44 | split_operations (Optional[List[str]]): IDs of split operations if applicable
45 | """
46 |
47 | relation_type: RelationType
48 | predecessor_id: str
49 | successor_id: str
50 | lag_time: float = 0.0
51 | lead_time: float = 0.0
52 | split_operations: Optional[List[str]] = None
53 |
54 | def __post_init__(self):
55 | """Validate the operation relation after initialization."""
56 | if not isinstance(self.relation_type, RelationType):
57 | raise ValueError("relation_type must be a RelationType enum")
58 | if not isinstance(self.predecessor_id, str) or not self.predecessor_id:
59 | raise ValueError("predecessor_id must be a non-empty string")
60 | if not isinstance(self.successor_id, str) or not self.successor_id:
61 | raise ValueError("successor_id must be a non-empty string")
62 | if self.lag_time < 0:
63 | raise ValueError("lag_time must be non-negative")
64 | if self.lead_time < 0:
65 | raise ValueError("lead_time must be non-negative")
66 | if self.split_operations and not all(isinstance(op_id, str) for op_id in self.split_operations):
67 | raise ValueError("split_operations must be a list of strings")
68 |
69 | def calculate_start_time(self, predecessor_end_time: datetime) -> datetime:
70 | """Calculate the earliest start time for the successor operation.
71 |
72 | Args:
73 | predecessor_end_time: The end time of the predecessor operation
74 |
75 | Returns:
76 | datetime: The earliest start time for the successor operation
77 | """
78 | if self.relation_type in [RelationType.ES, RelationType.ES_SPLIT]:
79 |             return predecessor_end_time + timedelta(hours=self.lag_time)  # lag_time interpreted as hours
80 | return predecessor_end_time
81 |
82 | def calculate_end_time(self, predecessor_end_time: datetime) -> datetime:
83 | """Calculate the required end time for the successor operation.
84 |
85 | Args:
86 | predecessor_end_time: The end time of the predecessor operation
87 |
88 | Returns:
89 | datetime: The required end time for the successor operation
90 | """
91 | if self.relation_type in [RelationType.EE, RelationType.EE_SPLIT]:
92 | return predecessor_end_time
93 |         return predecessor_end_time + timedelta(hours=self.lead_time)  # lead_time interpreted as hours
94 |
95 | def __repr__(self) -> str:
96 | return (
97 | f"OperationRelation(type={self.relation_type.value}, "
98 | f"predecessor={self.predecessor_id}, successor={self.successor_id})"
99 | )
100 |
101 |
102 | class RelationManager:
103 | """Manages relationships between operations in the scheduling system.
104 |
105 | This class provides methods to store, retrieve, and manage operation
106 | relationships, ensuring proper scheduling constraints are maintained.
107 |
108 | Attributes:
109 | relations (Dict[str, List[OperationRelation]]): Map of operation IDs to their relationships
110 | """
111 |
112 | def __init__(self):
113 | """Initialize an empty relation manager."""
114 | self.relations: Dict[str, List[OperationRelation]] = {}
115 |
116 | def add_relation(self, relation: OperationRelation) -> None:
117 | """Add a relationship between operations.
118 |
119 | Args:
120 | relation: The operation relation to add
121 | """
122 | if not isinstance(relation, OperationRelation):
123 | raise TypeError("relation must be an OperationRelation instance")
124 |
125 | # Add to predecessor's relations
126 | if relation.predecessor_id not in self.relations:
127 | self.relations[relation.predecessor_id] = []
128 | self.relations[relation.predecessor_id].append(relation)
129 |
130 | # Add to successor's relations
131 | if relation.successor_id not in self.relations:
132 | self.relations[relation.successor_id] = []
133 | self.relations[relation.successor_id].append(relation)
134 |
135 | def get_relations_for_operation(self, operation_id: str) -> List[OperationRelation]:
136 | """Get all relationships for a specific operation.
137 |
138 | Args:
139 | operation_id: The ID of the operation
140 |
141 | Returns:
142 | List[OperationRelation]: List of relationships for the operation
143 | """
144 | return self.relations.get(operation_id, [])
145 |
146 | def get_predecessors(self, operation_id: str) -> List[str]:
147 | """Get all predecessor operation IDs for a specific operation.
148 |
149 | Args:
150 | operation_id: The ID of the operation
151 |
152 | Returns:
153 | List[str]: List of predecessor operation IDs
154 | """
155 | return [rel.predecessor_id for rel in self.relations.get(operation_id, []) if rel.successor_id == operation_id]
156 |
157 | def get_successors(self, operation_id: str) -> List[str]:
158 | """Get all successor operation IDs for a specific operation.
159 |
160 | Args:
161 | operation_id: The ID of the operation
162 |
163 | Returns:
164 | List[str]: List of successor operation IDs
165 | """
166 | return [rel.successor_id for rel in self.relations.get(operation_id, []) if rel.predecessor_id == operation_id]
167 |
168 | def clear(self) -> None:
169 | """Clear all relationships from the manager."""
170 | self.relations.clear()
171 |
--------------------------------------------------------------------------------
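
A short usage sketch for `OperationRelation` and `RelationManager`, with made-up operation IDs and a lag of 1.5, interpreted as hours (the same assumption as in `calculate_start_time` above):

from datetime import datetime

from lekin.lekin_struct.relation import OperationRelation, RelationManager, RelationType

# ES relation: OP-20 may only start after OP-10 has finished, plus a 1.5-hour lag.
rel = OperationRelation(
    relation_type=RelationType.ES,
    predecessor_id="OP-10",
    successor_id="OP-20",
    lag_time=1.5,
)

manager = RelationManager()
manager.add_relation(rel)
print(manager.get_successors("OP-10"))    # ['OP-20']
print(manager.get_predecessors("OP-20"))  # ['OP-10']

predecessor_end = datetime(2024, 1, 1, 12, 0)
print(rel.calculate_start_time(predecessor_end))  # 2024-01-01 13:30:00
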
/lekin/lekin_struct/timeslot.py:
--------------------------------------------------------------------------------
1 | """
2 | TimeSlot module for representing available time slots in job shop scheduling.
3 |
4 | This module provides the TimeSlot class for managing time slots in the scheduling process.
5 | A time slot represents a continuous period of time that can be assigned to operations.
6 | """
7 |
8 | from dataclasses import dataclass, field
9 | from datetime import datetime, timedelta
10 | from typing import Any, List, Optional, Union
11 |
12 | import pandas as pd
13 |
14 |
15 | @dataclass
16 | class TimeSlot:
17 | """Represents a time slot in the scheduling process.
18 |
19 | A time slot is a continuous period of time that can be assigned to operations.
20 | It tracks its start time, end time, and any assigned operation.
21 |
22 | Attributes:
23 | start_time (datetime): Start time of the slot
24 | end_time (datetime): End time of the slot
25 | assigned_operation (Optional[Any]): Operation assigned to this slot, if any
26 | duration (timedelta): Duration of the time slot
27 | metadata (Dict[str, Any]): Additional metadata for the time slot
28 | """
29 |
30 | start_time: datetime
31 | end_time: datetime
32 | assigned_operation: Optional[Any] = None
33 | metadata: dict = field(default_factory=dict)
34 |
35 | def __post_init__(self):
36 | """Validate time slot attributes after initialization."""
37 | if not isinstance(self.start_time, datetime):
38 | raise TypeError("start_time must be a datetime object")
39 | if not isinstance(self.end_time, datetime):
40 | raise TypeError("end_time must be a datetime object")
41 | if self.start_time >= self.end_time:
42 | raise ValueError("start_time must be before end_time")
43 |
44 | self.duration = self.end_time - self.start_time
45 |
46 | def assign_operation(self, operation: Any, processing_time: Union[int, float]) -> None:
47 | """Assign an operation to this time slot.
48 |
49 | Args:
50 | operation: The operation to assign
51 | processing_time: Processing time in hours
52 |
53 | Raises:
54 | ValueError: If the time slot is already occupied
55 | """
56 | if self.is_occupied():
57 | raise ValueError("Time slot is already occupied")
58 |
59 | self.assigned_operation = operation
60 | self.end_time = self.start_time + timedelta(hours=float(processing_time))
61 | self.duration = self.end_time - self.start_time
62 |
63 | def is_occupied(self) -> bool:
64 | """Check if the time slot is occupied by an operation.
65 |
66 | Returns:
67 | bool: True if the slot is occupied, False otherwise
68 | """
69 | return self.assigned_operation is not None
70 |
71 | @property
72 | def hours(self) -> List[datetime]:
73 | """Get list of hours in this time slot.
74 |
75 | Returns:
76 | List[datetime]: List of datetime objects representing each hour
77 | """
78 | return pd.date_range(start=self.start_time, end=self.end_time, freq="1H").tolist()[:-1]
79 |
80 | @property
81 | def duration_of_hours(self) -> int:
82 | """Get the duration of the time slot in hours.
83 |
84 | Returns:
85 | int: Number of hours in the time slot
86 | """
87 | return len(pd.date_range(start=self.start_time, end=self.end_time, freq="1H")) - 1
88 |
89 | def overlaps_with(self, timeslot: "TimeSlot") -> float:
90 | """Calculate the overlap duration with another time slot.
91 |
92 | Args:
93 | timeslot: Another TimeSlot to check overlap with
94 |
95 | Returns:
96 | float: Overlap duration in hours, 0 if no overlap
97 | """
98 | if not isinstance(timeslot, TimeSlot):
99 | raise TypeError("timeslot must be a TimeSlot instance")
100 |
101 | overlap_start = max(self.start_time, timeslot.start_time)
102 | overlap_end = min(self.end_time, timeslot.end_time)
103 |
104 | if overlap_start < overlap_end:
105 | return (overlap_end - overlap_start).total_seconds() / 3600
106 | return 0.0
107 |
108 | def contains(self, time: datetime) -> bool:
109 | """Check if a given time falls within this time slot.
110 |
111 | Args:
112 | time: The time to check
113 |
114 | Returns:
115 | bool: True if the time is within this slot, False otherwise
116 | """
117 | return self.start_time <= time < self.end_time
118 |
119 | def split_at(self, time: datetime) -> tuple["TimeSlot", "TimeSlot"]:
120 | """Split this time slot at a given time.
121 |
122 | Args:
123 | time: The time at which to split the slot
124 |
125 | Returns:
126 | tuple[TimeSlot, TimeSlot]: Two new time slots
127 |
128 | Raises:
129 | ValueError: If the split time is not within this slot
130 | """
131 | if not self.contains(time):
132 | raise ValueError("Split time must be within the time slot")
133 |
134 | first_slot = TimeSlot(self.start_time, time)
135 | second_slot = TimeSlot(time, self.end_time)
136 |
137 | if self.is_occupied():
138 | first_slot.assigned_operation = self.assigned_operation
139 |
140 | return first_slot, second_slot
141 |
142 | def merge_with(self, timeslot: "TimeSlot") -> "TimeSlot":
143 | """Merge this time slot with another adjacent time slot.
144 |
145 | Args:
146 | timeslot: Another TimeSlot to merge with
147 |
148 | Returns:
149 | TimeSlot: A new merged time slot
150 |
151 | Raises:
152 | ValueError: If the time slots are not adjacent
153 | """
154 | if not isinstance(timeslot, TimeSlot):
155 | raise TypeError("timeslot must be a TimeSlot instance")
156 |
157 | if self.end_time != timeslot.start_time and self.start_time != timeslot.end_time:
158 | raise ValueError("Time slots must be adjacent to merge")
159 |
160 | start = min(self.start_time, timeslot.start_time)
161 | end = max(self.end_time, timeslot.end_time)
162 |
163 | merged = TimeSlot(start, end)
164 | if self.is_occupied():
165 | merged.assigned_operation = self.assigned_operation
166 | elif timeslot.is_occupied():
167 | merged.assigned_operation = timeslot.assigned_operation
168 |
169 | return merged
170 |
171 | def __repr__(self) -> str:
172 | """Get string representation of the time slot.
173 |
174 | Returns:
175 | str: String representation
176 | """
177 | return (
178 | f"TimeSlot(start_time={self.start_time}, end_time={self.end_time}, "
179 | f"duration={self.duration}, occupied={self.is_occupied()})"
180 | )
181 |
182 | def __eq__(self, other: object) -> bool:
183 | """Check if two time slots are equal.
184 |
185 | Args:
186 | other: Another object to compare with
187 |
188 | Returns:
189 | bool: True if the time slots are equal, False otherwise
190 | """
191 | if not isinstance(other, TimeSlot):
192 | return NotImplemented
193 | return (
194 | self.start_time == other.start_time
195 | and self.end_time == other.end_time
196 | and self.assigned_operation == other.assigned_operation
197 | )
198 |
199 | def __hash__(self) -> int:
200 | """Get hash value of the time slot.
201 |
202 | Returns:
203 | int: Hash value
204 | """
205 | return hash((self.start_time, self.end_time, self.assigned_operation))
206 |
--------------------------------------------------------------------------------
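
A short usage sketch for `TimeSlot`, with arbitrary example times, showing the split / overlap / merge helpers defined above:

from datetime import datetime

from lekin.lekin_struct.timeslot import TimeSlot

slot = TimeSlot(start_time=datetime(2024, 1, 1, 8), end_time=datetime(2024, 1, 1, 12))
print(slot.duration_of_hours)  # 4

first_half, second_half = slot.split_at(datetime(2024, 1, 1, 10))
print(first_half.overlaps_with(slot))  # 2.0 -> two hours shared with the original window

merged = first_half.merge_with(second_half)
print(merged == TimeSlot(datetime(2024, 1, 1, 8), datetime(2024, 1, 1, 12)))  # True
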
/lekin/lekin_struct/route.py:
--------------------------------------------------------------------------------
1 | """
2 | Route module for representing operation sequences in job shop scheduling.
3 |
4 | This module provides the Route class and RouteCollector for managing operation sequences
5 | and their associated resources in the scheduling process.
6 | """
7 |
8 | from dataclasses import dataclass, field
9 | from typing import Any, Dict, List, Optional, Union
10 |
11 | from lekin.lekin_struct.exceptions import ValidationError
12 | from lekin.lekin_struct.operation import Operation
13 | from lekin.lekin_struct.resource import Resource
14 | from lekin.lekin_struct.timeslot import TimeSlot
15 |
16 |
17 | @dataclass
18 | class Route:
19 | """Represents a sequence of operations in the scheduling process.
20 |
21 | A route defines the order in which operations should be performed and
22 | specifies which resources are available for each operation.
23 |
24 | Attributes:
25 | route_id (str): Unique identifier for the route
26 | operations_sequence (List[Operation]): Ordered list of operations
27 | available_resources (List[Resource]): List of resources that can perform operations
28 | available_time_slots (List[TimeSlot]): List of available time slots
29 | metadata (Dict[str, Any]): Additional metadata for the route
30 | """
31 |
32 | route_id: str
33 | operations_sequence: List[Operation] = field(default_factory=list)
34 | available_resources: List[Resource] = field(default_factory=list)
35 | available_time_slots: List[TimeSlot] = field(default_factory=list)
36 | metadata: Dict[str, Any] = field(default_factory=dict)
37 |
38 | def __post_init__(self):
39 | """Validate route attributes after initialization."""
40 | if not isinstance(self.route_id, str) or not self.route_id:
41 | raise ValidationError("route_id must be a non-empty string")
42 | if not all(isinstance(op, Operation) for op in self.operations_sequence):
43 | raise ValidationError("All operations must be Operation instances")
44 | if not all(isinstance(res, Resource) for res in self.available_resources):
45 | raise ValidationError("All resources must be Resource instances")
46 | if not all(isinstance(ts, TimeSlot) for ts in self.available_time_slots):
47 | raise ValidationError("All time slots must be TimeSlot instances")
48 |
49 | def add_operation(self, operation: Operation) -> None:
50 | """Add an operation to the sequence.
51 |
52 | Args:
53 | operation: The operation to add
54 |
55 | Raises:
56 | ValidationError: If operation is not an Operation instance
57 | """
58 | if not isinstance(operation, Operation):
59 | raise ValidationError("operation must be an Operation instance")
60 | self.operations_sequence.append(operation)
61 |
62 | def get_operations(self) -> List[Operation]:
63 | """Get the sequence of operations.
64 |
65 | Returns:
66 | List[Operation]: The sequence of operations
67 | """
68 | return self.operations_sequence
69 |
70 | def add_resource(self, resource: Resource) -> None:
71 | """Add a resource to the available resources.
72 |
73 | Args:
74 | resource: The resource to add
75 |
76 | Raises:
77 | ValidationError: If resource is not a Resource instance
78 | """
79 | if not isinstance(resource, Resource):
80 | raise ValidationError("resource must be a Resource instance")
81 | self.available_resources.append(resource)
82 |
83 | def add_time_slot(self, time_slot: TimeSlot) -> None:
84 | """Add a time slot to the available time slots.
85 |
86 | Args:
87 | time_slot: The time slot to add
88 |
89 | Raises:
90 | ValidationError: If time_slot is not a TimeSlot instance
91 | """
92 | if not isinstance(time_slot, TimeSlot):
93 | raise ValidationError("time_slot must be a TimeSlot instance")
94 | self.available_time_slots.append(time_slot)
95 |
96 | def get_total_processing_time(self) -> float:
97 | """Calculate total processing time for all operations.
98 |
99 | Returns:
100 | float: Total processing time
101 | """
102 | return sum(op.processing_time for op in self.operations_sequence)
103 |
104 | def get_operation_by_id(self, operation_id: str) -> Optional[Operation]:
105 | """Get an operation by its ID.
106 |
107 | Args:
108 | operation_id: ID of the operation to find
109 |
110 | Returns:
111 | Optional[Operation]: The found operation or None
112 | """
113 | return next((op for op in self.operations_sequence if op.operation_id == operation_id), None)
114 |
115 | def __eq__(self, other: object) -> bool:
116 | if not isinstance(other, Route):
117 | return NotImplemented
118 | return self.route_id == other.route_id
119 |
120 | def __hash__(self) -> int:
121 | return hash(self.route_id)
122 |
123 | def __str__(self) -> str:
124 | return f"Route(id={self.route_id})"
125 |
126 | def __repr__(self) -> str:
127 | return (
128 | f"Route(route_id='{self.route_id}', "
129 | f"operations={[op.operation_id for op in self.operations_sequence]}, "
130 | f"resources={[res.resource_id for res in self.available_resources]})"
131 | )
132 |
133 |
134 | @dataclass
135 | class RouteCollector:
136 | """Manages collections of routes in the scheduling system.
137 |
138 | This class provides methods to store, retrieve, and manage routes
139 | across different jobs. It maintains a centralized registry of all
140 | routes for efficient access and management.
141 |
142 | Attributes:
143 | routes (Dict[str, Route]): Map of route IDs to routes
144 | """
145 |
146 | routes: Dict[str, Route] = field(default_factory=dict)
147 |
148 | def add_route(self, route: Route) -> None:
149 | """Add a route to the collector.
150 |
151 | Args:
152 | route: The route to add
153 |
154 | Raises:
155 | ValidationError: If route is not a Route instance or if route_id already exists
156 | """
157 | if not isinstance(route, Route):
158 | raise ValidationError("route must be a Route instance")
159 | if route.route_id in self.routes:
160 | raise ValidationError(f"Route with ID {route.route_id} already exists")
161 | self.routes[route.route_id] = route
162 |
163 | def get_route_by_id(self, route_id: str) -> Optional[Route]:
164 | """Get a route by its ID.
165 |
166 | Args:
167 | route_id: ID of the route to find
168 |
169 | Returns:
170 | Optional[Route]: The found route or None
171 | """
172 | return self.routes.get(route_id)
173 |
174 | def get_all_routes(self) -> List[Route]:
175 | """Get all routes in the collector.
176 |
177 | Returns:
178 | List[Route]: List of all routes
179 | """
180 | return list(self.routes.values())
181 |
182 | def remove_route(self, route_id: str) -> None:
183 | """Remove a route from the collector.
184 |
185 | Args:
186 | route_id: ID of the route to remove
187 |
188 | Raises:
189 | KeyError: If route_id does not exist
190 | """
191 | if route_id not in self.routes:
192 | raise KeyError(f"Route with ID {route_id} does not exist")
193 | del self.routes[route_id]
194 |
195 | def __iter__(self):
196 | """Iterate over all routes."""
197 | return iter(self.routes.values())
198 |
199 | def __len__(self) -> int:
200 | """Get the number of routes in the collector."""
201 | return len(self.routes)
202 |
203 | def __str__(self) -> str:
204 | return f"RouteCollector(routes={len(self.routes)})"
205 |
206 | def __repr__(self) -> str:
207 | return f"RouteCollector(routes={list(self.routes.keys())})"
208 |
--------------------------------------------------------------------------------
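
A short usage sketch for `Route` and `RouteCollector`, with made-up route IDs; iteration over the collector is exactly what `BackwardScheduler.scheduling_job` relies on:

from lekin.lekin_struct.route import Route, RouteCollector

routes = RouteCollector()
routes.add_route(Route(route_id="R-MILLING"))
routes.add_route(Route(route_id="R-ASSEMBLY"))

print(len(routes))                          # 2
print(routes.get_route_by_id("R-MILLING"))  # Route(id=R-MILLING)

for route in routes:                        # RouteCollector is iterable
    print(route.route_id)

routes.remove_route("R-ASSEMBLY")
print(routes.get_route_by_id("R-ASSEMBLY"))  # None
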