Added sci-image

Netkas 2021-01-12 22:08:17 -05:00
parent 6fe9db777d
commit 6348354e0d
930 changed files with 136189 additions and 2 deletions


@@ -4,7 +4,7 @@
   <content url="file://$MODULE_DIR$">
     <sourceFolder url="file://$MODULE_DIR$/services/corenlp/src" isTestSource="false" />
   </content>
-  <orderEntry type="inheritedJdk" />
+  <orderEntry type="jdk" jdkName="Python 3.8" jdkType="Python SDK" />
   <orderEntry type="sourceFolder" forTests="false" />
   <orderEntry type="library" name="head.load" level="application" />
   <orderEntry type="library" name="d3" level="application" />


@@ -1,4 +1,4 @@
 <?xml version="1.0" encoding="UTF-8"?>
 <project version="4">
-  <component name="ProjectRootManager" version="2" project-jdk-name="15" project-jdk-type="JavaSDK" />
+  <component name="ProjectRootManager" version="2" project-jdk-name="Python 3.8" project-jdk-type="Python SDK" />
 </project>

deps/scikit-image/.appveyor.yml

@@ -0,0 +1,81 @@
# AppVeyor.com is a Continuous Integration service to build and run tests under
# Windows

environment:
  matrix:
    - PYTHON: C:\Python37
    - PYTHON: C:\Python37-x64
    - PYTHON: C:\Python38
    - PYTHON: C:\Python38-x64
    - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2019
      PYTHON: C:\Python39
    - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2019
      PYTHON: C:\Python39-x64
    # build pre release packages on Python 3.8 since it has been out long
    # enough for wheels to be built for packages that need to be compiled.
    - PYTHON: C:\Python38-x64
      PIP_FLAGS: --pre

matrix:
  fast_finish: true

install:
  - ECHO "Filesystem root:"
  - ps: "ls \"C:/\""

  # If there is a newer build queued for the same PR, cancel this one.
  # The AppVeyor 'rollout builds' option is supposed to serve the same
  # purpose but is problematic because it tends to cancel builds pushed
  # directly to master instead of just PR builds.
  # credits: JuliaLang developers.
  - ps: if ($env:APPVEYOR_PULL_REQUEST_NUMBER -and $env:APPVEYOR_BUILD_NUMBER -ne ((Invoke-RestMethod `
        https://ci.appveyor.com/api/projects/$env:APPVEYOR_ACCOUNT_NAME/$env:APPVEYOR_PROJECT_SLUG/history?recordsNumber=50).builds | `
        Where-Object pullRequestId -eq $env:APPVEYOR_PULL_REQUEST_NUMBER)[0].buildNumber) { `
        throw "There are newer queued builds for this pull request, failing early." }

  - "SET PATH=%PYTHON%;%PYTHON%\\Scripts;%PATH%"
  - "python -m pip install -U pip"

  # Check that we have the expected version and architecture for Python
  - "python --version"
  - "python -c \"import struct; print(struct.calcsize('P') * 8)\""
  - "pip --version"

  # Install the build and runtime dependencies of the project.
  - pip install %PIP_FLAGS% -r requirements/default.txt
  - pip install %PIP_FLAGS% -r requirements/build.txt
  - python setup.py bdist_wheel bdist_wininst
  - ps: "ls dist"

  # Install the generated wheel package to test it.
  - pip install %PIP_FLAGS% --no-index --find-links dist/ scikit-image

  # Install the test dependencies
  - pip install %PIP_FLAGS% -r requirements/test.txt

# Not a .NET project, we build scikit-image in the install step instead
build: false

test_script:
  ## Build the docs
  #- pip install sphinx pytest-runner sphinx-gallery
  #- SET PYTHON=%PYTHON%\\python.exe && cd doc && make html

  # Change to a non-source folder to make sure we run the tests on the
  # installed library.
  - "cd C:\\"

  # Use the Agg backend in Matplotlib
  - echo backend:Agg > matplotlibrc

  # Run unit tests with pytest
  - pytest -v --pyargs skimage

#artifacts:
#  # Archive the generated wheel package in the ci.appveyor.com build report.
#  - path: dist\*

#on_success:
#  - TODO: upload the content of dist/*.whl to a public wheelhouse

cache:
  # Avoid re-downloading large packages
  - '%APPDATA%\pip\Cache'

deps/scikit-image/.binder/postBuild

@@ -0,0 +1,44 @@
#!/bin/bash
set -e
# Taken from https://github.com/scikit-learn/scikit-learn/blob/72b3041ed57e42817e4c5c9853b3a2597cab3654/.binder/postBuild
# under BSD3 license, copyright the scikit-learn contributors
python -m pip install .
# This script is called in a binder context. When this script is called, we are
# inside a git checkout of the scikit-image/scikit-image repo. This script
# generates notebooks from the scikit-image python examples.
if [[ ! -f /.dockerenv ]]; then
    echo "This script was written for repo2docker and is supposed to run inside a docker container."
    echo "Exiting because this script can delete data if run outside of a docker container."
    exit 1
fi
# Copy content we need from the scikit-image repo
TMP_CONTENT_DIR=/tmp/scikit-image
mkdir -p $TMP_CONTENT_DIR
cp -r doc/examples .binder $TMP_CONTENT_DIR
# delete everything in current directory including dot files and dot folders
# to create a "clean" experience for readers
find . -delete
# Generate notebooks and remove other files from examples folder
GENERATED_NOTEBOOKS_DIR=auto_examples
cp -r $TMP_CONTENT_DIR/examples $GENERATED_NOTEBOOKS_DIR
find $GENERATED_NOTEBOOKS_DIR -name '*.py' -exec sphx_glr_python_to_jupyter.py '{}' +
NON_NOTEBOOKS=$(find $GENERATED_NOTEBOOKS_DIR -type f | grep -v '\.ipynb')
rm -f $NON_NOTEBOOKS
# Modify path to be consistent by the path given by sphinx-gallery
mkdir notebooks
mv $GENERATED_NOTEBOOKS_DIR notebooks/
# Put the .binder folder back (may be useful for debugging purposes)
mv $TMP_CONTENT_DIR/.binder .
# Final clean up
rm -rf $TMP_CONTENT_DIR


@@ -0,0 +1,12 @@
-r ../requirements/default.txt
# numpy is present in both default.txt and build.txt with slightly
# different pinning instructions, which would make pip give up and error
Cython>=0.29.13
wheel
scikit-learn
dask[array]>=0.15.0
cloudpickle>=0.2.1
pandas>=0.23.0
seaborn>=0.7.1
sphinx-gallery


@@ -0,0 +1 @@
0/doc/build/html/index.html

deps/scikit-image/.circleci/config.yml

@@ -0,0 +1,51 @@
version: 2

jobs:
  doc:
    docker:
      - image: circleci/python:3.8.2
    steps:
      - checkout

      - run:
          name: concatenate requirement files
          command: cat requirements/*.txt > requirements/all_requirements

      - restore_cache:
          keys:
            - data-v1-{{ checksum "skimage/data/_registry.py" }}
            - packages-v1-{{ checksum "requirements/all_requirements" }}

      - run:
          name: install dependencies and package
          command: |
            python -m venv skimage_venv
            source skimage_venv/bin/activate
            python -m pip install --upgrade pip
            python -m pip install -r requirements/build.txt
            python -m pip install -e . -vv
            python -m pip install -r requirements/default.txt
            python -m pip install -r requirements/docs.txt
            export

      - save_cache:
          key: packages-v1-{{ checksum "requirements/all_requirements" }}
          paths:
            - skimage_venv

      - run:
          name: build doc
          no_output_timeout: 50m
          command: |
            source skimage_venv/bin/activate
            cd doc
            make clean
            SPHINXOPTS="-j 1" make html

      - save_cache:
          key: data-v1-{{ checksum "skimage/data/_registry.py" }}
          paths:
            - /home/circleci/.cache/scikit-image/master

      - store_artifacts:
          path: doc/build/html
          destination: doc/build/html

workflows:
  version: 2
  build-doc:
    jobs:
      - doc

deps/scikit-image/.codecov.yml

@@ -0,0 +1,17 @@
ignore:
  - "skimage/_build.py"
  - "scripts/skivi.py"
  - "setup.py"
  - "**/setup.py"

coverage:
  status:
    project:
      default:
        # Drops on the order 0.01% are typical even when no change occurs
        # Having this threshold set a little higher (0.1%) than that makes it
        # a little more tolerant to fluctuations
        threshold: 0.1%

# Disable PR comment
comment: off

deps/scikit-image/.github/FUNDING.yml

@@ -0,0 +1 @@
custom: https://numfocus.org/donate-to-scikit-image


@@ -0,0 +1,27 @@
## Description
<!--
(Note: for guidance on how to use `scikit-image`, please post instead on https://forum.image.sc/tag/scikit-image)
-->
## Way to reproduce
```python
# Place the full code we need to recreate your issue here
# upload all necessary images to github too!
```
## Version information
```python
# Paste the output of the following python commands
from __future__ import print_function
import sys; print(sys.version)
import platform; print(platform.platform())
import skimage; print("scikit-image version: {}".format(skimage.__version__))
import numpy; print("numpy version: {}".format(numpy.__version__))
```
```python
# your output here
```


@@ -0,0 +1,29 @@
## Description
<!-- If this is a bug-fix or enhancement, state the issue # it closes -->
<!-- If this is a new feature, reference what paper it implements. -->
## Checklist
<!-- It's fine to submit PRs which are a work in progress! -->
<!-- But before they are merged, all PRs should provide: -->
- [Docstrings for all functions](https://github.com/numpy/numpy/blob/master/doc/example.py)
- Gallery example in `./doc/examples` (new features only)
- Benchmark in `./benchmarks`, if your changes aren't covered by an
existing benchmark
- Unit tests
- Clean style in [the spirit of PEP8](https://www.python.org/dev/peps/pep-0008/)
<!-- For detailed information on these and other aspects see -->
<!-- the scikit-image contribution guidelines. -->
<!-- https://scikit-image.org/docs/dev/contribute.html -->
## For reviewers
<!-- Don't remove the checklist below. -->
- Check that the PR title is short, concise, and will make sense 1 year
later.
- Check that new functions are imported in corresponding `__init__.py`.
- Check that new features, API changes, and deprecations are mentioned in
`doc/release/release_dev.rst`.


@@ -0,0 +1,53 @@
on:
  push:
    branches:
      - master

name: Build docs

jobs:
  build:
    name: Build docs
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v2

      - name: Install Python
        uses: actions/setup-python@v1
        with:
          python-version: 3.7

      - name: Install dependencies
        run: |
          sudo apt install optipng
          python -m pip install --upgrade pip
          python -m pip install -r requirements/build.txt
          python -m pip install -r requirements/default.txt
          python -m pip install -r requirements/optional.txt
          python -m pip install -r requirements/docs.txt

      - name: Install package
        run: |
          python -m pip install .

      - name: Push docs
        run: |
          eval "$(ssh-agent -s)"
          ssh-add - <<< "${DEPLOY_KEY}"
          # See https://github.community/t/github-actions-bot-email-address/17204/5
          git config --global user.email "41898282+github-actions[bot]@users.noreply.github.com"
          git config --global user.name "github-actions"
          (cd doc && make html)
          git clone --quiet --branch=gh-pages "${GH_REF}" doc_build
          cd doc_build
          git rm -r dev
          cp -r ../doc/build/html dev
          git add dev
          git commit -m "Deployed to GitHub Pages"
          git push --force --quiet "${GH_REF}" gh-pages
        env:
          GH_REF: git@github.com:scikit-image/docs.git
          DEPLOY_KEY: ${{ secrets.DOC_DEPLOY_KEY_PRIVATE }}
          GIT_SSH_COMMAND: "ssh -o StrictHostKeyChecking=no"

      - name: Store docs as artifact
        uses: actions/upload-artifact@v1
        with:
          name: docs
          path: doc/build/html


@@ -0,0 +1,117 @@
name: Build CI wheels

on:
  push:
    branches:
      - master
      - v[0-9]+.[0-9]+.x
      - cibuildwheel
    tags:
      - v*

jobs:
  build_wheels:
    name: Build wheels on ${{ matrix.os }}
    runs-on: ${{ matrix.os }}
    strategy:
      fail-fast: false
      matrix:
        os: [ubuntu-18.04, windows-latest, macos-latest]
    steps:
      - uses: actions/checkout@v2
        with:
          fetch-depth: 0

      - uses: actions/setup-python@v2
        name: Install Python
        with:
          python-version: '3.7'

      - name: Install cibuildwheel
        run: |
          python -m pip install cibuildwheel==1.6.3

      - name: Build wheels for CPython 3.9 and Mac OS
        if: matrix.os == 'macos-latest'
        run: |
          brew install libomp
          python -m cibuildwheel --output-dir dist
        env:
          CIBW_BUILD: "cp39-*"
          CIBW_MANYLINUX_X86_64_IMAGE: manylinux1
          CIBW_MANYLINUX_I686_IMAGE: manylinux1
          # CIBW_BEFORE_BUILD: pip install certifi numpy==1.19.3
          CC: /usr/bin/clang
          CXX: /usr/bin/clang++
          CPPFLAGS: "-Xpreprocessor -fopenmp"
          CFLAGS: "-Wno-implicit-function-declaration -I/usr/local/opt/libomp/include"
          CXXFLAGS: "-I/usr/local/opt/libomp/include"
          LDFLAGS: "-Wl,-rpath,/usr/local/opt/libomp/lib -L/usr/local/opt/libomp/lib -lomp"
          CIBW_TEST_REQUIRES: pytest pooch pytest-localserver pytest-faulthandler
          CIBW_TEST_COMMAND: pytest --pyargs skimage

      - name: Build wheels for CPython 3.9 (Linux and Windows)
        if: matrix.os != 'macos-latest'
        run: |
          python -m cibuildwheel --output-dir dist
        env:
          CIBW_BUILD: "cp39-*"
          CIBW_MANYLINUX_X86_64_IMAGE: manylinux1
          CIBW_MANYLINUX_I686_IMAGE: manylinux1
          # CIBW_BEFORE_BUILD: pip install certifi numpy==1.19.3
          CIBW_TEST_REQUIRES: pytest pooch pytest-localserver pytest-faulthandler
          CIBW_TEST_COMMAND: pytest --pyargs skimage

      - name: Build wheels for CPython (MacOS)
        if: matrix.os == 'macos-latest'
        run: |
          brew install libomp
          python -m cibuildwheel --output-dir dist
        env:
          CIBW_BUILD: "cp3?-*"
          CIBW_SKIP: "cp35-* cp36-* cp39-*"
          CIBW_MANYLINUX_X86_64_IMAGE: manylinux1
          CIBW_MANYLINUX_I686_IMAGE: manylinux1
          # CIBW_BEFORE_BUILD: pip install certifi numpy==1.16
          CC: /usr/bin/clang
          CXX: /usr/bin/clang++
          CPPFLAGS: "-Xpreprocessor -fopenmp"
          CFLAGS: "-Wno-implicit-function-declaration -I/usr/local/opt/libomp/include"
          CXXFLAGS: "-I/usr/local/opt/libomp/include"
          LDFLAGS: "-Wl,-rpath,/usr/local/opt/libomp/lib -L/usr/local/opt/libomp/lib -lomp"
          CIBW_TEST_REQUIRES: pytest pooch pytest-localserver pytest-faulthandler
          CIBW_TEST_COMMAND: pytest --pyargs skimage

      - name: Build wheels for CPython (Linux and Windows)
        if: matrix.os != 'macos-latest'
        run: |
          python -m cibuildwheel --output-dir dist
        env:
          CIBW_BUILD: "cp3?-*"
          CIBW_SKIP: "cp35-* cp36-* cp39-*"
          CIBW_MANYLINUX_X86_64_IMAGE: manylinux1
          CIBW_MANYLINUX_I686_IMAGE: manylinux1
          # CIBW_BEFORE_BUILD: pip install certifi numpy==1.16
          CIBW_TEST_REQUIRES: pytest pooch pytest-localserver pytest-faulthandler
          CIBW_TEST_COMMAND: pytest --pyargs skimage

      - name: Build wheels for CPython 3.6
        run: |
          python -m cibuildwheel --output-dir dist
        env:
          CIBW_BUILD: "cp36-*"
          CIBW_MANYLINUX_X86_64_IMAGE: manylinux1
          CIBW_MANYLINUX_I686_IMAGE: manylinux1
          # CIBW_BEFORE_BUILD: pip install certifi numpy==1.16
        if: >
          startsWith(github.ref, 'refs/heads/v0.17') ||
          startsWith(github.ref, 'refs/tags/v0.17')

      - uses: actions/upload-artifact@v2
        with:
          name: wheels
          path: ./dist/*.whl


@@ -0,0 +1,192 @@
# Workflow to build and test wheels
name: Test

on: [push, pull_request]

jobs:
  test_skimage_linux:
    name: linux-cp${{ matrix.python-version }}-${{ matrix.OPTIONS_NAME }}
    runs-on: ubuntu-latest
    env:
      BUILD_DOCS: 1
      TEST_EXAMPLES: 0
    strategy:
      # Ensure that a wheel builder finishes even if another fails
      fail-fast: false
      matrix:
        python-version: [3.7, 3.8, 3.9]
        PIP_FLAGS: [""]
        MINIMUM_REQUIREMENTS: [0]
        QT: ["PyQt5"]
        WITH_PYSIDE: [0]
        BUILD_DOCS: [1]
        PYTHONOPTIMIZE: [0]
        TEST_EXAMPLES: [0]
        OPTIONAL_DEPS: [0]
        OPTIONS_NAME: ["default"]
        INSTALL_FROM_SDIST: [0]
        include:
          - platform_id: manylinux_x86_64
            python-version: 3.7
            MINIMUM_REQUIREMENTS: 1
            BUILD_DOCS: 0
            OPTIONAL_DEPS: 0
            # Test minimum requirements (don't build docs; there is a problem
            # with dask and astropy)
            OPTIONS_NAME: "mini-req"
          - platform_id: manylinux_x86_64
            python-version: 3.7
            MINIMUM_REQUIREMENTS: 1
            OPTIONAL_DEPS: 1
            BUILD_DOCS: 0
            OPTIONS_NAME: "mini-req-optional-deps"
          - platform_id: manylinux_x86_64
            python-version: 3.8
            PIP_FLAGS: "--pre"
            # test pre-releases
            OPTIONS_NAME: "pre"
          - platform_id: manylinux_x86_64
            python-version: 3.7
            BUILD_DOCS: 1
            OPTIONAL_DEPS: 1
            OPTIONS_NAME: "optional-deps"
          - platform_id: manylinux_x86_64
            python-version: 3.7
            PYTHONOPTIMIZE: 2
            BUILD_DOCS: 0
            OPTIONS_NAME: "optimize"
          - platform_id: manylinux_x86_64
            python-version: 3.7
            INSTALL_FROM_SDIST: 1
            OPTIONS_NAME: "install-from-sdist"

    steps:
      - name: Checkout scikit-image
        uses: actions/checkout@v1

      - name: Setup Python
        uses: actions/setup-python@v2
        with:
          python-version: ${{ matrix.python-version}}

      - name: Build package
        env:
          VERSION: ${{ matrix.python-version }}
          PYTHONOPTIMIZE: ${{ matrix.PYTHONOPTIMIZE }}
          MINIMUM_REQUIREMENTS: ${{ matrix.MINIMUM_REQUIREMENTS }}
          PIP_FLAGS: ${{ matrix.PIP_FLAGS }}
          INSTALL_FROM_SDIST: ${{ matrix.INSTALL_FROM_SDIST }}
        run: |
          set -ex
          source tools/github/before_install.sh
          python setup.py sdist
          if [[ $INSTALL_FROM_SDIST ]]; then
              pip uninstall cython -y;
              pip install dist/scikit-image-*.tar.gz;
          else
              pip install -vv -e .;
          fi

      - name: Run tests
        env:
          PIP_FLAGS: ${{ matrix.PIP_FLAGS }}
          MINIMUM_REQUIREMENTS: ${{ matrix.MINIMUM_REQUIREMENTS }}
          QT: ${{ matrix.QT }}
          WITH_PYSIDE: ${{ matrix.WITH_PYSIDE }}
          BUILD_DOCS: ${{ matrix.BUILD_DOCS }}
          TEST_EXAMPLES: ${{ matrix.TEST_EXAMPLES }}
          OPTIONAL_DEPS: ${{ matrix.OPTIONAL_DEPS }}
        run: |
          echo $PIP_FLAGS
          pip install $PIP_FLAGS -r requirements/test.txt
          export MPL_DIR=`python -c 'import matplotlib; print(matplotlib.get_configdir())'`
          mkdir -p ${MPL_DIR}
          touch ${MPL_DIR}/matplotlibrc
          if [[ "${OPTIONAL_DEPS}" == "1" ]]; then
              pip install -r ./requirements/optional.txt
              if [[ "${EXTRA_DEPS}" != "0" ]]; then
                  # Extra deps need compilation, and it may not always be possible to
                  # compile them easily on all platforms
                  pip install -r ./requirements/extras.txt
              fi
          fi
          source tools/github/script.sh

  test_skimage_macos:
    name: macos-cp${{ matrix.python-version }}
    runs-on: macos-latest
    strategy:
      # Ensure that a wheel builder finishes even if another fails
      fail-fast: false
      matrix:
        python-version: [3.7, 3.8, 3.9]
        QT: ["PyQt5"]
        BUILD_DOCS: [1]
        TEST_EXAMPLES: [0]
        OPTIONAL_DEPS: [1]
        OPTIONS_NAME: ["default"]
        include:
          - python-version: 3.7
            OPTIONAL_DEPS: 0
          - python-version: 3.9
            OPTIONAL_DEPS: 0
        exclude:
          - python-version: 3.7
            OPTIONAL_DEPS: 1
          # Problem with SimpleITK and py 3.9
          - python-version: 3.9
            OPTIONAL_DEPS: 1
    env:
      BUILD_DOCS: 1
      TEST_EXAMPLES: 0
      CC: /usr/bin/clang
      CXX: /usr/bin/clang++
      CPPFLAGS: "-Xpreprocessor -fopenmp"
      CFLAGS: "-Wno-implicit-function-declaration -I/usr/local/opt/libomp/include"
      CXXFLAGS: "-I/usr/local/opt/libomp/include"
      LDFLAGS: "-Wl,-rpath,/usr/local/opt/libomp/lib -L/usr/local/opt/libomp/lib -lomp"

    steps:
      - name: Checkout scikit-image
        uses: actions/checkout@v1

      - name: Setup Python
        uses: actions/setup-python@v2
        with:
          python-version: ${{ matrix.python-version}}

      - name: OSX configuration
        run: |
          # OpenMP is not present on macOS by default
          set -ex
          brew install libomp

      - name: Build package
        run: |
          set -ex
          source tools/github/before_install.sh
          python setup.py sdist
          pip install -vv --no-build-isolation -e .;

      - name: Run tests
        run: |
          pip install $PIP_FLAGS -r requirements/test.txt
          export MPL_DIR=`python -c 'import matplotlib; print(matplotlib.get_configdir())'`
          mkdir -p ${MPL_DIR}
          touch ${MPL_DIR}/matplotlibrc
          if [[ "${OPTIONAL_DEPS}" == "1" ]]; then
              pip install -r ./requirements/optional.txt
              pip install -r ./requirements/extras.txt
          fi
          source tools/github/script.sh

deps/scikit-image/.gitignore

@@ -0,0 +1,39 @@
.cache
__pycache__
*.pyc
*~
*#
.#*
*egg-info
*.so
*.pyd
*.bak
*.c
*.cpp
*.new
*.md5
*.old
*.lprof
.DS_Store
.pytest_cache
.mypy_cache/
temp.tif
.ropeproject
doc/source/api
doc/build
source/api
build
dist
skimage/version.py
*.swp
.coverage
doc/source/auto_examples/**/*
doc/source/_static/random.js
.idea/
*.log
doc/release/_release_notes_for_docs.rst
doc/gh-pages
wheels
skimage/morphology/_skeletonize_3d_cy.pyx
.asv
.vscode/

deps/scikit-image/.gitmodules

@@ -0,0 +1,3 @@
[submodule "doc/tools/gitwash"]
	path = doc/tools/gitwash
	url = git://github.com/matthew-brett/gitwash.git

deps/scikit-image/.mailmap

@@ -0,0 +1,19 @@
K.-Michael Aye <michaelaye@users.noreply.github.com> <kmichael.aye@gmail.com>
Nelson Brown <nelson.brown@gmail.com> <nelson.a.brown@nasa.gov>
Luis Pedro Coelho <luis@luispedro.org> <lpc@cmu.edu>
Marianne Corvellec <marianne.corvellec@ens-lyon.org> <mcorvellec@april.org>
Riaan van den Dool <riaanvddool@gmail.com> <rvddool@csir.co.za>
Emmanuelle Gouillart <emmanuelle.gouillart@normalesup.org> <emma@aleph.(none)> <gouillar@epsilon.(none)> <emmanuelle.gouillart@nsup.org> <gouillar@aleph.(none)>
Thouis (Ray) Jones <thouis@gmail.com> <thouis@seas.harvard.edu>
Gregory R. Lee <gregory.lee@cchmc.org> <grlee77@gmail.com>
Andreas Mueller <amueller@ais.uni-bonn.de> <andreas@wuerl.net>
Juan Nunez-Iglesias <juan.n@unimelb.edu.au> <jni.soma@gmail.com> <jni@janelia.hhmi.org>
Nicolas Pinto <pinto@alum.mit.edu> <nicolas.pinto@gmail.com>
Johannes Schönberger <jsch@demuc.de> <ahojnnes@users.noreply.github.com> <hannesschoenberger@gmail.com> <jschoenberger@demuc.de>
Tim Sheerman-Chase <tim2009@sheerman-chase.org.uk> <t.sheerman-chase@surrey.ac.uk>
Matthew Trentacoste <trentaco@adobe.com> <web@matttrent.com>
James Turner <jturner@gemini.edu> <jehturner@yahoo.co.uk>
Stefan van der Walt <stefanv@berkeley.edu> <stefan@sun.ac.za> <github@mentat.za.net> <sjvdwalt@gmail.com>
John Wiggins <jwiggins@enthought.com> <john.wiggins@xfel.eu>
Tony S Yu <tyu@tony-yus-macbook.local> <tsyu80@gmail.com>
François Boulogne <fboulogne sciunto org> <devel@sciunto.org>

deps/scikit-image/.pep8speaks.yml

@@ -0,0 +1,11 @@
scanner:
  diff_only: True  # Errors caused by only the patch are shown, not the whole file

pycodestyle:
  ignore:  # Errors and warnings to ignore
    - W391  # blank line at the end of file
    - E203  # whitespace before ,;:
    - W503  # newline before binary operator

no_blank_comment: True  # If True, no comment is made when the bot does not find any pep8 errors

deps/scikit-image/CODE_OF_CONDUCT.md

@@ -0,0 +1 @@
[scikit-image Code of Conduct](doc/source/conduct/code_of_conduct.md)

deps/scikit-image/CONTRIBUTING.txt

@@ -0,0 +1,649 @@
.. _howto_contribute:
How to contribute to scikit-image
=================================
Developing Open Source is great fun! Join us on the `scikit-image mailing
list <https://mail.python.org/mailman3/lists/scikit-image.python.org/>`_ and tell us
which of the following challenges you'd like to solve.
* Mentoring is available for those new to scientific programming in Python.
* If you're looking for something to implement or to fix, you can browse the
`open issues on GitHub <https://github.com/scikit-image/scikit-image/issues?q=is%3Aopen>`__.
* The technical detail of the `development process`_ is summed up below.
Refer to the :doc:`gitwash <gitwash/index>` for a step-by-step tutorial.
.. contents::
:local:
Development process
-------------------
Here's the long and short of it:
1. If you are a first-time contributor:
* Go to `https://github.com/scikit-image/scikit-image
<https://github.com/scikit-image/scikit-image>`_ and click the
"fork" button to create your own copy of the project.
* Clone the project to your local computer::
git clone https://github.com/your-username/scikit-image.git
* Change the directory::
cd scikit-image
* Add the upstream repository::
git remote add upstream https://github.com/scikit-image/scikit-image.git
* Now, you have remote repositories named:
- ``upstream``, which refers to the ``scikit-image`` repository
- ``origin``, which refers to your personal fork
.. note::
Although our code is hosted on `github
<https://github.com/scikit-image/>`_, our dataset is stored on `gitlab
<https://gitlab.com/scikit-image/data>`_ and fetched with `pooch
<https://github.com/fatiando/pooch>`_. New data must be submitted on
gitlab. Once merged, the data registry ``skimage/data/_registry.py``
in the main codebase on github must be updated.
2. Develop your contribution:
* Pull the latest changes from upstream::
git checkout master
git pull upstream master
* Create a branch for the feature you want to work on. Since the
branch name will appear in the merge message, use a sensible name
such as 'transform-speedups'::
git checkout -b transform-speedups
* Commit locally as you progress (``git add`` and ``git commit``)
3. To submit your contribution:
* Push your changes back to your fork on GitHub::
git push origin transform-speedups
* Enter your GitHub username and password (repeat contributors or advanced
users can remove this step by `connecting to GitHub with SSH
<https://help.github.com/en/github/authenticating-to-github/connecting-to-github-with-ssh>`_).
* Go to GitHub. The new branch will show up with a green Pull Request
button - click it.
* If you want, post on the `mailing list
<https://mail.python.org/mailman3/lists/scikit-image.python.org/>`_ to explain your changes or
to ask for review.
For a more detailed discussion, read these :doc:`detailed documents
<gitwash/index>` on how to use Git with ``scikit-image`` (:ref:`using-git`).
4. Review process:
* Reviewers (the other developers and interested community members) will
write inline and/or general comments on your Pull Request (PR) to help
you improve its implementation, documentation, and style. Every single
developer working on the project has their code reviewed, and we've come
to see it as a friendly conversation from which we all learn and the
overall code quality benefits. Therefore, please don't let the review
discourage you from contributing: its only aim is to improve the quality
of the project, not to criticize (we are, after all, very grateful for the
time you're donating!).
* To update your pull request, make your changes on your local repository
and commit. As soon as those changes are pushed up (to the same branch as
before) the pull request will update automatically.
* `Travis-CI <https://travis-ci.org/>`__, a continuous integration service,
is triggered after each Pull Request update to build the code, run unit
tests, measure code coverage and check coding style (PEP8) of your
branch. The Travis tests must pass before your PR can be merged. If
Travis fails, you can find out why by clicking on the "failed" icon (red
cross) and inspecting the build and test log.
* A pull request must be approved by two core team members before merging.
5. Document changes
If your change introduces any API modifications, please update
``doc/source/api_changes.txt``.
If your change introduces a deprecation, add a reminder to ``TODO.txt``
for the team to remove the deprecated functionality in the future.
.. note::
To reviewers: if it is not obvious from the PR description, add a short
explanation of what a branch did to the merge message and, if closing a
bug, also add "Closes #123" where 123 is the issue number.
Divergence between ``upstream master`` and your feature branch
--------------------------------------------------------------
If GitHub indicates that the branch of your Pull Request can no longer
be merged automatically, merge the master branch into yours::
git fetch upstream master
git merge upstream/master
If any conflicts occur, they need to be fixed before continuing. See
which files are in conflict using::
git status
Which displays a message like::
Unmerged paths:
(use "git add <file>..." to mark resolution)
both modified: file_with_conflict.txt
Inside the conflicted file, you'll find sections like these::
<<<<<<< HEAD
The way the text looks in your branch
=======
The way the text looks in the master branch
>>>>>>> master
Choose one version of the text that should be kept, and delete the
rest::
The way the text looks in your branch
Now, add the fixed file::
git add file_with_conflict.txt
Once you've fixed all merge conflicts, do::
git commit
.. note::
Advanced Git users are encouraged to `rebase instead of merge
<https://scikit-image.org/docs/dev/gitwash/development_workflow.html#rebasing-on-trunk>`__,
but we squash and merge most PRs either way.
Build environment setup
-----------------------
Please refer to :ref:`installing-scikit-image` for development installation
instructions.
Guidelines
----------
* All code should have tests (see `test coverage`_ below for more details).
* All code should be documented, to the same
`standard <https://numpydoc.readthedocs.io/en/latest/format.html#docstring-standard>`_ as NumPy and SciPy.
* For new functionality, always add an example to the gallery (see
:ref:`Sphinx-Gallery<sphinx_gallery>` below for more details).
* No changes are ever committed without review and approval by two core
team members. Ask on the
`mailing list <https://mail.python.org/mailman3/lists/scikit-image.python.org/>`_ if
you get no response to your pull request.
**Never merge your own pull request.**
Stylistic Guidelines
--------------------
* Set up your editor to remove trailing whitespace. Follow `PEP8
<https://www.python.org/dev/peps/pep-0008/>`__. Check code with pyflakes / flake8.
* Use numpy data types instead of strings (``np.uint8`` instead of
``"uint8"``).
* Use the following import conventions::
import numpy as np
import matplotlib.pyplot as plt
from scipy import ndimage as ndi
# only in Cython code
cimport numpy as cnp
cnp.import_array()
* When documenting array parameters, use ``image : (M, N) ndarray``
and then refer to ``M`` and ``N`` in the docstring, if necessary.
* Refer to array dimensions as (plane), row, column, not as x, y, z. See
:ref:`Coordinate conventions <numpy-images-coordinate-conventions>`
in the user guide for more information.
* Functions should support all input image dtypes. Use utility functions such
as ``img_as_float`` to help convert to an appropriate type. The output
format can be whatever is most efficient. This allows us to string together
several functions into a pipeline, e.g.::
hough(canny(my_image))
* Use ``Py_ssize_t`` as data type for all indexing, shape and size variables
in C/C++ and Cython code.
* Use relative module imports, i.e. ``from .._shared import xyz`` rather than
``from skimage._shared import xyz``.
* Wrap Cython code in a pure Python function, which defines the API. This
improves compatibility with code introspection tools, which are often not
aware of Cython code. (A sketch combining several of these guidelines
follows this list.)
* For Cython functions, release the GIL whenever possible, using
``with nogil:``.
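As a minimal sketch combining several of the guidelines above (the routine
``_foo_cy`` and its signature are invented for illustration, not an actual
scikit-image function)::

    from ..util import img_as_float   # relative import, per the guideline above
    # from ._foo_cy import _foo_cy    # hypothetical compiled implementation

    def foo(image, scale=1.0):
        """Apply the (hypothetical) foo filter.

        Parameters
        ----------
        image : (M, N) ndarray
            Input image; any dtype is accepted.
        scale : float, optional
            Strength of the filter.
        """
        # Accept all input dtypes; convert once at the API boundary.
        image = img_as_float(image)
        # The Cython routine stays private; this Python wrapper defines the API:
        # return _foo_cy(image, scale)
        return image * scale          # placeholder so the sketch runs as-is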
Testing
-------
See the testing section of the Installation guide.
Test coverage
-------------
Tests for a module should ideally cover all code in that module,
i.e., statement coverage should be at 100%.
To measure the test coverage, install
`pytest-cov <https://pytest-cov.readthedocs.io/en/latest/>`__
(using ``pip install pytest-cov``) and then run::
$ make coverage
This will print a report with one line for each file in `skimage`,
detailing the test coverage::
Name Stmts Exec Cover Missing
------------------------------------------------------------------------------
skimage/color/colorconv 77 77 100%
skimage/filter/__init__ 1 1 100%
...
Activate Travis-CI for your fork (optional)
-------------------------------------------
Travis-CI checks all unit tests in the project to prevent breakage.
Before sending a pull request, you may want to check that Travis-CI
successfully passes all tests. To do so,
* Go to `Travis-CI <https://travis-ci.org/>`__ and follow the Sign In link at
the top
* Go to your `profile page <https://travis-ci.org/profile>`__ and switch on
your scikit-image fork
It corresponds to steps one and two in
`Travis-CI documentation <https://docs.travis-ci.com/user/tutorial/#to-get-started-with-travis-ci-using-github>`__
(Step three is already done in scikit-image).
Thus, as soon as you push your code to your fork, it will trigger Travis-CI,
and you will receive an email notification when the process is done.
Every time Travis is triggered, it also calls on `Codecov
<https://codecov.io>`_ to inspect the current test coverage.
Building docs
-------------
To build docs, run ``make`` from the ``doc`` directory. ``make help`` lists
all targets. For example, to build the HTML documentation, you can run:
.. code:: sh
make html
Then, all the HTML files will be generated in ``scikit-image/doc/build/html/``.
To rebuild a full clean documentation, run:
.. code:: sh
make clean
make html
Requirements
~~~~~~~~~~~~
`Sphinx <http://www.sphinx-doc.org/en/stable/>`_,
`Sphinx-Gallery <https://sphinx-gallery.github.io>`_,
and LaTeX are needed to build the documentation.
**Sphinx:**
Sphinx and the other Python packages needed to build the documentation
can be installed using the ``scikit-image/requirements/docs.txt`` file:
.. code:: sh
pip install -r requirements/docs.txt
.. _sphinx_gallery:
**Sphinx-Gallery:**
The above install command includes the installation of
`Sphinx-Gallery <https://sphinx-gallery.github.io>`_, which we use to create
the :ref:`examples_gallery`.
Refer to the Sphinx-Gallery documentation for complete instructions on syntax and usage.
If you are contributing an example to the gallery or editing an existing one,
build the docs (see above) and open a web browser to check how your edits
render at ``scikit-image/doc/build/html/auto_examples/``: navigate to the file
you have added or changed.
When adding an example, visit also
``scikit-image/doc/build/html/auto_examples/index.html`` to check how the new
thumbnail renders on the gallery's homepage. To change the thumbnail image,
please refer to `this section
<https://sphinx-gallery.github.io/stable/configuration.html#choosing-thumbnail>`_
of the Sphinx-Gallery docs.
Note that gallery examples should have a maximum figure width of 8 inches.
**LaTeX Ubuntu:**
.. code:: sh
sudo apt-get install -qq texlive texlive-latex-extra dvipng
**LaTeX Mac:**
Install the full `MacTex <https://www.tug.org/mactex/>`__ installation or
install the smaller
`BasicTex <https://www.tug.org/mactex/morepackages.html>`__ and add *ucs*
and *dvipng* packages:
.. code:: sh
sudo tlmgr install ucs dvipng
Fixing Warnings
~~~~~~~~~~~~~~~
- "citation not found: R###" There is probably an underscore after a
reference in the first line of a docstring (e.g. [1]\_). Use this
method to find the source file: $ cd doc/build; grep -rin R####
- "Duplicate citation R###, other instance in..."" There is probably a
[2] without a [1] in one of the docstrings
- Make sure to use pre-sphinxification paths to images (not the
\_images directory)
Auto-generating dev docs
~~~~~~~~~~~~~~~~~~~~~~~~
This set of instructions was used to create
scikit-image/tools/deploy-docs.sh
- Go to Github account settings -> personal access tokens
- Create a new token with access rights ``public_repo`` and
``user:email only``
- Install the travis command line tool: ``gem install travis``. On OSX,
you can get gem via ``brew install ruby``.
- Take the token generated by Github and run
``travis encrypt GH_TOKEN=<token>`` from inside a scikit-image repo
- Paste the output into the secure: field of ``.travis.yml``.
- The decrypted GH\_TOKEN env var will be available for travis scripts
https://help.github.com/en/github/authenticating-to-github/creating-a-personal-access-token-for-the-command-line
https://docs.travis-ci.com/user/encryption-keys/
Deprecation cycle
-----------------
If the behavior of the library has to be changed, a deprecation cycle must be
followed to warn users.
- a deprecation cycle is *not* necessary when:
* adding a new function, or
* adding a new keyword argument to the *end* of a function signature, or
* fixing what was buggy behavior
- a deprecation cycle is necessary for *any breaking API change*, meaning a
change where the function, invoked with the same arguments, would return a
different result after the change. This includes:
* changing the order of arguments or keyword arguments, or
* adding arguments or keyword arguments to a function, or
* changing a function's name or submodule, or
* changing the default value of a function's arguments.
Usually, our policy is to put in place a deprecation cycle over two releases.
For the sake of illustration, we consider the modification of a default value in
a function signature. In version N (therefore, next release will be N+1), we
have
.. code-block:: python
def a_function(image, rescale=True):
out = do_something(image, rescale=rescale)
return out
that has to be changed to
.. code-block:: python
def a_function(image, rescale=None):
if rescale is None:
warn('The default value of rescale will change '
'to `False` in version N+3.', stacklevel=2)
rescale = True
out = do_something(image, rescale=rescale)
return out
and in version N+3
.. code-block:: python
def a_function(image, rescale=False):
out = do_something(image, rescale=rescale)
return out
Here is the process for a 2-release deprecation cycle:
- In the signature, set default to `None`, and modify the docstring to specify
that it's `True`.
- In the function, _if_ rescale is set to `None`, set to `True` and warn that the
default will change to `False` in version N+3.
- In ``doc/release/release_dev.rst``, under deprecations, add "In
`a_function`, the `rescale` argument will default to `False` in N+3."
- In ``TODO.txt``, create an item in the section related to version N+3 and write
"change rescale default to False in a_function".
Note that the 2-release deprecation cycle is not a strict rule and in some
cases, the developers can agree on a different procedure upon justification
(like when we can't detect the change, or it involves moving or deleting an
entire function for example).
Scikit-image uses warnings to highlight changes in its API so that users may
update their code accordingly. The ``stacklevel`` argument sets the location in
the callstack where the warnings will point. In most cases, it is appropriate
to set the ``stacklevel`` to ``2``. When warnings originate from helper
routines internal to the scikit-image library, it may be more appropriate to
set the ``stacklevel`` to ``3``. For more information, see the documentation of
the `warn <https://docs.python.org/3/library/warnings.html#warnings.warn>`__
function in the Python standard library.
To test if your warning is being emitted correctly, try calling the function
from an IPython console. It should point you to the console input itself
instead of being emitted by the files in the scikit-image library.
* **Good**: ``ipython:1: UserWarning: ...``
* **Bad**: ``scikit-image/skimage/measure/_structural_similarity.py:155: UserWarning:``
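As a minimal sketch of the helper case (function names invented for
illustration), a warning emitted inside an internal helper must skip one
extra frame to point at the user's code:

.. code-block:: python

    import warnings

    def _helper(image):
        # stacklevel=3 skips _helper and public_function, so the warning
        # points at the line where the user called public_function.
        warnings.warn("`old_option` is deprecated.", stacklevel=3)
        return image

    def public_function(image):
        # A warning raised directly here would use stacklevel=2 instead.
        return _helper(image)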
Bugs
----
Please `report bugs on GitHub <https://github.com/scikit-image/scikit-image/issues>`_.
Benchmarks
----------
While not mandatory for most pull requests, we ask that performance-related
PRs include a benchmark in order to clearly depict the use-case that is being
optimized for. A historical view of our snapshots can be found
at the following `website <https://pandas.pydata.org/speed/scikit-image/>`_.
In this section we will review how to set up the benchmarks,
and three commands ``asv dev``, ``asv run`` and ``asv continuous``.
Prerequisites
~~~~~~~~~~~~~
Begin by installing `airspeed velocity <https://asv.readthedocs.io/en/stable/>`_
in your development environment. Prior to installation, be sure to activate your
development environment, then if using ``venv`` you may install the requirement with::
source skimage-dev/bin/activate
pip install asv
If you are using conda, then the command::
conda activate skimage-dev
conda install asv
is more appropriate. Once installed, it is useful to run the command::
asv machine
to let airspeed velocity know more information about your machine.
Writing a benchmark
~~~~~~~~~~~~~~~~~~~
To write a benchmark, add a file in the ``benchmarks`` directory which contains
a class with one ``setup`` method and at least one method prefixed with ``time_``.
The ``time_`` method should only contain code you wish to benchmark.
Therefore it is useful to move everything that prepares the benchmark scenario
into the ``setup`` method. This function is called before calling a ``time_``
method and its execution time is not factored into the benchmarks.
Take for example the ``TransformSuite`` benchmark:
.. code-block:: python
import numpy as np
from skimage import transform
class TransformSuite:
"""Benchmark for transform routines in scikit-image."""
def setup(self):
self.image = np.zeros((2000, 2000))
idx = np.arange(500, 1500)
self.image[idx[::-1], idx] = 255
self.image[idx, idx] = 255
def time_hough_line(self):
result1, result2, result3 = transform.hough_line(self.image)
Here, the creation of the image is completed in the ``setup`` method, and not
included in the reported time of the benchmark.
It is also possible to benchmark features such as peak memory usage. To learn
more about the features of `asv`, please refer to the official
`airspeed velocity documentation <https://asv.readthedocs.io/en/latest/writing_benchmarks.html>`_.
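For instance, ``asv`` measures methods prefixed with ``peakmem_`` for peak
memory consumption instead of wall time. A minimal sketch, following the
``TransformSuite`` pattern above:

.. code-block:: python

    import numpy as np
    from skimage import transform

    class TransformMemSuite:
        def setup(self):
            self.image = np.zeros((2000, 2000))

        def peakmem_hough_line(self):
            # asv reports the peak memory of the process while this runs.
            transform.hough_line(self.image)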
Also, the benchmark files need to be importable when benchmarking old versions
of scikit-image. So if anything from scikit-image is imported at the top level,
it should be done as:
.. code-block:: python
try:
from skimage import metrics
except ImportError:
pass
The benchmarks themselves don't need any guarding against missing features,
only the top-level imports.
To allow tests of newer functions to be marked as "n/a" (not available)
rather than "failed" for older versions, the setup method itself can raise a
``NotImplementedError``. See the following example for the registration module:
.. code-block:: python
try:
from skimage import registration
except ImportError:
raise NotImplementedError("registration module not available")
Testing the benchmarks locally
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Prior to running the true benchmark, it is often worthwhile to test that the
code is free of typos. To do so, you may use the command::
asv dev -b TransformSuite
Where the ``TransformSuite`` above will be run once in your current environment
to test that everything is in order.
Running your benchmark
~~~~~~~~~~~~~~~~~~~~~~
The command above is fast, but doesn't test the performance of the code
adequately. To do that you may want to run the benchmark in your current
environment to see the performance of your change as you are developing new
features. The command ``asv run -E existing`` will specify that you wish to run
the benchmark in your existing environment. This will save a significant amount
of time since building scikit-image can be a time consuming task::
asv run -E existing -b TransformSuite
Comparing results to master
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Often, the goal of a PR is to compare the results of the modifications in terms
of speed to a snapshot of the code that is in the master branch of the
``scikit-image`` repository. The command ``asv continuous`` is of help here::
asv continuous master -b TransformSuite
This call will build out the environments specified in the ``asv.conf.json``
file and compare the performance of the benchmark between your current commit
and the code in the master branch.
The output may look something like::
$ asv continuous master -b TransformSuite
· Creating environments
· Discovering benchmarks
·· Uninstalling from conda-py3.7-cython-numpy1.15-scipy
·· Installing 544c0fe3 <benchmark_docs> into conda-py3.7-cython-numpy1.15-scipy.
· Running 4 total benchmarks (2 commits * 2 environments * 1 benchmarks)
[ 0.00%] · For scikit-image commit 37c764cb <benchmark_docs~1> (round 1/2):
[...]
[100.00%] ··· ...ansform.TransformSuite.time_hough_line 33.2±2ms
BENCHMARKS NOT SIGNIFICANTLY CHANGED.
In this case, the differences between HEAD and master are not significant
enough for airspeed velocity to report.
It is also possible to get a comparison of results for two specific revisions
for which benchmark results have previously been run via the `asv compare`
command::
asv compare v0.14.5 v0.17.2
Finally, one can also run ASV benchmarks only for a specific commit hash or
release tag by appending ``^!`` to the commit or tag name. For example, to run
the ``skimage.filters`` module benchmarks on release v0.17.2::
asv run -b Filter v0.17.2^!

deps/scikit-image/CONTRIBUTORS.txt

@@ -0,0 +1,309 @@
# Acknowledgements
scikit-image is a joint effort, created by a large community of contributors.
For a full list of contributors, please visit
[our GitHub repo](https://github.com/scikit-image/scikit-image/graphs/contributors)
or use `git` in the source repository as follows:
```
git shortlog --summary --numbered
```
Previously, we asked authors to add their names to this file whenever
they made a contribution. Because these additions were not made
consistently, we now refer to the git commit log as the ultimate
record of code contribution.
Please note that, on a project as large as this, there are *many*
different ways to contribute, of which code is only one. Other
contributions include community & project management, code review,
answering questions on forums, and web design. We are grateful for
each and every contributor, regardless of their role.
## Historical credits list
- Stefan van der Walt
Project coordination
- Nicolas Pinto
Colour spaces and filters, and image resizing.
Shape views: ``util.shape.view_as_windows`` and ``util.shape.view_as_blocks``
Montage helpers: ``util.montage``.
- Damian Eads
Morphological operators
- Mahipal Raythattha
Documentation infrastructure
- S. Chris Colbert
OpenCV wrappers, Scivi, Qt and Gtk gui bits, fast Hough transform,
and much more.
- Holger Rapp
OpenCV functions and better OSX library loader
- Ralf Gommers
Image IO, color spaces, plots in documentation, cleaner API docs
- Helge Reikeras
Logic around API docs generation
- Tony Yu
Reading of paletted images; build, bug and doc fixes.
Code to generate skimage logo.
Otsu thresholding, histogram equalisation, template matching, and more.
- Zachary Pincus
Tracing of low cost paths, FreeImage I/O plugin, iso-contours,
and more.
- Almar Klein
Binary heap class and other improvements for graph algorithms
Lewiner variant of marching cubes algorithm
- Lee Kamentsky and Thouis Jones of the CellProfiler team, Broad Institute, MIT
Constant time per pixel median filter, edge detectors, and more.
- Dan Farmer
Incorporating CellProfiler's Canny edge detector, ctypes loader with Windows
support.
- Pieter Holtzhausen
Incorporating CellProfiler's Sobel edge detector, build and bug fixes.
Radon transform, template matching.
- Emmanuelle Gouillart
Total variation noise filtering, integration of CellProfiler's
mathematical morphology tools, random walker segmentation,
tutorials, and more.
- Maël Primet
Total variation noise filtering
- Martin Bergholdt
Fix missing math.h functions in Windows 7 + MSVCC.
- Neil Muller
Numerous fixes, including those for Python 3 compatibility,
QT image reading.
- The IPython team
From whom we borrowed the github+web tools / style.
- Kyle Mandli
CSV to ReST code for feature comparison table.
- The Scikit Learn team
From whom we borrowed the example generation tools.
- Andreas Mueller
Example data set loader. Nosetest compatibility functions.
Quickshift image segmentation, Felzenszwalb's fast graph-based segmentation.
- Yaroslav Halchenko
For sharing his expert advice on Debian packaging.
- Brian Holt
Histograms of Oriented Gradients
- David Warde-Farley, Sturla Molden
Bresenham line drawing, from snippets on numpy-discussion.
- Christoph Gohlke
Windows packaging and Python 3 compatibility.
- Neil Yager
Skeletonization and grey level co-occurrence matrices.
- Nelle Varoquaux
Renaming of the package to ``skimage``.
Harris corner detector
- W. Randolph Franklin
Point in polygon test.
- Gaël Varoquaux
Harris corner detector
- Nicolas Poilvert
Shape views: ``util.shape.view_as_windows`` and ``util.shape.view_as_blocks``
Image resizing.
- Johannes Schönberger
Drawing functions, adaptive thresholding, regionprops, geometric
transformations, LBPs, polygon approximations, web layout, and more.
- Pavel Campr
Fixes and tests for Histograms of Oriented Gradients.
- Joshua Warner
Multichannel random walker segmentation, unified peak finder backend,
n-dimensional array padding, marching cubes, bug and doc fixes.
- Petter Strandmark
Perimeter calculation in regionprops.
- Olivier Debeir
Rank filters (8- and 16-bits) using sliding window.
- Luis Pedro Coelho
imread plugin
- Steven Silvester, Karel Zuiderveld
Adaptive Histogram Equalization
- Anders Boesen Lindbo Larsen
Dense DAISY feature description, circle perimeter drawing.
- François Boulogne
Drawing: Andres Method for circle perimeter, ellipse perimeter,
Bezier curve, anti-aliasing.
Circular and elliptical Hough Transforms
Thresholding
Various fixes
- Thouis Jones
Vectorized operators for arrays of 16-bit ints.
- Xavier Moles Lopez
Color separation (color deconvolution) for several stainings.
- Jostein Bø Fløystad
Tomography: radon/iradon improvements and SART implementation
Phase unwrapping integration
- Matt Terry
Color difference functions
- Eugene Dvoretsky
Yen, Ridler-Calvard (ISODATA) threshold implementations.
- Riaan van den Dool
skimage.io plugin: GDAL
- Fedor Morozov
Drawing: Wu's anti-aliased circle
- Michael Hansen
novice submodule
- Munther Gdeisat
Phase unwrapping implementation
- Miguel Arevallilo Herraez
Phase unwrapping implementation
- Hussein Abdul-Rahman
Phase unwrapping implementation
- Gregor Thalhammer
Phase unwrapping integration
- François Orieux
Image deconvolution http://research.orieux.fr
- Vighnesh Birodkar
Blob Detection
- Axel Donath
Blob Detection
- Adam Feuer
PIL Image import and export improvements
- Rebecca Murphy
astronaut in examples
- Geoffrey French
skimage.filters.rank.windowed_histogram and plot_windowed_histogram example.
- Alexey Umnov
skimage.draw.ellipse bug fix and tests.
- Ivana Kajic
Updated description and examples in documentation for gabor filters
- Matěj Týč
Extended the image labelling implementation so it also works on 3D images.
- Salvatore Scaramuzzino
RectTool example
- Kevin Keraudren
Fix and test for feature.peak_local_max
- Jeremy Metz
Adaptation of ImageJ Autothresholder.Li, fixed Qhull error QH6228
- Mike Sarahan
Sub-pixel shift registration
- Jim Fienup, Alexander Iacchetta
In-depth review of sub-pixel shift registration
- Damian Eads
Structuring elements in morphology module.
- Egor Panfilov
Inpainting with biharmonic equation
- Evgeni Burovski
Adaptation of ImageJ 3D skeletonization algorithm.
- Alex Izvorski
Color spaces for YUV and related spaces
- Thomas Lewiner
Design and original implementation of the Lewiner marching cubes algorithm
- Jeff Hemmelgarn
Minimum threshold
- Kirill Malev
Frangi and Hessian filters implementation
- Abdeali Kothari
Alpha blending to convert from rgba to rgb
- Jeyson Molina
Niblack and Sauvola Local thresholding
- Scott Sievert
Wavelet denoising
- Gleb Goussarov
Chan-Vese Segmentation
- Kevin Mader
Montage improvements
- Matti Eskelinen
ImageCollection improvements
- David Volgyes
Unsharp masking
- Lars Grüter
Flood-fill based local maxima detection
- Solutus Immensus
Histogram matching
- Laurent P. René de Cotret
Implementation of masked image translation registration
- Mark Harfouche
Enabled GIL free operation of many algorithms implemented in Cython.
Maintenance of the build and test infrastructure.
- Taylor D. Scott
Simplified _upsampled_dft and extended register_translation to nD images.
- David J. Mellert
Polar and log-polar warping, nD windows
- Sebastian Wallkötter
morphology.rolling_ball and morphology.rolling_ellipsoid

deps/scikit-image/INSTALL.rst

@@ -0,0 +1,509 @@
.. _installing-scikit-image:
Installing scikit-image
==============================================================================
How you should install ``scikit-image`` depends on your needs and skills:
- Simplest solution:
`scientific Python distribution <#scientific-python-distributions>`_.
- If you can install Python packages and work in virtual environments:
- `pip <#install-via-pip>`_
- `conda <#install-via-conda>`_
- Easy solution but with pitfalls: `system package manager <#system-package-managers>`_ (yum, apt, ...).
- `You're looking to contribute to scikit-image <#installing-scikit-image-for-contributors>`_.
Supported platforms
------------------------------------------------------------------------------
- Windows 64-bit on x86 processors
- Mac OS X on x86 processors
- Linux 64-bit on x86 processors
For information on other platforms, see `other platforms <#other-platforms>`_.
Version check
------------------------------------------------------------------------------
To see whether ``scikit-image`` is already installed or to check if an install has
worked, run the following in a Python shell or Jupyter notebook:
.. code-block:: python
import skimage
print(skimage.__version__)
or, from the command line:
.. code-block:: sh
python -c "import skimage; print(skimage.__version__)"
(Try ``python3`` if ``python`` is unsuccessful.)
You'll see the version number if ``scikit-image`` is installed and
an error message otherwise.
Scientific Python distributions
------------------------------------------------------------------------------
In a single install these give you Python,
``scikit-image`` and libraries it depends on, and other useful scientific
packages. They install into an isolated environment, so they won't conflict
with any existing installed programs.
Drawbacks are that the install can be large and you may not get
the most recent ``scikit-image``.
We recommend one of these distributions:
- `Anaconda <https://www.anaconda.com/distribution/>`_
- `Python(x,y) <https://python-xy.github.io/>`_
- `WinPython <https://winpython.github.io/>`_
When using the ``scikit-image``
documentation, make sure it's for the version you've installed (see
`Version check <#version-check>`_ above).
Installation via pip and conda
------------------------------------------------------------------------------
These install only ``scikit-image`` and its dependencies; pip has an option to
include related packages.
.. _install-via-pip:
pip
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Prerequisites to a pip install: You're able to use your system's command line to
install packages and are using a
`virtual environment
<https://towardsdatascience.com/virtual-environments-104c62d48c54?gi=2532aa12906#ee81>`_
(any of
`several
<https://stackoverflow.com/questions/41573587/what-is-the-difference-between-venv-pyvenv-pyenv-virtualenv-virtualenvwrappe>`_\
).
While it is possible to use pip without a virtual environment, it is not advised:
virtual environments create a clean Python environment that does not interfere
with any existing system installation, can be easily removed, and contain only
the package versions your application needs. They help avoid a common
challenge known as
`dependency hell <https://en.wikipedia.org/wiki/Dependency_hell>`_.
To install the current ``scikit-image`` you'll need at least Python 3.6. If
your Python is older, pip will find the most recent compatible version.
.. code-block:: sh
# Update pip
python -m pip install -U pip
# Install scikit-image
python -m pip install -U scikit-image
To include a selection of other scientific Python packages that expand
``scikit-image``'s capabilities to include, e.g., parallel processing, you
can install the package ``scikit-image[optional]``:
.. code-block:: sh
python -m pip install -U scikit-image[optional]
.. warning::
Please do not use the command ``sudo`` and ``pip`` together as ``pip`` may
overwrite critical system libraries which may require you to reinstall your
operating system.
.. _install-via-conda:
conda
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Miniconda is a bare-essentials version of the Anaconda package; you'll need to
install packages like ``scikit-image`` yourself. Like Anaconda, it installs
Python and provides virtual environments.
- `conda documentation <https://docs.conda.io>`_
- `Miniconda <https://docs.conda.io/en/latest/miniconda.html>`_
- `conda-forge <https://conda-forge.org>`_, a conda channel maintained
with the latest ``scikit-image`` package
Once you have your conda environment set up, you can install ``scikit-image``
with the command:
.. code-block:: sh
conda install scikit-image
System package managers
------------------------------------------------------------------------------
Using a package manager (``yum``, ``apt-get``, etc.) to install ``scikit-image``
or other Python packages is not your best option:
- You're likely to get an older version.
- You'll probably want to make updates and add new packages outside of
the package manager, leaving you with the same kind of
dependency conflicts you see when using pip without a virtual environment.
- There's an added risk because operating systems use Python, so if you
make system-wide Python changes (installing as root or using sudo),
you can break the operating system.
Downloading all demo datasets
------------------------------------------------------------------------------
Some of the data used in our examples is hosted online and is not installed
by default by the procedures explained above. Data are downloaded once, at the
first call, but this requires an internet connection. If you prefer downloading
all the demo datasets to be able to work offline, you can run this command:
.. code-block:: sh
python -c 'from skimage.data import download_all; download_all()'
or call ``download_all()`` in your favourite interactive Python environment
(IPython, Jupyter notebook, ...).
Other platforms
------------------------------------------------------------------------------
We still support Windows 32-bit on x86 processors but urge switching
to Windows 64-bit.
Unsupported platforms include:
1. Linux on 32-bit x86 processors.
2. Linux on 32-bit ARM processors (Raspberry Pi running Raspbian):
- While we do not officially support this distribution, we point users to
`piwheels <https://www.piwheels.org>`_
and their
`scikit-image's specific page <https://www.piwheels.org/project/scikit-image/>`_.
- You may need to install additional system dependencies listed for
`imagecodecs <https://www.piwheels.org/project/imagecodecs/>`_.
See
`issue 4721 <https://github.com/scikit-image/scikit-image/issues/4721>`_.
3. Linux on 64-bit ARM processors (Nvidia Jetson):
- Follow the conversation on
`issue 4705 <https://github.com/scikit-image/scikit-image/issues/4705>`_.
Although these platforms lack official support, many of the core
developers have experience with them and can help with questions.
If you want to install on an unsupported platform, try
`building from source <#building-from-source>`_.
Tell us which other platforms you'd like to see ``scikit-image`` on!
We are very interested in how ``scikit-image`` gets
`used <https://github.com/scikit-image/scikit-image/issues/4375>`_.
If you'd like to package ``scikit-image`` for an as-yet-unsupported platform,
`reach out on GitHub <https://github.com/scikit-image/scikit-image/issues>`_.
Additional help
------------------------------------------------------------------------------
If you still have questions, reach out through
- our `forum on image.sc <https://forum.image.sc/tags/scikit-image>`_
- our `mailing list <https://mail.python.org/mailman3/lists/scikit-image.python.org/>`_
- our `chat channel <https://skimage.zulipchat.com/>`_
- `Stack Overflow <https://stackoverflow.com/questions/tagged/scikit-image>`_
To suggest a change in these instructions,
`please open an issue on GitHub <https://github.com/scikit-image/scikit-image/issues/new>`_.
Installing scikit-image for contributors
========================================
We are assuming that you have a default Python environment already configured on
your computer and that you intend to install ``scikit-image`` inside it.
We also make a few more assumptions about your system:
- You have a C compiler set up.
- You have a C++ compiler set up.
- You are running a version of Python compatible with our requirements, as listed
in our `setup.py file <https://github.com/scikit-image/scikit-image/blob/master/setup.py#L212>`_.
- You've cloned the git repository into a directory called ``scikit-image``.
You have set up the `upstream` remote to point to our repository and `origin`
to point to your fork.
This directory contains the following files:
.. code-block::
scikit-image
├── asv.conf.json
├── azure-pipelines.yml
├── benchmarks
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.txt
├── CONTRIBUTORS.txt
├── doc
├── INSTALL.rst
├── LICENSE.txt
├── Makefile
├── MANIFEST.in
├── README.md
├── RELEASE.txt
├── requirements
├── requirements.txt
├── setup.cfg
├── setup.py
├── skimage
├── TODO.txt
├── tools
└── viewer_examples
All commands below are assumed to be running from the ``scikit-image``
directory containing the files above.
Build environment setup
------------------------------------------------------------------------------
Once you've cloned your fork of the scikit-image repository,
you should set up a Python development environment tailored for scikit-image.
You may use whichever environment manager you prefer.
Here we provide instructions for two popular environment managers:
``venv`` (pip based) and ``conda`` (Anaconda or Miniconda).
venv
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
When using ``venv``, you may find the following bash commands useful:
.. code-block:: sh
# Create a virtualenv named ``skimage-dev``
python -m venv skimage-dev
# Activate it. On Linux and MacOS:
source skimage-dev/bin/activate
# Install all development and runtime dependencies of scikit-image
pip install -r <(cat requirements/*.txt)
# Build and install scikit-image from source
pip install -e . -vv
# Test your installation
pytest skimage
On Windows, use ``skimage-dev\Scripts\activate`` in the activation step.
conda
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
When using conda for development, we
recommend adding the conda-forge channel for the most up-to-date version
of many dependencies.
Some dependencies we use (for testing and documentation) are not available
from the default Anaconda channel. Please follow the official
`conda-forge installation instructions <https://conda-forge.org/#about>`_
before you get started.
.. code-block:: sh
# Create a conda environment named ``skimage-dev``
conda create --name skimage-dev
# Activate it
conda activate skimage-dev
# Install major development and runtime dependencies of scikit-image
conda install `for i in requirements/{default,build,test}.txt; do echo -n " --file $i "; done`
# Install scikit-image from source
pip install -e . -vv
# Test your installation
pytest skimage
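The shell loop above is bash-specific. Since ``conda install`` accepts the
``--file`` option more than once, an equivalent explicit form is:
.. code-block:: sh
conda install --file requirements/default.txt --file requirements/build.txt --file requirements/test.txt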
Updating the installation
------------------------------------------------------------------------------
When updating your installation, it is often necessary to recompile submodules
that have changed. Do so with the following commands:
.. code-block:: sh
# Grab the latest source
git checkout master
git pull upstream master
# Update the installation
pip install -e . -vv
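If only Cython sources changed, rebuilding the extension modules in place is
usually sufficient (this is also what the repository ``Makefile`` does for its
``all`` target):
.. code-block:: sh
python setup.py build_ext --inplace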
Testing
-------
``scikit-image`` has an extensive test suite that ensures correct
execution on your system. The test suite must pass before a pull
request can be merged, and tests should be added to cover any
modifications to the code base.
We use the `pytest <https://docs.pytest.org/en/latest/>`__
testing framework, with tests located in the various
``skimage/submodule/tests`` folders.
Our testing requirements are listed below:
.. include:: ../../requirements/test.txt
:literal:
Run all tests using:
.. code-block:: sh
pytest skimage
Or the tests for a specific submodule:
.. code-block:: sh
pytest skimage/morphology
Or tests from a specific file:
.. code-block:: sh
pytest skimage/morphology/tests/test_grey.py
Or a single test within that file:
.. code-block:: sh
pytest skimage/morphology/tests/test_grey.py::test_3d_fallback_black_tophat
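You can also select tests by keyword expression using pytest's ``-k`` option:
.. code-block:: sh
# Run every test whose name contains "tophat"
pytest skimage -k "tophat"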
Use ``--doctest-modules`` to run doctests. For example, run all tests and all
doctests using:
.. code-block:: sh
pytest --doctest-modules skimage
Warnings during testing phase
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Scikit-image tries to catch all warnings in its development builds to ensure
that crucial warnings from dependencies are not missed. This might cause
certain tests to fail if you are building scikit-image with versions of
dependencies that were not tested at the time of the release. To disable
failures on warnings, export the environment variable
``SKIMAGE_TEST_STRICT_WARNINGS`` with a value of `0` or `False` and run the
tests:
.. code-block:: sh
export SKIMAGE_TEST_STRICT_WARNINGS=False
pytest --pyargs skimage
Platform-specific notes
------------------------------------------------------------------------------
**Windows**
If you experience the error ``Error:unable to find vcvarsall.bat``, it means
that your computer does not have the recommended compilers for Python. You can
either download and install Windows compilers from `here`_ or use
`MinGW compilers`_. If using MinGW, make sure to correctly configure
``distutils`` by modifying (or creating, if it does not exist) the
configuration file ``distutils.cfg`` (located, for example, at
``C:\Python26\Lib\distutils\distutils.cfg``) to contain::
[build]
compiler=mingw32
A run-through of the compilation process for Windows is included in
our `setup of Azure Pipelines`_ (a continuous integration service).
.. _setup of Azure Pipelines: https://github.com/scikit-image/scikit-image/blob/master/azure-pipelines.yml
.. _here: https://wiki.python.org/moin/WindowsCompilers#Microsoft_Visual_C.2B-.2B-_14.0_standalone:_Visual_C.2B-.2B-_Build_Tools_2015_.28x86.2C_x64.2C_ARM.29
.. _MinGW compilers: http://www.mingw.org/wiki/howto_install_the_mingw_gcc_compiler_suite
**Debian and Ubuntu**
Install suitable compilers:
.. code-block:: sh
sudo apt-get install build-essential
Full requirements list
----------------------
**Build Requirements**
.. include:: ../../requirements/build.txt
:literal:
**Runtime Requirements**
.. include:: ../../requirements/default.txt
:literal:
**Test Requirements**
.. include:: ../../requirements/test.txt
:literal:
**Documentation Requirements**
.. include:: ../../requirements/docs.txt
:literal:
**Optional Requirements**
You can use ``scikit-image`` with the basic requirements listed above, but some
functionality is only available with the following installed:
* `SimpleITK <http://www.simpleitk.org/>`__
Optional I/O plugin providing a wide variety of `formats <https://itk.org/Wiki/ITK_File_Formats>`__,
including specialized formats used in medical imaging.
* `Astropy <https://www.astropy.org>`__
Provides FITS I/O capability.
* `PyQt5 <https://wiki.python.org/moin/PyQt>`__ or `PySide2 <https://wiki.qt.io/Qt_for_Python>`__ through `qtpy <https://github.com/spyder-ide/qtpy>`__
A ``Qt`` plugin will provide ``imshow(x, fancy=True)`` and `skivi`.
* `PyAMG <https://pyamg.org/>`__
The ``pyamg`` module is used for the fast ``cg_mg`` mode of random
walker segmentation.
* `Dask <https://dask.org/>`__
The ``dask`` module is used to speed up certain functions.
.. include:: ../../requirements/optional.txt
:literal:
**Extra Requirements**
These requirements have been included as a convenience, but are not widely
installable through PyPI on our supported platforms. As such, we keep them in
a separate list for more advanced members of our community to install.
* `imread <https://pythonhosted.org/imread/>`__
Optional I/O plugin providing most standard `formats <https://pythonhosted.org//imread/formats.html>`__.
.. include:: ../../requirements/extras.txt
:literal:
Help with contributor installation
------------------------------------------------------------------------------
See `Additional help <#additional-help>`_ above.
81
deps/scikit-image/LICENSE.txt vendored Normal file
@ -0,0 +1,81 @@
Copyright (C) 2019, the scikit-image team
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
3. Neither the name of skimage nor the names of its contributors may be
used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
skimage/_shared/version_requirements.py:_check_version
Copyright (c) 2013 The IPython Development Team
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
skimage/_shared/version_requirements.py:is_installed:
Original Copyright (C) 2009-2011 Pierre Raybaut
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
22
deps/scikit-image/MANIFEST.in vendored Normal file
@ -0,0 +1,22 @@
include setup*.py
include MANIFEST.in
include pyproject.toml
include *.md
include *.txt
include *.rst
recursive-include requirements *.txt
include requirements/README.md
include Makefile
include skimage/scripts/skivi
recursive-include skimage *.pyx *.pxd *.pxi *.py *.c *.h *.ini *.npy *.txt *.in *.cpp *.md
recursive-include skimage/data *
recursive-include skimage/*/tests/data *
include doc/Makefile
recursive-include doc/examples *.txt
recursive-include doc/source *.rst
recursive-include doc/tools *.txt
recursive-include doc/source/_templates *.html
recursive-include doc *.py
prune doc/build
prune doc/gh-pages
32
deps/scikit-image/Makefile vendored Normal file
@ -0,0 +1,32 @@
.PHONY: all clean test
PYTHON ?= python
PYTEST ?= $(PYTHON) -m pytest
all:
$(PYTHON) setup.py build_ext --inplace
clean:
find . -name "*.so" -o -name "*.pyc" -o -name "*.md5" -o -name "*.pyd" | xargs rm -f
find . -name "*.pyx" -exec ./tools/rm_pyx_assoc_c_cpp.sh {} \;
rm -f MANIFEST
cleandoc:
rm -rf doc/build
test:
$(PYTEST) skimage --doctest-modules
doctest:
$(PYTHON) -c "import skimage, sys, io; sys.exit(skimage.doctest_verbose())"
benchmark_coverage:
$(PYTEST) benchmarks --cov=skimage --cov-config=setup.cfg
coverage: test_coverage
test_coverage:
$(PYTEST) -o python_functions=test_* skimage --cov=skimage
html:
pip install -q -r requirements/docs.txt
export SPHINXOPTS=-W; make -C doc html
86
deps/scikit-image/README.md vendored Normal file
@ -0,0 +1,86 @@
# scikit-image: Image processing in Python
[![Image.sc forum](https://img.shields.io/badge/dynamic/json.svg?label=forum&url=https%3A%2F%2Fforum.image.sc%2Ftags%2Fscikit-image.json&query=%24.topic_list.tags.0.topic_count&colorB=brightgreen&suffix=%20topics&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAA4AAAAOCAYAAAAfSC3RAAABPklEQVR42m3SyyqFURTA8Y2BER0TDyExZ+aSPIKUlPIITFzKeQWXwhBlQrmFgUzMMFLKZeguBu5y+//17dP3nc5vuPdee6299gohUYYaDGOyyACq4JmQVoFujOMR77hNfOAGM+hBOQqB9TjHD36xhAa04RCuuXeKOvwHVWIKL9jCK2bRiV284QgL8MwEjAneeo9VNOEaBhzALGtoRy02cIcWhE34jj5YxgW+E5Z4iTPkMYpPLCNY3hdOYEfNbKYdmNngZ1jyEzw7h7AIb3fRTQ95OAZ6yQpGYHMMtOTgouktYwxuXsHgWLLl+4x++Kx1FJrjLTagA77bTPvYgw1rRqY56e+w7GNYsqX6JfPwi7aR+Y5SA+BXtKIRfkfJAYgj14tpOF6+I46c4/cAM3UhM3JxyKsxiOIhH0IO6SH/A1Kb1WBeUjbkAAAAAElFTkSuQmCC)](https://forum.image.sc/tags/scikit-image)
[![Stackoverflow](https://img.shields.io/badge/stackoverflow-Ask%20questions-blue.svg)](https://stackoverflow.com/questions/tagged/scikit-image)
[![project chat](https://img.shields.io/badge/zulip-join_chat-brightgreen.svg)](https://skimage.zulipchat.com)
[![codecov.io](https://codecov.io/github/scikit-image/scikit-image/coverage.svg?branch=master)](https://codecov.io/github/scikit-image/scikit-image?branch=master)
- **Website (including documentation):** [https://scikit-image.org/](https://scikit-image.org)
- **Mailing list:** [https://mail.python.org/mailman3/lists/scikit-image.python.org/](https://mail.python.org/mailman3/lists/scikit-image.python.org/)
- **Source:** [https://github.com/scikit-image/scikit-image](https://github.com/scikit-image/scikit-image)
- **Benchmarks:** [https://pandas.pydata.org/speed/scikit-image/](https://pandas.pydata.org/speed/scikit-image/)
## Installation from binaries
- **Debian/Ubuntu:** ``sudo apt-get install python-skimage``
- **OSX:** ``pip install scikit-image``
- **Anaconda:** ``conda install -c conda-forge scikit-image``
- **Windows:** Download [Windows binaries](http://www.lfd.uci.edu/~gohlke/pythonlibs/#scikit-image)
Also see [installing ``scikit-image``](INSTALL.rst).
## Installation from source
Install dependencies using:
```
pip install -r requirements.txt
```
Then, install scikit-image using:
```
$ pip install .
```
If you plan to develop the package, you may run it directly from source:
```
$ pip install -e . # Do this once to add package to Python path
```
Every time you modify Cython files, also run:
```
$ python setup.py build_ext -i # Build binary extensions
```
## License (Modified BSD)
Copyright (C) 2011, the scikit-image team
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
3. Neither the name of skimage nor the names of its contributors may be
used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
## Citation
If you find this project useful, please cite:
> Stéfan van der Walt, Johannes L. Schönberger, Juan Nunez-Iglesias,
> François Boulogne, Joshua D. Warner, Neil Yager, Emmanuelle
> Gouillart, Tony Yu, and the scikit-image contributors.
> *scikit-image: Image processing in Python*. PeerJ 2:e453 (2014)
> https://doi.org/10.7717/peerj.453
303
deps/scikit-image/RELEASE.txt vendored Normal file
@ -0,0 +1,303 @@
How to make a new release of ``skimage``
========================================
While following this guide, note down all the times that you need to
consult a previous release manager, or that you find an instruction
unclear. You will, of course, make a PR to update these notes after
you are done with the release! ;-)
Some of the instructions don't apply to patch releases. As a reminder, we use a
variant of "semantic versioning", where version numbers are classified as
v<major>.<minor>.<patch>. If you are making a patch release, skip the first
three steps. Instead, make a PR to update ``release_<major>.<minor>.txt`` on
the corresponding release branch.
Before you start, make sure you have all the required write
permissions (if not, you will need to ask an owner to grant you
access), specifically to:
- https://github.com/scikit-image/scikit-image-wheels
- https://pypi.org/project/scikit-image/
- https://github.com/scikit-image/scikit-image-web
- Check ``TODO.txt`` for any outstanding tasks.
- Branch v<major>.<minor>.x from master. This is the "release branch", where
you will make your changes gearing up for release, and cherry-pick them as
appropriate to master.
- On the release branch, update the release notes:
1. Review and cleanup ``doc/release/release_dev.rst``.
2. Make a list of merges, contributors, and reviewers by running
``tools/generate_release_notes.py -h`` and following that file's usage.
3. Paste this list at the end of ``release_dev.txt``.
4. Scan the PR titles for highlights, deprecations, API changes,
and bugfixes, and mention these in the relevant sections of the notes.
Try to present the information in an expressive way by mentioning
the affected functions, elaborating on the changes and their
consequences. If possible, organize semantically close PRs in groups.
5. Check for duplicate names in the automatically generated list of
contributors and reviewers
6. Rename the file to ``doc/release/release_<major>.<minor>.txt``
7. Copy ``doc/release/release_template.txt`` to
``doc/release/release_dev.txt`` for the next release.
8. Copy relevant deprecations from ``release_<major>_<minor>.txt``
to ``release_dev.txt``.
- Submit the release notes for review by other project maintainers:
- Create a PR from v<major>.<minor>.x branch to master (at this point,
the difference should show the full contents of the release notes).
- Discuss with others, and make the changes directly to v<major>.<minor>.x branch.
- Once consensus is reached, ask the project maintainers to merge
the PR.
- Cherry-pick the change onto the release branch.
- On the master branch, update the version number in ``skimage/__init__.py``
to the next ``.dev0`` version, commit, and push. This should follow PEP440
meaning that the appropriate version number would look something like
``0.20.0.dev0`` with the period between ``0`` and ``dev`` and a trailing
``0`` immediately after ``dev``. The final ``0`` is necessary to ensure
that
`NumpyVersion <https://github.com/scikit-image/scikit-image/pull/4947>`_
correctly interprets the version number.
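As a quick sanity check (a sketch, not an official release step), you can
confirm the committed version string::
python -c "import skimage; print(skimage.__version__)"  # e.g. 0.20.0.dev0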
- On the master branch, edit ``doc/source/_static/docversions.js``,
add the release, e.g., `0.15.x`, and commit. Cherry-pick this
change back to the release branch.
- Make sure you have all the required write permissions (if not, you will
need to ask Stéfan or Juan to grant you access), specifically, to:
- https://github.com/scikit-image/scikit-image-wheels
- https://pypi.org/project/scikit-image/
- https://github.com/scikit-image/scikit-image-web
- Perform the following steps first on a release candidate to check that the
wheels build correctly. Then proceed with the release.
1. Back on the release branch, update the version number to stable in
``skimage/__init__.py``, commit, and push.
2. Add the version number as a tag in git::
git tag -s -m <github_release_message> [-u <key-id>] v<major>.<minor>.0
(If you do not have a GPG key, follow the tutorial to set it up:
https://help.github.com/articles/signing-commits-with-gpg/)
3. Push the new tag to GitHub::
git push upstream v<major>.<minor>.0<rcversion>
(where ``upstream`` is the name of the
``github.com:scikit-image/scikit-image`` repository.)
4. Build the package wheels (pre-compiled binaries) for various platforms:
- Clone https://github.com/scikit-image/scikit-image-wheels.
- If making a patch release, switch to the appropriate v<major>.<minor>.x
branch. Otherwise, create that branch.
- Update its ``.travis.yml`` file so that ``BUILD_COMMIT`` points to this
release tag (e.g. ``v0.14.0``).
- Update its ``.appveyor.yml`` file so that ``BUILD_COMMIT`` also points to
this release tag.
- Commit and push.
- Monitor the builds at
https://ci.appveyor.com/project/scikit-image/scikit-image-wheels and
https://travis-ci.org/scikit-image/scikit-image-wheels/branches
- If they fail, fix them through self-merging pull requests.
- Remove any builds for unsupported Python versions if necessary.
- Add builds for new Python versions if necessary.
- When all is green, spot-check a couple of builds to make sure that the
wheels have correctly uploaded. You should see a message like:
``python -m wheelhouse_uploader upload --local-folder``, with output:
``Wheelhouse successfully published at:
http://[crazyrandomstring].rackcdn.com``
- Open that URL and check for the wheels as
``scikit_image-<versiontag>-<platforminfo>.whl``
- If you have performed a release candidate, redo the previous steps with the
final release.
- Spot-check a couple of builds to make sure that the
wheels have correctly uploaded. You should see a message like:
``python -m wheelhouse_uploader upload --local-folder``, with output:
``Wheelhouse successfully published at:
http://[crazyrandomstring].rackcdn.com``
- Open that URL and check for the wheels as
``scikit_image-<versiontag>-<platforminfo>.whl``
**Do not tag until the builds are all green.**
- Add the version number as a tag in git::
git tag -s -m <github_release_message> [-u <key-id>] v<major>.<minor>.0
(If you do not have a GPG key, follow the tutorial to set it up:
https://help.github.com/articles/signing-commits-with-gpg/)
- Push the new tag to GitHub::
git push upstream v<major>.<minor>.0
(where ``upstream`` is the name of the
``github.com:scikit-image/scikit-image`` repository.)
- Build the package wheels again, but change BUILD_COMMIT in both
Travis-CI and AppVeyor to the release version, e.g., ``v0.16.0``.
- Upload the wheels to PyPI:
- Make sure ``twine`` is available. You can install it with
``pip install twine``.
- Download ``wheel-uploader`` [1]_ and place it on your PATH.
- Make sure authentication with twine is working.
- See https://pypi.org/project/twine/
- Set your PyPI username in the environment variable TWINE_USERNAME
- Set your PyPI password in the environment variable TWINE_PASSWORD
- Run ``tools/upload_wheels.sh``.
- If everything worked, delete the wheels from your local directory with
``rm -rf *.whl``
.. [1] https://github.com/MacPython/terryfy/blob/master/wheel-uploader
- Publish the source distribution on PyPI::
python setup.py sdist
twine upload dist/scikit-image-<major>.<minor>.0.tar.gz
- Update the release docs:
- Edit ``doc/source/_static/docversions.js`` and commit
- On the release branch, build a clean version of the docs. In the
root directory, run ``pip install .``.
- In the ``doc/`` directory:
- Build using
``make clean; make html; make gh-pages``.
- Check (since this is a new feature) that binder links in gallery examples
point to the release branch, e.g. `0.16.x`.
- In the ``gh-pages/`` directory:
- Update the symlink to ``stable`` and commit.
- Upload the docs: ``git push origin gh-pages`` in ``doc/gh-pages``.
- Update the web frontpage:
The webpage source is kept in a separate repo: `scikit-image-web`.
- Add release date to ``index.rst`` under "News".
- Add previous stable version documentation path to disallowed paths
in `robots.txt`
- Commit and push (this will also build and update the website).
- Post release notes on mailing lists, blog, Twitter, etc.
- scikit-image@python.org
- scipy-user@python.org
- scikit-learn@python.org
- Update the version and the release date on Wikipedia
https://en.wikipedia.org/wiki/Scikit-image
- Make a PR to scikit-image with any updates you might have to these notes
- If making a patch release (v<major>.<minor>.<patch>), forward-port the
release notes to the master branch and make a PR.
Conda-forge
-----------
**Note**: conda-forge now has an automated bot that makes the PR described
below for you. All you have to do is check the pull requests at
https://github.com/conda-forge/scikit-image-feedstock/pulls
and look for a new one for this version. Wait for all the continuous
integration checks to go green, then merge.
The manual instructions remain below in case the bot fails for some reason.
A scikit-image build recipe resides at
https://github.com/conda-forge/scikit-image-feedstock. You should update it to
point to the most recent release. You can do this by following these steps:
- Fork the repository at https://github.com/conda-forge/scikit-image-feedstock,
and clone it to your machine.
- Sprout a new branch, e.g. ``v<major>.<minor>``.
- Find out the SHA256 hash of the source distribution. You can find this at
https://pypi.org/project/scikit-image/, or use the following commands:
- ``sha256sum path/to/scikit-image-*.tar.gz`` (Linux)
- ``shasum -a 256 dist/scikit-image-*.tar.gz`` (macOS)
- ``CertUtil -hashfile dist\scikit-image-*.tar.gz SHA256`` (Windows)
- Edit the file ``recipe/meta.yaml``:
- Update the version number on the first line.
- Update the SHA256 value on line 9.
- If necessary, reset the build number to 0 (line 12).
- Update any requirements in the appropriate sections (build or run).
Note: don't remove ``numpy x.x``. This tells conda-smithy, conda-forge's
build system, that the library must be linked against NumPy at build time.
- Commit these changes.
- Update the infrastructure around the recipe with ``conda-smithy``:
* Install conda-smithy either with conda or pip
* Run ``conda-smithy rerender`` in the root of the repository, and commit
the changes.
- Push to your fork, and submit a pull request to the
upstream repo.
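Putting these steps together, an illustrative session (the fork URL, branch
name, and version are placeholders) might look like::
git clone https://github.com/<your-username>/scikit-image-feedstock
cd scikit-image-feedstock
git checkout -b v<major>.<minor>
# edit recipe/meta.yaml: version, sha256, build number, requirements
pip install conda-smithy
conda-smithy rerender
git commit -am "Update scikit-image to v<major>.<minor>.0"
git push origin v<major>.<minor>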
Debian
------
The below instructions remain here for completeness. However, the Debian
scientific team has kindly taken over package maintenance. Simply follow the
procedure described at https://www.debian.org/Bugs/Reporting to report a "bug"
that there is a new version of scikit-image out (specifying the version
number), with severity set to "Wishlist".
If you want to take matters into your own hands for some reason, follow the
instructions detailed below to cut a Debian release yourself.
- Tag the release as per instructions above.
- git checkout debian
- git merge v0.x.x
- uscan <- not sure if this step is necessary
- Update changelog (emacs has a good mode, requires package dpkg-dev-el)
- C-C C-v add new version, C-c C-c timestamp / save
- git commit -m 'Changelog entry for 0.x.x'
- git-buildpackage -uc -us -rfakeroot
- Sign the changes: debsign skimage_0.x.x-x_amd64.changes
- cd ../build-area && dput mentors skimage_0.x.x-x_amd64.changes
- The package should now be available at:
http://mentors.debian.net/package/skimage
For the last lines above to work, you need ``~/.gbp.conf``::
[DEFAULT]
upstream-tag = %(version)s
[git-buildpackage]
sign-tags = True
export-dir = ../build-area/
tarball-dir = ../tarballs/
As well as ``~/dput.cf``::
[mentors]
fqdn = mentors.debian.net
incoming = /upload
method = http
allow_unsigned_uploads = 0
progress_indicator = 2
# Allow uploads for UNRELEASED packages
allowed_distributions = .*
109
deps/scikit-image/TODO.txt vendored Normal file
@ -0,0 +1,109 @@
Remember to list any API changes below in `doc/source/api_changes.txt`.
Version 0.17
------------
* Update RELEASE.txt regarding Azure Pipelines after making changes to
scikit-image-wheels repo.
* Finalize ``skimage.future.graph`` API.
* Finalize ``skimage.future.manual_segmentation`` API.
Version 0.18
------------
Version 0.19
------------
* Set ``start_label`` to 1 in ``slic`` and remove the DeprecationWarning.
* In ``skimage/draw/draw.py`` remove ``circle`` and related tests.
* In ``skimage/segmentation/morphsnakes.py`` remove ``circle_level_set`` and related tests.
* Also make sure to look in the function ``_init_level_set``
* In ``skimage/morphology/_flood_fill.py`` replace the deprecated parameter
``inplace`` in flood_fill() with ``in_place`` and update the tests.
* In ``skimage/filters/rank/generic.py`` remove tophat and bottomhat (see #3614).
* Remove the definition of `skimage.util.pad` and
``skimage/util/tests/test_arraypad.py``.
* Remove the warnings in ``skimage/filters/ridges.py`` from sato and hessian
functions.
* In ``skimage/color/colorconv.py``, remove `rgb2grey` and `grey2rgb`.
* In ``skimage/color/colorconv.py``, remove the deprecation warnings
from `rgb2gray`.
* In ``skimage/transform/radon_transform.py``, remove the ``deprecate_kwarg``
decorator from ``iradon``.
* In ``skimage/measure/profile.py``, set default mode to 'reflect' in
`profile_line` and remove the deprecation warning.
* In ``skimage/_shared/utils.py``, raise a ValueError instead of the
warning when order > 0 and input array is bool in _set_order.
* In ``skimage/transform/_warps.py``, raise a ValueError instead of the
warning when anti_aliasing is True and input array is bool in resize.
* In ``skimage/feature/__init__.py``, remove `register_translation`.
* In ``skimage.measure._marching_cubes_lewiner.py``, remove
``marching_cubes_lewiner``.
* In ``skimage.measure._marching_cubes_classic.py``, remove
``marching_cubes_classic``.
* In ``skimage/color/colorconv.py``, remove the `alpha` and `is_rgb`
arguments and the associated deprecation warnings from `gray2rgb`.
* In ``skimage/color/colorlabel.py``, remove the `change_default_value`
decorator from `label2rgb` and set `bg_label` default value to 0.
* In ``skimage/feature/__init__.py``, remove `masked_register_translation`.
Version 0.20
------------
* In ``skimage/feature/peak.py``, remove the `indices` argument and
the decorator `remove_arg`.
* In ``skimage.measure._find_contours.py``, remove the `deprecate_kwarg`
decorator from `find_contours`.
* In ``skimage/feature/corner.py``, change `order` default to 'rc' in
`structure_tensor`.
* In ``skimage/feature/corner.py``, remove the `structure_tensor_eigvals`
function.
* Remove deprecated qt plugins, i.e. `qt_plugin.py`, `qt_plugin.ini` and
`skivi.py` in `skimage/io/_plugins`.
* Remove entirely the folder skimage/scripts containing the skivi app.
Remove `scripts`-related lines in MANIFEST.in, .codecov.yml, appveyor.yml,
and setup.py.
* Remove the viewer app in `skimage/viewer` and remove `./viewer_examples/`.
Remove `viewer_examples` occurrences in `tools/check_sdist.py` and
`tools/travis/script.sh`.
Version 1.0
-----------
* Consider removing the argument `coordinates` in
`skimage.segmentation.active_contour`, which has no effect.
Other
-----
* Check whether imread wheels are available, then re-enable testing imread
on macOS. See https://github.com/scikit-image/scikit-image/pull/3898
* When ``numpy`` is set to > 1.16, one may simplify the implementation
of `feature.blob_log` using the vectorized version of ``np.logspace``.
* Remove conditional import of ``scipy.fft`` in ``skimage._shared.fft`` once
the minimum supported version of ``scipy`` reaches 1.4.
* When ``numpy`` is set to >= 1.16, simplify ``draw.line_nd`` by using the
vectorized version of ``np.linspace``.
* Monitor when multibuild devel gets merged into master for python 3.8
compatibility https://github.com/matthew-brett/multibuild/issues/284
and update osx_install to point to the correct branch
* Remove pillow version related warning for MPO file format in
`io._plugins.pil_plugin.imread` when upgrading pillow min version to
6.0.0
* Remove pillow version related warning for CVE when pillow > 7.1.0 in
`skimage/io/_plugins/pil_plugin.py` and `skimage/io/collection.py`.
* When ``numpy`` is set to >= 1.16, remove the warning assertions in
``skimage/exposure/tests/test_exposure.py::test_rescale_nan_warning``
regarding ``invalid value encountered in reduce`` and
``Passing `np.nan` to mean no clipping in np.clip``.
* Remove direct allocation of ``output`` from ``skimage/filters/_gaussian.py``,
when ``scipy`` upgrades to 1.1.
* Specify atol value in `cg` calls and remove condition on scipy's version in
``skimage/segmentation/random_walker_segmentation.py`` when ``scipy`` upgrades
to 1.1.
* Remove '--verify-repo=none' in tlmgr calls in ``tools/travis/osx_install.sh``,
when texlive 2020 is available.
* Once NumPy is set to >= 1.17, consider removing
``skimage.morphology._util._fast_pad``.
* When sphinx-gallery>=0.9.0, remove the thumbnail_size in
doc/source/conf.py as the default value will be comparable (#4801).
28
deps/scikit-image/asv.conf.json vendored Normal file
@ -0,0 +1,28 @@
{
// The version of the config file format. Do not change, unless
// you know what you are doing.
"version": 1,
// For other options, see the documentation
// https://asv.readthedocs.io/en/stable/reference.html
"project": "scikit-image",
"project_url": "https://scikit-image.org/",
"repo": ".",
"branches": ["master"],
"dvcs": "git",
"environment_type": "conda",
"install_timeout": 1200,
"show_commit_url": "https://github.com/scikit-image/scikit-image/commit/",
"pythons": ["3.7"],
"matrix": {
"cython": [],
"numpy": ["1.15", "1.16"],
"scipy": []
},
"env_dir": ".asv/env",
"results_dir": ".asv/results",
"html_dir": ".asv/html"
}
168
deps/scikit-image/azure-pipelines.yml vendored Normal file
@ -0,0 +1,168 @@
# Azure Pipelines configuration file for Continuous Integration
# for building the package and running the tests under Windows.
jobs:
- job: 'Default'
pool:
vmImage: 'vs2017-win2016'
strategy:
maxParallel: 10
matrix:
Python37:
PYTHON_VERSION: '3.7'
ARCH: 'x86'
PIP_FLAGS: ''
Python37-x64:
PYTHON_VERSION: '3.7'
ARCH: 'x64'
PIP_FLAGS: ''
TEST_EXAMPLES: 'true'
Python38:
PYTHON_VERSION: '3.8'
ARCH: 'x86'
PIP_FLAGS: ''
Python38-x64:
PYTHON_VERSION: '3.8'
ARCH: 'x64'
PIP_FLAGS: ''
TEST_EXAMPLES: 'true'
BUILD_DOCS: 'true'
# build pre release packages on Python 3.8 since it has been out long
# enough for wheels to be built for packages that need to be compiled.
Python38-x64-pre:
PYTHON_VERSION: '3.8'
ARCH: 'x64'
PIP_FLAGS: '--pre'
Python39:
PYTHON_VERSION: '3.9'
ARCH: 'x86'
PIP_FLAGS: ''
Python39-x64:
PYTHON_VERSION: '3.9'
ARCH: 'x64'
PIP_FLAGS: ''
# NOTE(honles): winpty is required by one of the doc dependencies,
# but does not provide a wheel for Python 3.9
# TEST_EXAMPLES: 'true'
continueOnError: false
timeoutInMinutes: 60
steps:
- task: UsePythonVersion@0
inputs:
versionSpec: '$(PYTHON_VERSION)'
architecture: '$(ARCH)'
name: python
- bash: |
set -ex
PYTHON="$(python.pythonLocation)\\python.exe"
# Update pip
$PYTHON -m pip install -U pip setuptools wheel
# Check that we have the expected version and architecture for Python
$PYTHON --version
$PYTHON -m pip --version
$PYTHON -c "import struct; print('Void pointer width is', struct.calcsize('P') * 8)"
$PYTHON -m pip list
# Install the build and runtime dependencies of the project
$PYTHON -m pip install ${PIP_FLAGS} -r requirements/build.txt
# Disable C99 complex if PyWavelets needs to be built from source.
# The compiler used will be MSVC, but C99 may be detected improperly
USE_C99_COMPLEX=0 $PYTHON -m pip install ${PIP_FLAGS} -r requirements/default.txt
$PYTHON -m pip list
displayName: 'Pre-installation'
- bash: |
set -ex
PYTHON="$(python.pythonLocation)\\python.exe"
# Compile the package and build the wheel
$PYTHON setup.py bdist_wheel
# Install the generated wheel package
ls dist
$PYTHON -m pip install ${PIP_FLAGS} --no-index --find-links dist/ scikit-image
displayName: 'Installation'
- bash: |
set -ex
PYTHON="$(python.pythonLocation)\\python.exe"
# Install the test dependencies
$PYTHON -m pip install ${PIP_FLAGS} -r requirements/test.txt
$PYTHON -m pip list
# Set non-UI Matplotlib backend
cd ${AGENT_BUILDDIRECTORY} # D:\a\1
echo "backend : Agg" > matplotlibrc
displayName: 'Pre-testing'
- bash: |
set -ex
PYTHON="$(python.pythonLocation)\\python.exe"
# Change the working directory in order to run the tests
# on the installed version of skimage
cd ${AGENT_BUILDDIRECTORY} # D:\a\1
# Show the info about the installed scikit-image
$PYTHON -c "import skimage; print(skimage.__path__)"
# Force matplotlib to use the prepared config
export MATPLOTLIBRC=${AGENT_BUILDDIRECTORY}
# Run unit tests with pytest
# We don't test docstring examples (--doctest-modules) on
# Windows due to inconsistent ndarray formatting in `numpy`.
# For more details, see https://github.com/numpy/numpy/issues/13468
export TEST_ARGS="-v --cov=skimage"
$PYTHON -m pytest ${TEST_ARGS} --pyargs skimage
displayName: 'Package testing'
- bash: |
set -ex
export PYTHON="$(python.pythonLocation)\\python.exe"
# Install the doc dependencies
$PYTHON -m pip install ${PIP_FLAGS} -r requirements/docs.txt
$PYTHON -m pip list
# Build the documentation
choco install optipng
export SPHINXCACHE=${AGENT_BUILDDIRECTORY}/.cache/sphinx
export SPHINXOPTS=-W
make -C doc html
condition: eq(variables['BUILD_DOCS'], 'true')
displayName: 'Documentation testing'
- bash: |
set -ex
PYTHON="$(python.pythonLocation)\\python.exe"
# Install the doc dependencies
$PYTHON -m pip install ${PIP_FLAGS} -r requirements/docs.txt
$PYTHON -m pip list
# Force matplotlib to use the prepared config
export MATPLOTLIBRC=${AGENT_BUILDDIRECTORY}
# Run example applications
for f in doc/examples/*/*.py; do
$PYTHON "${f}"
if [ $? -ne 0 ]; then
exit 1
fi
done
condition: eq(variables['TEST_EXAMPLES'], 'true')
displayName: 'Gallery testing'
# - bash: |
# # -- Publish the .whl artifacts
# # -- Upload the content of dist/*.whl to a public wheelhouse
# displayName: 'Further consideration'
@ -0,0 +1,39 @@
# See "Writing benchmarks" in the asv docs for more information.
# https://asv.readthedocs.io/en/latest/writing_benchmarks.html
import numpy as np
from skimage import data, img_as_float
from skimage.transform import rescale
from skimage import exposure
class ExposureSuite:
"""Benchmark for exposure routines in scikit-image."""
def setup(self):
self.image_u8 = data.moon()
self.image = img_as_float(self.image_u8)
self.image = rescale(self.image, 2.0, anti_aliasing=False)
# for Contrast stretching
self.p2, self.p98 = np.percentile(self.image, (2, 98))
def time_equalize_hist(self):
# Run 10x to average out performance
# note that this is not needed as asv does this kind of averaging by
# default, but this loop remains here to maintain benchmark continuity
for i in range(10):
result = exposure.equalize_hist(self.image)
def time_equalize_adapthist(self):
result = exposure.equalize_adapthist(self.image, clip_limit=0.03)
def time_rescale_intensity(self):
result = exposure.rescale_intensity(self.image,
in_range=(self.p2, self.p98))
def time_histogram(self):
# Run it 10 times to obtain a measurable execution time.
for i in range(10):
result = exposure.histogram(self.image)
def time_gamma_adjust_u8(self):
for i in range(10):
_ = exposure.adjust_gamma(self.image_u8)
@ -0,0 +1,26 @@
# See "Writing benchmarks" in the asv docs for more information.
# https://asv.readthedocs.io/en/latest/writing_benchmarks.html
import numpy as np
from scipy import ndimage as ndi
from skimage import feature, util
class FeatureSuite:
"""Benchmark for feature routines in scikit-image."""
def setup(self):
self.image = np.zeros((640, 640))
self.image[320:-320, 320:-320] = 1
self.image = ndi.rotate(self.image, 15, mode='constant')
self.image = ndi.gaussian_filter(self.image, 4)
self.image += 0.2 * np.random.random(self.image.shape)
self.image_ubyte = util.img_as_ubyte(np.clip(self.image, 0, 1))
def time_canny(self):
result = feature.canny(self.image)
def time_glcm(self):
pi = np.pi
result = feature.greycomatrix(self.image_ubyte, distances=[1, 2],
angles=[0, pi/4, pi/2, 3*pi/4])
@ -0,0 +1,85 @@
# See "Writing benchmarks" in the asv docs for more information.
# https://asv.readthedocs.io/en/latest/writing_benchmarks.html
import numpy as np
from skimage import data, filters
class FiltersSuite:
"""Benchmark for filter routines in scikit-image."""
def setup(self):
self.image = np.random.random((4000, 4000))
self.image[:2000, :2000] += 1
self.image[3000:, 3000:] += 0.5
def time_sobel(self):
filters.sobel(self.image)
class FiltersSobel3D:
"""Benchmark for 3d sobel filters."""
def setup(self):
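# Probe for 3D support first: raising NotImplementedError in setup()
# makes asv skip this benchmark on versions without a 3D sobel.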
try:
filters.sobel(np.ones((8, 8, 8)))
except ValueError:
raise NotImplementedError("3d sobel unavailable")
self.image3d = data.binary_blobs(length=256, n_dim=3).astype(float)
def time_sobel_3d(self):
_ = filters.sobel(self.image3d)
class MultiOtsu(object):
"""Benchmarks for MultiOtsu threshold."""
param_names = ['classes']
params = [3, 4, 5]
def setup(self, *args):
try:
from skimage.filters import threshold_multiotsu
except ImportError:
raise NotImplementedError("threshold_multiotsu unavailable")
self.image = data.camera()
def time_threshold_multiotsu(self, classes):
filters.threshold_multiotsu(self.image, classes=classes)
def peakmem_reference(self, *args):
"""Provide reference for memory measurement with empty benchmark.
Peakmem benchmarks measure the maximum amount of RAM used by a
function. However, this maximum also includes the memory used
during the setup routine (as of asv 0.2.1; see [1]_).
Measuring an empty peakmem function might allow us to disambiguate
between the memory used by setup and the memory used by target (see
other ``peakmem_`` functions below).
References
----------
.. [1]: https://asv.readthedocs.io/en/stable/writing_benchmarks.html#peak-memory
"""
pass
def peakmem_threshold_multiotsu(self, classes):
filters.threshold_multiotsu(self.image, classes=classes)
class ThresholdSauvolaSuite:
"""Benchmark for transform routines in scikit-image."""
def setup(self):
self.image = np.zeros((2000, 2000), dtype=np.uint8)
self.image3D = np.zeros((30, 300, 300), dtype=np.uint8)
idx = np.arange(500, 700)
idx3D = np.arange(10, 200)
self.image[idx[::-1], idx] = 255
self.image[idx, idx] = 255
self.image3D[:, idx3D[::-1], idx3D] = 255
self.image3D[:, idx3D, idx3D] = 255
def time_sauvola(self):
result = filters.threshold_sauvola(self.image, window_size=51)
def time_sauvola_3d(self):
result = filters.threshold_sauvola(self.image3D, window_size=51)
@ -0,0 +1,20 @@
from subprocess import run, PIPE
from sys import executable
class ImportSuite:
"""Benchmark the time it takes to import various modules"""
params = [
'numpy',
'skimage',
'skimage.feature',
'skimage.morphology',
'skimage.color',
]
param_names = ["package_name"]
def setup(self, package_name):
pass
def time_import(self, package_name):
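# Spawn a fresh interpreter for each measurement so that modules
# already imported by the benchmark process do not skew the timing.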
results = run(executable + ' -c "import ' + package_name + '"',
stdout=PIPE, stderr=PIPE, stdin=PIPE, shell=True)
@ -0,0 +1,26 @@
import numpy as np
# guard against import of a non-existent metrics module in older skimage
try:
from skimage import metrics
except ImportError:
pass
class SetMetricsSuite(object):
shape = (6, 6)
coords_a = np.zeros(shape, dtype=bool)
coords_b = np.zeros(shape, dtype=bool)
def setup(self):
try:
from skimage.metrics import hausdorff_distance
except ImportError:
raise NotImplementedError("hausdorff_distance unavailable")
points_a = (1, 0)
points_b = (5, 2)
self.coords_a[points_a] = True
self.coords_b[points_b] = True
def time_hausdorff(self):
metrics.hausdorff_distance(self.coords_a, self.coords_b)
@ -0,0 +1,78 @@
"""Benchmarks for `skimage.morphology`."""
import numpy as np
from numpy.lib import NumpyVersion as Version
import skimage
from skimage import data, filters, morphology, util
class Watershed(object):
param_names = ["seed_count", "connectivity", "compactness"]
params = [(5, 500), (1, 2), (0, 0.01)]
def setup(self, *args):
self.image = filters.sobel(data.coins())
def time_watershed(self, seed_count, connectivity, compactness):
morphology.watershed(self.image, seed_count, connectivity,
compactness=compactness)
def peakmem_reference(self, *args):
"""Provide reference for memory measurement with empty benchmark.
Peakmem benchmarks measure the maximum amount of RAM used by a
function. However, this maximum also includes the memory used
during the setup routine (as of asv 0.2.1; see [1]_).
Measuring an empty peakmem function might allow us to disambiguate
between the memory used by setup and the memory used by target (see
other ``peakmem_`` functions below).
References
----------
.. [1]: https://asv.readthedocs.io/en/stable/writing_benchmarks.html#peak-memory
"""
pass
def peakmem_watershed(self, seed_count, connectivity, compactness):
morphology.watershed(self.image, seed_count, connectivity,
compactness=compactness)
class Skeletonize3d(object):
def setup(self, *args):
try:
# use a separate skeletonize_3d function on older scikit-image
if Version(skimage.__version__) < Version('0.16.0'):
self.skeletonize = morphology.skeletonize_3d
else:
self.skeletonize = morphology.skeletonize
except AttributeError:
raise NotImplementedError("3d skeletonize unavailable")
# we stack the horse data 5 times to get an example volume
self.image = np.stack(5 * [util.invert(data.horse())])
def time_skeletonize_3d(self):
self.skeletonize(self.image)
def peakmem_reference(self, *args):
"""Provide reference for memory measurement with empty benchmark.
Peakmem benchmarks measure the maximum amount of RAM used by a
function. However, this maximum also includes the memory used
during the setup routine (as of asv 0.2.1; see [1]_).
Measuring an empty peakmem function might allow us to disambiguate
between the memory used by setup and the memory used by target (see
other ``peakmem_`` functions below).
References
----------
.. [1]: https://asv.readthedocs.io/en/stable/writing_benchmarks.html#peak-memory
"""
pass
def peakmem_skeletonize_3d(self):
self.skeletonize(self.image)
@ -0,0 +1,24 @@
import numpy as np
from scipy import ndimage as ndi
from skimage.feature import peak_local_max
class PeakLocalMaxSuite(object):
def setup(self):
mask = np.zeros([500, 500], dtype=bool)
x, y = np.indices((500, 500))
x_c = x // 20 * 20 + 10
y_c = y // 20 * 20 + 10
mask[(x - x_c)**2 + (y - y_c)**2 < 8**2] = True
# create a mask, label each disk,
self.labels, num_objs = ndi.label(mask)
# create distance image for peak searching
self.dist = ndi.distance_transform_edt(mask)
def time_peak_local_max(self):
local_max = peak_local_max(
self.dist, labels=self.labels,
min_distance=20, indices=False, exclude_border=False)
@ -0,0 +1,31 @@
import numpy as np
from skimage.filters import rank
from skimage.filters.rank import __all__ as all_rank_filters
from skimage.filters.rank import __3Dfilters as all_3d_rank_filters
from skimage.morphology import disk, ball
class RankSuite(object):
param_names = ["filter_func", "shape"]
params = [sorted(all_rank_filters), [(32, 32), (256, 256)]]
def setup(self, filter_func, shape):
self.image = np.random.randint(0, 255, size=shape, dtype=np.uint8)
self.selem = disk(1)
def time_filter(self, filter_func, shape):
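# Resolve the rank filter by name and apply it with a small disk footprint.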
getattr(rank, filter_func)(self.image, self.selem)
class Rank3DSuite(object):
param_names = ["filter3d", "shape3d"]
params = [sorted(all_3d_rank_filters), [(32, 32, 32), (128, 128, 128)]]
def setup(self, filter3d, shape3d):
self.volume = np.random.randint(0, 255, size=shape3d, dtype=np.uint8)
self.selem_3d = ball(1)
def time_3d_filters(self, filter3d, shape3d):
getattr(rank, filter3d)(self.volume, self.selem_3d)
@ -0,0 +1,81 @@
import numpy as np
from scipy import ndimage as ndi
from skimage.color import rgb2gray
from skimage import data, img_as_float
# guard against import of a non-existent registration module in older skimage
try:
from skimage import registration
except ImportError:
pass
# deal with move and rename of phase_cross_correlation across versions
try:
from skimage.registration import phase_cross_correlation
except ImportError:
try:
from skimage.feature import register_translation
phase_cross_correlation = register_translation
except ImportError:
phase_cross_correlation = None
class RegistrationSuite(object):
"""Benchmark for registration routines in scikit-image."""
def setup(self):
try:
from skimage.registration import optical_flow_tvl1
except ImportError:
raise NotImplementedError("optical_flow_tvl1 unavailable")
I0, I1, _ = data.stereo_motorcycle()
self.I0 = rgb2gray(I0)
self.I1 = rgb2gray(I1)
def time_tvl1(self):
registration.optical_flow_tvl1(self.I0, self.I1)
def time_ilk(self):
registration.optical_flow_ilk(self.I0, self.I1)
class PhaseCrossCorrelationRegistration:
"""Benchmarks for registration.phase_cross_correlation in scikit-image"""
param_names = ["ndims", "image_size", "upscale_factor"]
params = [(2, 3), (32, 100), (1, 5, 10)]
def setup(self, ndims, image_size, upscale_factor, *args):
if phase_cross_correlation is None:
raise NotImplementedError("phase_cross_correlation unavailable")
shifts = (-2.3, 1.7, 5.4, -3.2)[:ndims]
phantom = img_as_float(
data.binary_blobs(length=image_size, n_dim=ndims))
self.reference_image = np.fft.fftn(phantom)
self.shifted_image = ndi.fourier_shift(self.reference_image, shifts)
def time_phase_cross_correlation(self, ndims, image_size, upscale_factor):
result = phase_cross_correlation(self.reference_image,
self.shifted_image,
upscale_factor,
space="fourier")
def peakmem_reference(self, *args):
"""Provide reference for memory measurement with empty benchmark.
Peakmem benchmarks measure the maximum amount of RAM used by a
function. However, this maximum also includes the memory used
during the setup routine (as of asv 0.2.1; see [1]_).
Measuring an empty peakmem function might allow us to disambiguate
between the memory used by setup and the memory used by target (see
other ``peakmem_`` functions below).
References
----------
.. [1]: https://asv.readthedocs.io/en/stable/writing_benchmarks.html#peak-memory
"""
pass
def peakmem_phase_cross_correlation(self, ndims, image_size,
upscale_factor):
result = phase_cross_correlation(self.reference_image,
self.shifted_image,
upscale_factor,
space="fourier")

View File

@ -0,0 +1,150 @@
import numpy as np
from skimage.data import camera
from skimage import restoration, data, io
import scipy.ndimage as ndi
class RestorationSuite:
"""Benchmark for restoration routines in scikit image."""
def setup(self):
nz = 32
self.volume_f64 = np.stack([camera()[::2, ::2], ] * nz,
axis=-1).astype(float) / 255
self.sigma = .05
self.volume_f64 += self.sigma * np.random.randn(*self.volume_f64.shape)
self.volume_f32 = self.volume_f64.astype(np.float32)
def peakmem_setup(self):
pass
def time_denoise_nl_means_f64(self):
restoration.denoise_nl_means(self.volume_f64, patch_size=3,
patch_distance=2, sigma=self.sigma,
h=0.7 * self.sigma, fast_mode=False,
multichannel=False)
def time_denoise_nl_means_f32(self):
restoration.denoise_nl_means(self.volume_f32, patch_size=3,
patch_distance=2, sigma=self.sigma,
h=0.7 * self.sigma, fast_mode=False,
multichannel=False)
def time_denoise_nl_means_fast_f64(self):
restoration.denoise_nl_means(self.volume_f64, patch_size=3,
patch_distance=2, sigma=self.sigma,
h=0.7 * self.sigma, fast_mode=True,
multichannel=False)
def time_denoise_nl_means_fast_f32(self):
restoration.denoise_nl_means(self.volume_f32, patch_size=3,
patch_distance=2, sigma=self.sigma,
h=0.7 * self.sigma, fast_mode=True)
def peakmem_denoise_nl_means_f64(self):
restoration.denoise_nl_means(self.volume_f64, patch_size=3,
patch_distance=2, sigma=self.sigma,
h=0.7 * self.sigma, fast_mode=False,
multichannel=False)
def peakmem_denoise_nl_means_f32(self):
restoration.denoise_nl_means(self.volume_f32, patch_size=3,
patch_distance=2, sigma=self.sigma,
h=0.7 * self.sigma, fast_mode=False)
def peakmem_denoise_nl_means_fast_f64(self):
restoration.denoise_nl_means(self.volume_f64, patch_size=3,
patch_distance=2, sigma=self.sigma,
h=0.7 * self.sigma, fast_mode=True,
multichannel=False)
def peakmem_denoise_nl_means_fast_f32(self):
restoration.denoise_nl_means(self.volume_f32, patch_size=3,
patch_distance=2, sigma=self.sigma,
h=0.7 * self.sigma, fast_mode=True,
multichannel=False)
class DeconvolutionSuite:
"""Benchmark for restoration routines in scikit image."""
def setup(self):
nz = 32
self.volume_f64 = np.stack([camera()[::2, ::2], ] * nz,
axis=-1).astype(float) / 255
self.sigma = .02
self.psf_f64 = np.ones((5, 5, 5)) / 125
self.psf_f32 = self.psf_f64.astype(np.float32)
self.volume_f64 = ndi.convolve(self.volume_f64, self.psf_f64)
self.volume_f64 += self.sigma * np.random.randn(*self.volume_f64.shape)
self.volume_f32 = self.volume_f64.astype(np.float32)
def peakmem_setup(self):
pass
def time_richardson_lucy_f64(self):
restoration.richardson_lucy(self.volume_f64, self.psf_f64,
iterations=10)
def time_richardson_lucy_f32(self):
restoration.richardson_lucy(self.volume_f32, self.psf_f32,
iterations=10)
# use iterations=1 for peak-memory cases to save time
def peakmem_richardson_lucy_f64(self):
restoration.richardson_lucy(self.volume_f64, self.psf_f64,
iterations=1)
def peakmem_richardson_lucy_f32(self):
restoration.richardson_lucy(self.volume_f32, self.psf_f32,
iterations=1)
class RollingBall:
"""Benchmark Rolling Ball algorithm."""
timeout = 120
def time_rollingball(self, radius):
restoration.rolling_ball(data.coins(), radius=radius)
time_rollingball.params = [25, 50, 75, 100, 150, 200]
time_rollingball.param_names = ["radius"]
def peakmem_reference(self, *args):
"""Provide reference for memory measurement with empty benchmark.
Peakmem benchmarks measure the maximum amount of RAM used by a
function. However, this maximum also includes the memory used
during the setup routine (as of asv 0.2.1; see [1]_).
Measuring an empty peakmem function lets us disambiguate between the
memory used by setup and the memory used by the target function (see the
other ``peakmem_`` functions below).
References
----------
.. [1] https://asv.readthedocs.io/en/stable/writing_benchmarks.html#peak-memory
"""
pass
def peakmem_rollingball(self, radius):
restoration.rolling_ball(data.coins(), radius=radius)
peakmem_rollingball.params = [25, 50, 75, 100, 150, 200]
peakmem_rollingball.param_names = ["radius"]
def time_rollingball_nan(self, radius):
image = data.coins().astype(float)
pos = np.arange(np.min(image.shape))
        image[pos, pos] = np.nan
restoration.rolling_ball(image, radius=radius, nansafe=True)
time_rollingball_nan.params = [25, 50, 75, 100, 150, 200]
time_rollingball_nan.param_names = ["radius"]
def time_rollingball_ndim(self):
from skimage.restoration.rolling_ball import ellipsoid_kernel
image = data.cells3d()[:, 1, ...]
kernel = ellipsoid_kernel((1, 100, 100), 100)
restoration.rolling_ball(
image, kernel=kernel)
def time_rollingball_threads(self, threads):
restoration.rolling_ball(data.coins(), radius=100, num_threads=threads)
time_rollingball_threads.params = range(0, 9)
time_rollingball_threads.param_names = ["threads"]

View File

@ -0,0 +1,83 @@
# See "Writing benchmarks" in the asv docs for more information.
import numpy as np
from numpy.lib import NumpyVersion as Version
import skimage
from skimage import segmentation
class SlicSegmentation:
"""Benchmark for segmentation routines in scikit-image."""
def setup(self):
self.image = np.random.random((200, 200, 100))
self.image[:100, :100, :] += 1
self.image[150:, 150:, :] += 0.5
self.msk = np.zeros((200, 200, 100))
self.msk[10:-10, 10:-10, 10:-10] = 1
self.msk_slice = self.msk[..., 50]
if Version(skimage.__version__) >= Version('0.17.0'):
self.slic_kwargs = dict(start_label=1)
else:
self.slic_kwargs = {}
def time_slic_basic(self):
segmentation.slic(self.image, enforce_connectivity=False,
multichannel=False, **self.slic_kwargs)
def time_slic_basic_multichannel(self):
segmentation.slic(self.image, enforce_connectivity=False,
multichannel=True, **self.slic_kwargs)
def peakmem_setup(self):
"""peakmem includes the memory used by setup.
Peakmem benchmarks measure the maximum amount of RAM used by a
function. However, this maximum also includes the memory used
by ``setup`` (as of asv 0.2.1; see [1]_).
Measuring an empty peakmem function lets us disambiguate between the
memory used by setup and the memory used by slic (see
``peakmem_slic_basic``, below).
References
----------
.. [1] https://asv.readthedocs.io/en/stable/writing_benchmarks.html#peak-memory
"""
pass
def peakmem_slic_basic(self):
segmentation.slic(self.image, enforce_connectivity=False,
multichannel=False, **self.slic_kwargs)
def peakmem_slic_basic_multichannel(self):
segmentation.slic(self.image, enforce_connectivity=False,
multichannel=True, **self.slic_kwargs)
class MaskSlicSegmentation(SlicSegmentation):
"""Benchmark for segmentation routines in scikit-image."""
def setup(self):
try:
mask = np.zeros((64, 64)) > 0
mask[10:-10, 10:-10] = 1
segmentation.slic(np.ones_like(mask), mask=mask)
except TypeError:
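            # asv treats a NotImplementedError raised during setup as
            # "skip this benchmark", so scikit-image versions without
            # masked SLIC are skipped cleanly instead of failing.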
raise NotImplementedError("masked slic unavailable")
self.image = np.random.random((200, 200, 100))
self.image[:100, :100, :] += 1
self.image[150:, 150:, :] += 0.5
self.msk = np.zeros((200, 200, 100))
self.msk[10:-10, 10:-10, 10:-10] = 1
self.msk_slice = self.msk[..., 50]
if Version(skimage.__version__) >= Version('0.17.0'):
self.slic_kwargs = dict(start_label=1)
else:
self.slic_kwargs = {}
def time_mask_slic(self):
segmentation.slic(self.image, enforce_connectivity=False,
mask=self.msk, multichannel=False)
def time_mask_slic_multichannel(self):
segmentation.slic(self.image, enforce_connectivity=False,
mask=self.msk_slice, multichannel=True)

View File

@ -0,0 +1,14 @@
import numpy as np
from skimage import transform
class TransformSuite:
"""Benchmark for transform routines in scikit-image."""
def setup(self):
self.image = np.zeros((2000, 2000))
idx = np.arange(500, 1500)
self.image[idx[::-1], idx] = 255
self.image[idx, idx] = 255
def time_hough_line(self):
        hspace, angles, dists = transform.hough_line(self.image)

View File

@ -0,0 +1,54 @@
import numpy as np
from skimage.transform import SimilarityTransform, warp
import warnings
import functools
import inspect
try:
from skimage.util.dtype import _convert as convert
except ImportError:
from skimage.util.dtype import convert
class WarpSuite:
params = ([np.uint8, np.uint16, np.float32, np.float64],
[128, 1024, 4096],
[0, 1, 3],
# [np.float32, np.float64]
)
# param_names = ['dtype_in', 'N', 'order', 'dtype_tform']
param_names = ['dtype_in', 'N', 'order']
# def setup(self, dtype_in, N, order, dtype_tform):
def setup(self, dtype_in, N, order):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "Possible precision loss")
self.image = convert(np.random.random((N, N)), dtype=dtype_in)
self.tform = SimilarityTransform(scale=1, rotation=np.pi / 10,
translation=(0, 4))
self.tform.params = self.tform.params.astype('float32')
self.order = order
if 'dtype' in inspect.signature(warp).parameters:
self.warp = functools.partial(warp, dtype=self.image.dtype)
else:
# Keep a call to functools to have the same number of python
# function calls
self.warp = functools.partial(warp)
# def time_same_type(self, dtype_in, N, order, dtype_tform):
def time_same_type(self, dtype_in, N, order):
"""Test the case where the users wants to preserve their same low
precision data type."""
result = self.warp(self.image, self.tform, order=self.order,
preserve_range=True)
# convert back to input type, no-op if same type
result = result.astype(dtype_in, copy=False)
    # def time_to_float64(self, dtype_in, N, order, dtype_tform):
def time_to_float64(self, dtype_in, N, order):
"""Test the case where want to upvert to float64 for continued
transformations."""
result = warp(self.image, self.tform, order=self.order,
preserve_range=True)
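# The ``inspect.signature`` check in ``setup`` generalizes to a small helper.
# A hedged sketch (the helper name is illustrative, reusing the ``inspect``
# import at the top of this file): pass a keyword argument only when the
# callee actually accepts it.
def call_with_optional_kwargs(func, *args, **maybe_kwargs):
    accepted = inspect.signature(func).parameters
    kwargs = {k: v for k, v in maybe_kwargs.items() if k in accepted}
    return func(*args, **kwargs)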

141
deps/scikit-image/doc/Makefile vendored Normal file
View File

@ -0,0 +1,141 @@
# Makefile for Sphinx documentation
#
# You can set these variables from the command line.
PYTHON ?= python
SPHINXOPTS ?= -j auto
SPHINXBUILD ?= $(PYTHON) -m sphinx
SPHINXCACHE ?= build/doctrees
PAPER ?=
# Internal variables.
PAPEROPT_a4 = -D latex_paper_size=a4
PAPEROPT_letter = -D latex_paper_size=letter
ALLSPHINXOPTS = -d $(SPHINXCACHE) $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
DEST = build
.PHONY: all help clean html dirhtml pickle json htmlhelp qthelp latex changes linkcheck doctest gitwash gh-pages release_notes
all: html
help:
@echo "Please use \`make <target>' where <target> is one of"
@echo " html to make standalone HTML files"
@echo " dirhtml to make HTML files named index.html in directories"
@echo " pickle to make pickle files"
@echo " json to make JSON files"
@echo " htmlhelp to make HTML files and a HTML help project"
@echo " qthelp to make HTML files and a qthelp project"
@echo " devhelp to make HTML files and a Devhelp project"
@echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
@echo " latexpdf to make LaTeX files and run them through pdflatex"
@echo " changes to make an overview of all changed/added/deprecated items"
@echo " linkcheck to check all external links for integrity"
@echo " doctest to run all doctests embedded in the documentation (if enabled)"
@echo " gitwash to update the gitwash documentation"
clean:
-rm -rf $(DEST)
-rm -rf api
-rm -rf source/api
-find ./source/auto_examples/* -type f | grep -v blank | xargs -r rm -f
api:
@mkdir -p source/api
$(PYTHON) tools/build_modref_templates.py
@echo "Build API docs...done."
release_notes:
@echo "Copying release notes"
@tail -n +4 release/release_$$(\
ls release/release_*.*.* \
| sed s:release/release_:: \
| sort -nrt. -k1,1 -k2,2 \
| head -n 1) \
> release/_release_notes_for_docs.rst
html: api release_notes
$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(DEST)/html
cp -r source/plots $(DEST)/html
@echo
@echo "Build finished. The HTML pages are in build/html."
$(PYTHON) source/random_gallery.py
dirhtml:
$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(DEST)/dirhtml
@echo
@echo "Build finished. The HTML pages are in build/dirhtml."
pickle:
$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(DEST)/pickle
@echo
@echo "Build finished; now you can process the pickle files."
json:
$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(DEST)/json
@echo
@echo "Build finished; now you can process the JSON files."
htmlhelp:
$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(DEST)/htmlhelp
@echo
@echo "Build finished; now you can run HTML Help Workshop with the" \
".hhp project file in build/htmlhelp."
qthelp:
$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(DEST)/qthelp
@echo
@echo "Build finished; now you can run "qcollectiongenerator" with the" \
".qhcp project file in build/qthelp, like this:"
@echo "# qcollectiongenerator build/qthelp/scikitimage.qhcp"
@echo "To view the help file:"
@echo "# assistant -collectionFile build/qthelp/scikitimage.qhc"
devhelp:
$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(DEST)/devhelp
@echo
@echo "Build finished."
@echo "To view the help file:"
@echo "# mkdir -p $$HOME/.local/share/devhelp/scikitimage"
@echo "# ln -s build/devhelp $$HOME/.local/share/devhelp/scikitimage"
@echo "# devhelp"
latex: api
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(DEST)/latex
@echo
@echo "Build finished; the LaTeX files are in $(DEST)/latex."
@echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \
"run these through (pdf)latex."
latexpdf: latex
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(DEST)/latex
@echo "Running LaTeX files through pdflatex..."
make -C build/latex all-pdf
@echo "pdflatex finished; the PDF files are in build/latex."
changes:
$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(DEST)/changes
@echo
@echo "The overview file is in build/changes."
linkcheck:
$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(DEST)/linkcheck
@echo
@echo "Link check complete; look for any errors in the above output " \
"or in build/linkcheck/output.txt."
doctest:
$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(DEST)/doctest
@echo "Testing of doctests in the sources finished, look at the " \
"results in build/doctest/output.txt."
gh-pages:
$(PYTHON) gh-pages.py
gitwash:
$(PYTHON) tools/gitwash/gitwash_dumper.py source scikit-image \
--project-url=https://scikit-image.org \
--project-ml-url=https://mail.python.org/mailman3/lists/scikit-image.python.org/ \
--repo-name=scikit-image \
--github-user=scikit-image \
--source-suffix=.rst

0
deps/scikit-image/doc/__init__.py vendored Normal file
View File

View File

@ -0,0 +1,10 @@
.. _examples_gallery:
General examples
-------------------
General-purpose and introductory examples for scikit-image.
The `narrative documentation <../user_guide.html>`_ introduces
conventions and basic image manipulations.

View File

@ -0,0 +1,2 @@
Longer examples and demonstrations
----------------------------------

View File

@ -0,0 +1,310 @@
"""
============================
Explore 3D images (of cells)
============================
This tutorial is an introduction to three-dimensional image processing. Images
are represented as `numpy` arrays. A single-channel, or grayscale, image is a
2D matrix of pixel intensities of shape ``(n_row, n_col)``, where ``n_row``
(resp. ``n_col``) denotes the number of `rows` (resp. `columns`). We can
construct a 3D volume as a series of 2D `planes`, giving 3D images the shape
``(n_plane, n_row, n_col)``, where ``n_plane`` is the number of planes.
A multichannel, or RGB(A), image has an additional
`channel` dimension in the final position containing color information.
These conventions are summarized in the table below:
=============== =================================
Image type Coordinates
=============== =================================
2D grayscale ``[row, column]``
2D multichannel ``[row, column, channel]``
3D grayscale ``[plane, row, column]``
3D multichannel ``[plane, row, column, channel]``
=============== =================================
Some 3D images are constructed with equal resolution in each dimension (e.g.,
synchrotron tomography or computer-generated rendering of a sphere).
But most experimental data are captured
with a lower resolution in one of the three dimensions, e.g., photographing
thin slices to approximate a 3D structure as a stack of 2D images.
The distance between pixels in each dimension, called spacing, is encoded as
a tuple; it is accepted as a parameter by some `skimage` functions and can be
used to adjust contributions to filters.
The data used in this tutorial were provided by the Allen Institute for Cell
Science. They were downsampled by a factor of 4 in the `row` and `column`
dimensions to reduce their size and, hence, computational time. The spacing
information was reported by the microscope used to image the cells.
"""
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
import numpy as np
from skimage import exposure, io, util
from skimage.data import cells3d
#####################################################################
# Load and display 3D images
# ==========================
data = util.img_as_float(cells3d()[:, 1, :, :]) # grab just the nuclei
print("shape: {}".format(data.shape))
print("dtype: {}".format(data.dtype))
print("range: ({}, {})".format(data.min(), data.max()))
# Report spacing from microscope
original_spacing = np.array([0.2900000, 0.0650000, 0.0650000])
# Account for downsampling of slices by 4
rescaled_spacing = original_spacing * [1, 4, 4]
# Normalize spacing so that pixels are a distance of 1 apart
spacing = rescaled_spacing / rescaled_spacing[2]
print("microscope spacing: {}\n".format(original_spacing))
print("rescaled spacing: {} (after downsampling)\n".format(rescaled_spacing))
print("normalized spacing: {}\n".format(spacing))
#####################################################################
# Let us try to visualize the (3D) image with `io.imshow`.
try:
io.imshow(data, cmap="gray")
except TypeError as e:
print(str(e))
#####################################################################
# The `io.imshow` function can only display grayscale and RGB(A) 2D images.
# We can thus use it to visualize 2D planes. By fixing one axis, we can
# observe three different views of the image.
def show_plane(ax, plane, cmap="gray", title=None):
ax.imshow(plane, cmap=cmap)
ax.axis("off")
if title:
ax.set_title(title)
(n_plane, n_row, n_col) = data.shape
_, (a, b, c) = plt.subplots(ncols=3, figsize=(15, 5))
show_plane(a, data[n_plane // 2], title=f'Plane = {n_plane // 2}')
show_plane(b, data[:, n_row // 2, :], title=f'Row = {n_row // 2}')
show_plane(c, data[:, :, n_col // 2], title=f'Column = {n_col // 2}')
#####################################################################
# As hinted before, a three-dimensional image can be viewed as a series of
# two-dimensional planes. Let us write a helper function, `display`, to
# display 30 planes of our data. By default, every other plane is displayed.
def display(im3d, cmap="gray", step=2):
_, axes = plt.subplots(nrows=5, ncols=6, figsize=(16, 14))
vmin = im3d.min()
vmax = im3d.max()
for ax, image in zip(axes.flatten(), im3d[::step]):
ax.imshow(image, cmap=cmap, vmin=vmin, vmax=vmax)
ax.set_xticks([])
ax.set_yticks([])
display(data)
#####################################################################
# Alternatively, we can explore these planes (slices) interactively using
# Jupyter widgets. Let the user select which slice to display and show the
# position of this slice in the 3D dataset.
# Note that you cannot see the Jupyter widget at work in a static HTML page,
# as is the case in the scikit-image gallery. For the following piece of
# code to work, you need a Jupyter kernel running either locally or in the
# cloud: see the bottom of this page to either download the Jupyter notebook
# and run it on your computer, or open it directly in Binder.
def slice_in_3D(ax, i):
# From https://stackoverflow.com/questions/44881885/python-draw-3d-cube
Z = np.array([[0, 0, 0],
[1, 0, 0],
[1, 1, 0],
[0, 1, 0],
[0, 0, 1],
[1, 0, 1],
[1, 1, 1],
[0, 1, 1]])
Z = Z * data.shape
r = [-1, 1]
X, Y = np.meshgrid(r, r)
# Plot vertices
ax.scatter3D(Z[:, 0], Z[:, 1], Z[:, 2])
# List sides' polygons of figure
verts = [[Z[0], Z[1], Z[2], Z[3]],
[Z[4], Z[5], Z[6], Z[7]],
[Z[0], Z[1], Z[5], Z[4]],
[Z[2], Z[3], Z[7], Z[6]],
[Z[1], Z[2], Z[6], Z[5]],
             [Z[4], Z[7], Z[3], Z[0]]]
# Plot sides
ax.add_collection3d(
Poly3DCollection(
verts,
facecolors=(0, 1, 1, 0.25),
linewidths=1,
edgecolors="darkblue"
)
)
verts = np.array([[[0, 0, 0],
[0, 0, 1],
[0, 1, 1],
[0, 1, 0]]])
verts = verts * (60, 256, 256)
verts += [i, 0, 0]
ax.add_collection3d(
Poly3DCollection(
verts,
facecolors="magenta",
linewidths=1,
edgecolors="black"
)
)
ax.set_xlabel("plane")
ax.set_xlim(0, 100)
ax.set_ylabel("row")
ax.set_zlabel("col")
# Autoscale plot axes
    scaling = np.array([getattr(ax, f'get_{dim}lim')() for dim in "xyz"])
    ax.auto_scale_xyz(*[[np.min(scaling), np.max(scaling)]] * 3)
def explore_slices(data, cmap="gray"):
from ipywidgets import interact
N = len(data)
@interact(plane=(0, N - 1))
def display_slice(plane=34):
fig, ax = plt.subplots(figsize=(20, 5))
ax_3D = fig.add_subplot(133, projection="3d")
show_plane(ax, data[plane], title="Plane {}".format(plane), cmap=cmap)
slice_in_3D(ax_3D, plane)
plt.show()
return display_slice
explore_slices(data);
#####################################################################
# Adjust exposure
# ===============
# Scikit-image's `exposure` module contains a number of functions for
# adjusting image contrast. These functions operate on pixel values.
# Generally, neither image dimensionality nor pixel spacing needs to be
# considered. In local exposure correction, though, one might want to
# adjust the window size to ensure equal size in *real* coordinates along
# each axis.
#####################################################################
# `Gamma correction <https://en.wikipedia.org/wiki/Gamma_correction>`_
# brightens or darkens an image. A power-law transform, where `gamma` denotes
# the power-law exponent, is applied to each pixel in the image: `gamma < 1`
# will brighten an image, while `gamma > 1` will darken an image.
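#
# In formula form, for an image with values in [0, 1]: ``output = input **
# gamma``. For instance, a mid-gray value of 0.5 maps to 0.5 ** 0.5 ≈ 0.71
# (brighter) when ``gamma`` is 0.5, and to 0.5 ** 1.5 ≈ 0.35 (darker) when
# ``gamma`` is 1.5.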
def plot_hist(ax, data, title=None):
# Helper function for plotting histograms
ax.hist(data.ravel(), bins=256)
ax.ticklabel_format(axis="y", style="scientific", scilimits=(0, 0))
if title:
ax.set_title(title)
gamma_low_val = 0.5
gamma_low = exposure.adjust_gamma(data, gamma=gamma_low_val)
gamma_high_val = 1.5
gamma_high = exposure.adjust_gamma(data, gamma=gamma_high_val)
_, ((a, b, c), (d, e, f)) = plt.subplots(nrows=2, ncols=3, figsize=(12, 8))
show_plane(a, data[32], title='Original')
show_plane(b, gamma_low[32], title=f'Gamma = {gamma_low_val}')
show_plane(c, gamma_high[32], title=f'Gamma = {gamma_high_val}')
plot_hist(d, data)
plot_hist(e, gamma_low)
plot_hist(f, gamma_high)
# sphinx_gallery_thumbnail_number = 4
#####################################################################
# `Histogram
# equalization <https://en.wikipedia.org/wiki/Histogram_equalization>`_
# improves contrast in an image by redistributing pixel intensities. The most
# common pixel intensities get spread out, increasing contrast in low-contrast
# areas. One downside of this approach is that it may enhance background
# noise.
equalized_data = exposure.equalize_hist(data)
display(equalized_data)
#####################################################################
# As before, if we have a Jupyter kernel running, we can explore the above
# slices interactively.
explore_slices(equalized_data);
#####################################################################
# Let us now plot the image histogram before and after histogram equalization.
# Below, we plot the respective cumulative distribution functions (CDF).
_, ((a, b), (c, d)) = plt.subplots(nrows=2, ncols=2, figsize=(16, 8))
plot_hist(a, data, title="Original histogram")
plot_hist(b, equalized_data, title="Equalized histogram")
cdf, bins = exposure.cumulative_distribution(data.ravel())
c.plot(bins, cdf, "r")
c.set_title("Original CDF")
cdf, bins = exposure.cumulative_distribution(equalized_data.ravel())
d.plot(bins, cdf, "r")
d.set_title("Histogram equalization CDF")
#####################################################################
# Most experimental images are affected by salt and pepper noise. A few bright
# artifacts can decrease the relative intensity of the pixels of interest. A
# simple way to improve contrast is to clip the pixel values on the lowest and
# highest extremes. Clipping the darkest and brightest 0.5% of pixels will
# increase the overall contrast of the image.
vmin, vmax = np.percentile(data, q=(0.5, 99.5))
clipped_data = exposure.rescale_intensity(
data,
in_range=(vmin, vmax),
out_range=np.float32
)
display(clipped_data)

View File

@ -0,0 +1,157 @@
"""
==================================================
Comparing edge-based and region-based segmentation
==================================================
In this example, we will see how to segment objects from a background. We use
the ``coins`` image from ``skimage.data``, which shows several coins outlined
against a darker background.
"""
import numpy as np
import matplotlib.pyplot as plt
from skimage import data
from skimage.exposure import histogram
coins = data.coins()
hist, hist_centers = histogram(coins)
fig, axes = plt.subplots(1, 2, figsize=(8, 3))
axes[0].imshow(coins, cmap=plt.cm.gray)
axes[0].axis('off')
axes[1].plot(hist_centers, hist, lw=2)
axes[1].set_title('histogram of gray values')
######################################################################
#
# Thresholding
# ============
#
# A simple way to segment the coins is to choose a threshold based on the
# histogram of gray values. Unfortunately, thresholding this image gives a
# binary image that either misses significant parts of the coins or merges
# parts of the background with the coins:
fig, axes = plt.subplots(1, 2, figsize=(8, 3), sharey=True)
axes[0].imshow(coins > 100, cmap=plt.cm.gray)
axes[0].set_title('coins > 100')
axes[1].imshow(coins > 150, cmap=plt.cm.gray)
axes[1].set_title('coins > 150')
for a in axes:
a.axis('off')
plt.tight_layout()
######################################################################
# Edge-based segmentation
# =======================
#
# Next, we try to delineate the contours of the coins using edge-based
# segmentation. To do this, we first get the edges of features using the
# Canny edge-detector.
from skimage.feature import canny
edges = canny(coins)
fig, ax = plt.subplots(figsize=(4, 3))
ax.imshow(edges, cmap=plt.cm.gray)
ax.set_title('Canny detector')
ax.axis('off')
######################################################################
# These contours are then filled using mathematical morphology.
from scipy import ndimage as ndi
fill_coins = ndi.binary_fill_holes(edges)
fig, ax = plt.subplots(figsize=(4, 3))
ax.imshow(fill_coins, cmap=plt.cm.gray)
ax.set_title('filling the holes')
ax.axis('off')
######################################################################
# Small spurious objects are easily removed by setting a minimum size for
# valid objects.
from skimage import morphology
coins_cleaned = morphology.remove_small_objects(fill_coins, 21)
fig, ax = plt.subplots(figsize=(4, 3))
ax.imshow(coins_cleaned, cmap=plt.cm.gray)
ax.set_title('removing small objects')
ax.axis('off')
######################################################################
# However, this method is not very robust, since contours that are not
# perfectly closed are not filled correctly, as is the case for one unfilled
# coin above.
#
# Region-based segmentation
# =========================
#
# We therefore try a region-based method using the watershed transform.
# First, we find an elevation map using the Sobel gradient of the image.
from skimage.filters import sobel
elevation_map = sobel(coins)
fig, ax = plt.subplots(figsize=(4, 3))
ax.imshow(elevation_map, cmap=plt.cm.gray)
ax.set_title('elevation map')
ax.axis('off')
######################################################################
# Next we find markers of the background and the coins based on the extreme
# parts of the histogram of gray values.
markers = np.zeros_like(coins)
markers[coins < 30] = 1
markers[coins > 150] = 2
fig, ax = plt.subplots(figsize=(4, 3))
ax.imshow(markers, cmap=plt.cm.nipy_spectral)
ax.set_title('markers')
ax.axis('off')
######################################################################
# Finally, we use the watershed transform to fill regions of the elevation
# map starting from the markers determined above:
from skimage import segmentation
segmentation_coins = segmentation.watershed(elevation_map, markers)
fig, ax = plt.subplots(figsize=(4, 3))
ax.imshow(segmentation_coins, cmap=plt.cm.gray)
ax.set_title('segmentation')
ax.axis('off')
######################################################################
# This last method works even better, and the coins can be segmented and
# labeled individually.
from skimage.color import label2rgb
segmentation_coins = ndi.binary_fill_holes(segmentation_coins - 1)
labeled_coins, _ = ndi.label(segmentation_coins)
image_label_overlay = label2rgb(labeled_coins, image=coins, bg_label=0)
fig, axes = plt.subplots(1, 2, figsize=(8, 3), sharey=True)
axes[0].imshow(coins, cmap=plt.cm.gray)
axes[0].contour(segmentation_coins, [0.5], linewidths=1.2, colors='y')
axes[1].imshow(image_label_overlay)
for a in axes:
a.axis('off')
plt.tight_layout()
plt.show()

View File

@ -0,0 +1,102 @@
"""
=========================================
Face detection using a cascade classifier
=========================================
This computer vision example shows how to detect faces in an image, using an
object detection framework based on machine learning.

First, you will need an XML file from which the trained data can be read. The
framework works with files trained using Multi-block Local Binary Patterns
features (see `MB-LBP <plot_multiblock_local_binary_pattern.html>`_) and
Gentle AdaBoost with an attentional cascade. Thus, the detection framework
will also work with `xml files from OpenCV
<https://github.com/opencv/opencv/tree/master/data/lbpcascades>`_. There you
can find files trained to detect cat faces, profile faces, and other things.
If you want to detect frontal faces, though, the respective file is already
included in scikit-image.

Next, you will have to specify the parameters of the ``detect_multi_scale``
function; their meaning is as follows.

The first one is ``scale_factor``. To find all faces, the algorithm searches
on multiple scales by changing the size of the search window. The smallest
window size is the size of the window used in training, which is specified
in the XML file with the trained parameters. The ``scale_factor`` parameter
specifies the ratio by which the search window is enlarged at each step.
Increasing it decreases the search time, but also the accuracy: faces at
some scales may then go undetected.

``step_ratio`` specifies the step of the sliding window used to search for
faces on each scale of the image. If this parameter equals one, all possible
locations are searched. If it is greater than one, say two, the window is
moved by two pixels, and not all possible locations are searched. Increasing
this parameter reduces the running time of the algorithm at the cost of
accuracy.

``min_size`` is the minimum size of the search window during the scale
search, and ``max_size`` specifies its maximum size. If you know the size of
the faces in the images you want to analyze, you should specify these
parameters as precisely as possible: this avoids expensive computation and
can reduce the number of false detections. You can save a lot of time by
increasing ``min_size``, because the majority of the time is spent searching
on the smallest scales.

The ``min_neighbour_number`` and ``intersection_score_threshold`` parameters
cluster excessive detections of the same face and filter out false
detections. True faces usually have many detections around them, whereas
false ones usually have only a single detection. First, the algorithm
searches for clusters: two rectangular detections are placed in the same
cluster if the intersection score between them is larger than
``intersection_score_threshold``. The intersection score is computed as
(intersection area) / (area of the smaller rectangle); this criterion was
chosen over intersection over union to avoid the corner case where a small
rectangle inside a big one has a small intersection score. Each cluster is
then thresholded with ``min_neighbour_number``, keeping only the clusters
that contain at least that number of detections.

You should also take into account that false detections are inevitable; if
you want a really precise detector, you will have to train it yourself using
the `OpenCV train cascade utility
<https://docs.opencv.org/2.4/doc/user_guide/ug_traincascade.html>`_.
"""
from skimage import data
from skimage.feature import Cascade
import matplotlib.pyplot as plt
from matplotlib import patches
# Load the trained file from the module root.
trained_file = data.lbp_frontal_face_cascade_filename()
# Initialize the detector cascade.
detector = Cascade(trained_file)
img = data.astronaut()
detected = detector.detect_multi_scale(img=img,
scale_factor=1.2,
step_ratio=1,
min_size=(60, 60),
max_size=(123, 123))
plt.imshow(img)
img_desc = plt.gca()
plt.set_cmap('gray')
for patch in detected:
img_desc.add_patch(
patches.Rectangle(
(patch['c'], patch['r']),
patch['width'],
patch['height'],
fill=False,
color='r',
linewidth=2
)
)
plt.show()

View File

@ -0,0 +1,174 @@
"""
======================================================
Face classification using Haar-like feature descriptor
======================================================
Haar-like feature descriptors were successfully used to implement the first
real-time face detector [1]_. Inspired by this application, we propose an
example illustrating the extraction, selection, and classification of Haar-like
features to detect faces vs. non-faces.
Notes
-----
This example relies on `scikit-learn <https://scikit-learn.org/>`_ for feature
selection and classification.
References
----------
.. [1] Viola, Paul, and Michael J. Jones. "Robust real-time face
detection." International journal of computer vision 57.2
(2004): 137-154.
https://www.merl.com/publications/docs/TR2004-043.pdf
:DOI:`10.1109/CVPR.2001.990517`
"""
import sys
from time import time
import numpy as np
import matplotlib.pyplot as plt
from dask import delayed
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_auc_score
from skimage.data import lfw_subset
from skimage.transform import integral_image
from skimage.feature import haar_like_feature
from skimage.feature import haar_like_feature_coord
from skimage.feature import draw_haar_like_feature
###########################################################################
# The procedure to extract the Haar-like features from an image is relatively
# simple. Firstly, a region of interest (ROI) is defined. Secondly, the
# integral image within this ROI is computed. Finally, the integral image is
# used to extract the features.
@delayed
def extract_feature_image(img, feature_type, feature_coord=None):
"""Extract the haar feature for the current image"""
ii = integral_image(img)
return haar_like_feature(ii, 0, 0, ii.shape[0], ii.shape[1],
feature_type=feature_type,
feature_coord=feature_coord)
###########################################################################
# We use a subset of the CBCL dataset, which is composed of 100 face images
# and
# 100 non-face images. Each image has been resized to a ROI of 19 by 19
# pixels. We select 75 images from each group to train a classifier and
# determine the most salient features. The remaining 25 images from each
# class are used to assess the performance of the classifier.
images = lfw_subset()
# To speed up the example, extract the two types of features only
feature_types = ['type-2-x', 'type-2-y']
# Build a computation graph using Dask. This allows the use of multiple
# CPU cores later during the actual computation
X = delayed(extract_feature_image(img, feature_types) for img in images)
# Compute the result
t_start = time()
X = np.array(X.compute(scheduler='threads'))
time_full_feature_comp = time() - t_start
# Label images (100 faces and 100 non-faces)
y = np.array([1] * 100 + [0] * 100)
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=150,
random_state=0,
stratify=y)
# Extract all possible features
feature_coord, feature_type = \
haar_like_feature_coord(width=images.shape[2], height=images.shape[1],
feature_type=feature_types)
###########################################################################
# A random forest classifier can be trained in order to select the most
# salient features, specifically for face classification. The idea is to
# determine which features are most often used by the ensemble of trees.
# By using only the most salient features in subsequent steps, we can
# drastically speed up the computation while retaining accuracy.
# Train a random forest classifier and assess its performance
clf = RandomForestClassifier(n_estimators=1000, max_depth=None,
max_features=100, n_jobs=-1, random_state=0)
t_start = time()
clf.fit(X_train, y_train)
time_full_train = time() - t_start
auc_full_features = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
# Sort features in order of importance and plot the six most significant
idx_sorted = np.argsort(clf.feature_importances_)[::-1]
fig, axes = plt.subplots(3, 2)
for idx, ax in enumerate(axes.ravel()):
image = images[0]
image = draw_haar_like_feature(image, 0, 0,
images.shape[2],
images.shape[1],
[feature_coord[idx_sorted[idx]]])
ax.imshow(image)
ax.set_xticks([])
ax.set_yticks([])
_ = fig.suptitle('The most important features')
###########################################################################
# We can select the most important features by checking the cumulative sum
# of the feature importance. In this example, we keep the features
# representing 70% of the cumulative value (which corresponds to using only 3%
# of the total number of features).
cdf_feature_importances = np.cumsum(clf.feature_importances_[idx_sorted])
cdf_feature_importances /= cdf_feature_importances[-1] # divide by max value
sig_feature_count = np.count_nonzero(cdf_feature_importances < 0.7)
sig_feature_percent = round(sig_feature_count /
len(cdf_feature_importances) * 100, 1)
print(('{} features, or {}%, account for 70% of branch points in the '
'random forest.').format(sig_feature_count, sig_feature_percent))
# Select the determined number of most informative features
feature_coord_sel = feature_coord[idx_sorted[:sig_feature_count]]
feature_type_sel = feature_type[idx_sorted[:sig_feature_count]]
# Note: it is also possible to select the features directly from the matrix X,
# but we would like to emphasize the usage of `feature_coord` and `feature_type`
# to recompute a subset of desired features.
# Build the computational graph using Dask
X = delayed(extract_feature_image(img, feature_type_sel, feature_coord_sel)
for img in images)
# Compute the result
t_start = time()
X = np.array(X.compute(scheduler='threads'))
time_subs_feature_comp = time() - t_start
y = np.array([1] * 100 + [0] * 100)
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=150,
random_state=0,
stratify=y)
###########################################################################
# Once the features are extracted, we can train and test a new classifier.
t_start = time()
clf.fit(X_train, y_train)
time_subs_train = time() - t_start
auc_subs_features = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
summary = (('Computing the full feature set took {:.3f}s, plus {:.3f}s '
'training, for an AUC of {:.2f}. Computing the restricted '
'feature set took {:.3f}s, plus {:.3f}s training, '
'for an AUC of {:.2f}.')
.format(time_full_feature_comp, time_full_train,
auc_full_features, time_subs_feature_comp,
time_subs_train, auc_subs_features))
print(summary)
plt.show()

View File

@ -0,0 +1,210 @@
"""
================================
Segment human cells (in mitosis)
================================
In this example, we analyze a microscopy image of human cells. We use data
provided by Jason Moffat [1]_ through
`CellProfiler <https://cellprofiler.org/examples/#human-cells>`_.
.. [1] Moffat J, Grueneberg DA, Yang X, Kim SY, Kloepfer AM, Hinkle G, Piqani
B, Eisenhaure TM, Luo B, Grenier JK, Carpenter AE, Foo SY, Stewart SA,
Stockwell BR, Hacohen N, Hahn WC, Lander ES, Sabatini DM, Root DE
(2006) "A lentiviral RNAi library for human and mouse genes applied to
an arrayed viral high-content screen" Cell, 124(6):1283-98.
PMID: 16564017
:DOI:`10.1016/j.cell.2006.01.040`
"""
import matplotlib.pyplot as plt
import numpy as np
from scipy import ndimage as ndi
from skimage import (
color, feature, filters, measure, morphology, segmentation, util
)
from skimage.data import human_mitosis
image = human_mitosis()
fig, ax = plt.subplots()
ax.imshow(image, cmap='gray')
ax.set_title('Microscopy image of human cells stained for nuclear DNA')
plt.show()
#####################################################################
# We can see many cell nuclei on a dark background. Most of them are smooth
# and have an elliptical shape. However, we can distinguish some brighter
# spots corresponding to nuclei undergoing
# `mitosis <https://en.wikipedia.org/wiki/Mitosis>`_ (cell division).
#####################################################################
# Another way of visualizing a greyscale image is contour plotting:
fig, ax = plt.subplots(figsize=(5, 5))
qcs = ax.contour(image, origin='image')
ax.set_title('Contour plot of the same raw image')
# sphinx_gallery_thumbnail_number = 2
plt.show()
#####################################################################
# The contour lines are drawn at these levels:
qcs.levels
#####################################################################
# Each level has, respectively, the following number of segments:
[len(seg) for seg in qcs.allsegs]
#####################################################################
# Estimate the mitotic index
# ==========================
# Cell biology uses the
# `mitotic index <https://en.wikipedia.org/wiki/Mitotic_index>`_
# to quantify cell division and,
# hence, cell proliferation. By definition, it is the ratio of cells in
# mitosis over the total number of cells. To analyze the above image,
# we are thus interested in two thresholds: one separating the nuclei from the
# background, the other separating the dividing nuclei (brighter spots) from
# the non-dividing nuclei.
# To separate these three different classes of pixels, we
# resort to :ref:`sphx_glr_auto_examples_segmentation_plot_multiotsu.py`.
thresholds = filters.threshold_multiotsu(image, classes=3)
regions = np.digitize(image, bins=thresholds)
fig, ax = plt.subplots(ncols=2, figsize=(10, 5))
ax[0].imshow(image)
ax[0].set_title('Original')
ax[0].axis('off')
ax[1].imshow(regions)
ax[1].set_title('Multi-Otsu thresholding')
ax[1].axis('off')
plt.show()
#####################################################################
# Since there are overlapping nuclei, thresholding is not enough to segment
# all the nuclei.
# If it were, we could readily compute a mitotic index for this
# sample:
cells = image > thresholds[0]
dividing = image > thresholds[1]
labeled_cells = measure.label(cells)
labeled_dividing = measure.label(dividing)
naive_mi = labeled_dividing.max() / labeled_cells.max()
print(naive_mi)
#####################################################################
# Whoa, this can't be! The number of dividing nuclei
print(labeled_dividing.max())
#####################################################################
# is overestimated, while the total number of cells
print(labeled_cells.max())
#####################################################################
# is underestimated.
fig, ax = plt.subplots(ncols=3, figsize=(15, 5))
ax[0].imshow(image)
ax[0].set_title('Original')
ax[0].axis('off')
ax[2].imshow(cells)
ax[2].set_title('All nuclei?')
ax[2].axis('off')
ax[1].imshow(dividing)
ax[1].set_title('Dividing nuclei?')
ax[1].axis('off')
plt.show()
#####################################################################
# Count dividing nuclei
# =====================
# Clearly, not all connected regions in the middle plot are dividing nuclei.
# On one hand, the second threshold (value of ``thresholds[1]``) appears to be
# too low to separate those very bright areas corresponding to dividing nuclei
# from relatively bright pixels otherwise present in many nuclei. On the other
# hand, we want a smoother image, removing small spurious objects and,
# possibly, merging clusters of neighbouring objects (some could correspond to
# two nuclei emerging from one cell division). In a way, the segmentation
# challenge we are facing with dividing nuclei is the opposite of that with
# (touching) cells.
#####################################################################
# To find suitable values for thresholds and filtering parameters, we proceed
# by dichotomy, visually and manually.
higher_threshold = 125
dividing = image > higher_threshold
smoother_dividing = filters.rank.mean(util.img_as_ubyte(dividing),
morphology.disk(4))
binary_smoother_dividing = smoother_dividing > 20
fig, ax = plt.subplots(figsize=(5, 5))
ax.imshow(binary_smoother_dividing)
ax.set_title('Dividing nuclei')
ax.axis('off')
plt.show()
#####################################################################
# We are left with
cleaned_dividing = measure.label(binary_smoother_dividing)
print(cleaned_dividing.max())
#####################################################################
# dividing nuclei in this sample.
#####################################################################
# Segment nuclei
# ==============
# To separate overlapping nuclei, we resort to
# :ref:`sphx_glr_auto_examples_segmentation_plot_watershed.py`.
# To visualize the segmentation conveniently, we colour-code the labelled
# regions using the `color.label2rgb` function, specifying the background
# label with argument `bg_label=0`.
distance = ndi.distance_transform_edt(cells)
local_max_coords = feature.peak_local_max(distance, min_distance=7)
local_max_mask = np.zeros(distance.shape, dtype=bool)
local_max_mask[tuple(local_max_coords.T)] = True
markers = measure.label(local_max_mask)
segmented_cells = segmentation.watershed(-distance, markers, mask=cells)
fig, ax = plt.subplots(ncols=2, figsize=(10, 5))
ax[0].imshow(cells, cmap='gray')
ax[0].set_title('Overlapping nuclei')
ax[0].axis('off')
ax[1].imshow(color.label2rgb(segmented_cells, bg_label=0))
ax[1].set_title('Segmented nuclei')
ax[1].axis('off')
plt.show()
#####################################################################
# Additionally, we may use the function `color.label2rgb` to overlay the
# original image with the segmentation result, using transparency (the
# alpha parameter).
color_labels = color.label2rgb(segmented_cells, image, alpha=0.4, bg_label=0)
fig, ax = plt.subplots(figsize=(5, 5))
ax.imshow(color_labels)
ax.set_title('Segmentation result over raw image')
plt.show()
#####################################################################
# Finally, we find a total number of
print(segmented_cells.max())
#####################################################################
# cells in this sample. Therefore, we estimate the mitotic index to be:
print(cleaned_dividing.max() / segmented_cells.max())

View File

@ -0,0 +1,100 @@
"""
=======================
Visual image comparison
=======================
Image comparison is particularly useful when performing image processing tasks
such as exposure manipulations, filtering, and restoration.
This example shows how to easily compare two images with various approaches.
"""
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
from skimage import data, transform, exposure
from skimage.util import compare_images
img1 = data.coins()
img1_equalized = exposure.equalize_hist(img1)
img2 = transform.rotate(img1, 2)
comp_equalized = compare_images(img1, img1_equalized, method='checkerboard')
diff_rotated = compare_images(img1, img2, method='diff')
blend_rotated = compare_images(img1, img2, method='blend')
######################################################################
# Checkerboard
# ============
#
# The `checkerboard` method alternates tiles from the first and the second
# images.
fig = plt.figure(figsize=(8, 9))
gs = GridSpec(3, 2)
ax0 = fig.add_subplot(gs[0, 0])
ax1 = fig.add_subplot(gs[0, 1])
ax2 = fig.add_subplot(gs[1:, :])
ax0.imshow(img1, cmap='gray')
ax0.set_title('Original')
ax1.imshow(img1_equalized, cmap='gray')
ax1.set_title('Equalized')
ax2.imshow(comp_equalized, cmap='gray')
ax2.set_title('Checkerboard comparison')
for a in (ax0, ax1, ax2):
a.axis('off')
plt.tight_layout()
plt.show()
######################################################################
# Diff
# ====
#
# The `diff` method computes the absolute difference between the two images.
fig = plt.figure(figsize=(8, 9))
gs = GridSpec(3, 2)
ax0 = fig.add_subplot(gs[0, 0])
ax1 = fig.add_subplot(gs[0, 1])
ax2 = fig.add_subplot(gs[1:, :])
ax0.imshow(img1, cmap='gray')
ax0.set_title('Original')
ax1.imshow(img2, cmap='gray')
ax1.set_title('Rotated')
ax2.imshow(diff_rotated, cmap='gray')
ax2.set_title('Diff comparison')
for a in (ax0, ax1, ax2):
a.axis('off')
plt.tight_layout()
plt.show()
######################################################################
# Blend
# =====
#
# The `blend` method computes the average of the two images.
fig = plt.figure(figsize=(8, 9))
gs = GridSpec(3, 2)
ax0 = fig.add_subplot(gs[0, 0])
ax1 = fig.add_subplot(gs[0, 1])
ax2 = fig.add_subplot(gs[1:, :])
ax0.imshow(img1, cmap='gray')
ax0.set_title('Original')
ax1.imshow(img2, cmap='gray')
ax1.set_title('Rotated')
ax2.imshow(blend_rotated, cmap='gray')
ax2.set_title('Blend comparison')
for a in (ax0, ax1, ax2):
a.axis('off')
plt.tight_layout()
plt.show()

View File

@ -0,0 +1,240 @@
"""
=======================
Morphological Filtering
=======================
Morphological image processing is a collection of non-linear operations related
to the shape or morphology of features in an image, such as boundaries,
skeletons, etc. In any given technique, we probe an image with a small shape or
template called a structuring element, which defines the region of interest or
neighborhood around a pixel.
In this document we outline the following basic morphological operations:
1. Erosion
2. Dilation
3. Opening
4. Closing
5. White Tophat
6. Black Tophat
7. Skeletonize
8. Convex Hull
To get started, let's load a gray-scale image, here the Shepp-Logan phantom
from ``skimage.data``; note that morphology functions only work on gray-scale
or binary images.
"""
import matplotlib.pyplot as plt
from skimage import data
from skimage.util import img_as_ubyte
orig_phantom = img_as_ubyte(data.shepp_logan_phantom())
fig, ax = plt.subplots()
ax.imshow(orig_phantom, cmap=plt.cm.gray)
######################################################################
# Let's also define a convenience function for plotting comparisons:
def plot_comparison(original, filtered, filter_name):
fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(8, 4), sharex=True,
sharey=True)
ax1.imshow(original, cmap=plt.cm.gray)
ax1.set_title('original')
ax1.axis('off')
ax2.imshow(filtered, cmap=plt.cm.gray)
ax2.set_title(filter_name)
ax2.axis('off')
######################################################################
# Erosion
# =======
#
# Morphological ``erosion`` sets a pixel at (i, j) to the *minimum over all
# pixels in the neighborhood centered at (i, j)*. The structuring element,
# ``selem``, passed to ``erosion`` is a boolean array that describes this
# neighborhood. Below, we use ``disk`` to create a circular structuring
# element, which we use for most of the following examples.
from skimage.morphology import erosion, dilation, opening, closing, white_tophat
from skimage.morphology import black_tophat, skeletonize, convex_hull_image
from skimage.morphology import disk
selem = disk(6)
eroded = erosion(orig_phantom, selem)
plot_comparison(orig_phantom, eroded, 'erosion')
######################################################################
# Notice how the white boundary of the image disappears or gets eroded as we
# increase the size of the disk. Also notice the increase in size of the two
# black ellipses in the center and the disappearance of the 3 light grey
# patches in the lower part of the image.
#
# Dilation
# ========
#
# Morphological ``dilation`` sets a pixel at (i, j) to the *maximum over all
# pixels in the neighborhood centered at (i, j)*. Dilation enlarges bright
# regions and shrinks dark regions.
dilated = dilation(orig_phantom, selem)
plot_comparison(orig_phantom, dilated, 'dilation')
######################################################################
# Notice how the white boundary of the image thickens, or gets dilated, as we
# increase the size of the disk. Also notice the decrease in size of the two
# black ellipses in the center, and the thickening of the light grey circle
# in the center and the 3 patches in the lower part of the image.
#
# Opening
# =======
#
# Morphological ``opening`` on an image is defined as an *erosion followed by
# a dilation*. Opening can remove small bright spots (i.e. "salt") and
# connect small dark cracks.
opened = opening(orig_phantom, selem)
plot_comparison(orig_phantom, opened, 'opening')
######################################################################
# Since ``opening`` an image starts with an erosion operation, light regions
# that are *smaller* than the structuring element are removed. The dilation
# operation that follows ensures that light regions that are *larger* than
# the structuring element retain their original size. Notice how the light
# and dark shapes in the center retain their original thickness but the 3
# lighter patches in the bottom get completely eroded. The size dependence
# is highlighted by the outer white ring: the parts of the ring thinner than
# the structuring element were completely erased, while the thicker region
# at the top retains its original thickness.
#
# Closing
# =======
#
# Morphological ``closing`` on an image is defined as a *dilation followed by
# an erosion*. Closing can remove small dark spots (i.e. "pepper") and
# connect small bright cracks.
#
# To illustrate this more clearly, let's add a small crack to the white
# border:
phantom = orig_phantom.copy()
phantom[10:30, 200:210] = 0
closed = closing(phantom, selem)
plot_comparison(phantom, closed, 'closing')
######################################################################
# Since ``closing`` an image starts with a dilation operation, dark regions
# that are *smaller* than the structuring element are removed. The erosion
# operation that follows ensures that dark regions that are *larger* than the
# structuring element retain their original size. Notice how the white
# ellipses at the bottom get connected because of dilation, but other dark
# regions retain their original sizes. Also notice how the crack we added is
# mostly removed.
#
# White tophat
# ============
#
# The ``white_tophat`` of an image is defined as the *image minus its
# morphological opening*. This operation returns the bright spots of the
# image that are smaller than the structuring element.
#
# To make things interesting, we'll add bright and dark spots to the image:
phantom = orig_phantom.copy()
phantom[340:350, 200:210] = 255
phantom[100:110, 200:210] = 0
w_tophat = white_tophat(phantom, selem)
plot_comparison(phantom, w_tophat, 'white tophat')
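# As a quick, hedged sanity check of the definition above: for this grayscale
# image, the white tophat equals the image minus its morphological opening.
import numpy as np
assert np.array_equal(w_tophat, phantom - opening(phantom, selem))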
######################################################################
# As you can see, the 10-pixel wide white square is highlighted since it is
# smaller than the structuring element. Also, the thin, white edges around
# most of the ellipse are retained because they're smaller than the
# structuring element, but the thicker region at the top disappears.
#
# Black tophat
# ============
#
# The ``black_tophat`` of an image is defined as its morphological **closing
# minus the original image**. This operation returns the *dark spots of the
# image that are smaller than the structuring element*.
b_tophat = black_tophat(phantom, selem)
plot_comparison(phantom, b_tophat, 'black tophat')
######################################################################
# As you can see, the 10-pixel wide black square is highlighted since
# it is smaller than the structuring element.
#
# **Duality**
#
# As you should have noticed, many of these operations are simply the
# reverse of another operation. This duality can be summarized as follows
# (and spot-checked right after this list):
#
#  1. Erosion <-> Dilation
#
#  2. Opening <-> Closing
#
#  3. White tophat <-> Black tophat
#
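# A short spot check of the first pair (hedged: exact equality assumes a
# symmetric structuring element and matching border conventions):
print(np.array_equal(erosion(orig_phantom, selem),
                     255 - dilation(255 - orig_phantom, selem)))  # expect True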
# Skeletonize
# ===========
#
# Thinning is used to reduce each connected component in a binary image to a
# *single-pixel wide skeleton*. It is important to note that this is
# performed on binary images only.
horse = data.horse()
sk = skeletonize(horse == 0)
plot_comparison(horse, sk, 'skeletonize')
######################################################################
#
# As the name suggests, this technique is used to thin the image to 1-pixel
# wide skeleton by applying thinning successively.
#
# Convex hull
# ===========
#
# The ``convex_hull_image`` is the *set of pixels included in the smallest
# convex polygon that surrounds all white pixels in the input image*. Again,
# note that this is also performed on binary images.
hull1 = convex_hull_image(horse == 0)
plot_comparison(horse, hull1, 'convex hull')
######################################################################
# As the figure illustrates, ``convex_hull_image`` gives the smallest polygon
# which completely covers the white (i.e., ``True``) pixels in the image.
#
# If we add a small grain to the image, we can see how the convex hull adapts
# to enclose that grain:
import numpy as np
horse_mask = horse == 0
horse_mask[45:50, 75:80] = 1
hull2 = convex_hull_image(horse_mask)
plot_comparison(horse_mask, hull2, 'convex hull')
######################################################################
#
# Additional Resources
# ====================
#
# 1. `MathWorks tutorial on morphological processing
# <https://se.mathworks.com/help/images/morphological-dilation-and-erosion.html>`_
#
# 2. `Auckland university's tutorial on Morphological Image Processing
# <https://www.cs.auckland.ac.nz/courses/compsci773s1c/lectures/ImageProcessing-html/topic4.htm>`_
#
# 3. https://en.wikipedia.org/wiki/Mathematical_morphology
plt.show()

View File

@ -0,0 +1,722 @@
"""
============
Rank filters
============
Rank filters are non-linear filters using local gray-level ordering to
compute the filtered value. This ensemble of filters share a common base:
the local gray-level histogram is computed on the neighborhood of a pixel
(defined by a 2D structuring element). If the filtered value is taken as the
middle value of the histogram, we get the classical median filter.
Rank filters can be used for several purposes, such as:
* image quality enhancement,
e.g., image smoothing, sharpening
* image pre-processing,
e.g., noise reduction, contrast enhancement
* feature extraction,
e.g., border detection, isolated point detection
* image post-processing,
e.g., small object removal, object grouping, contour smoothing
Some well-known filters (e.g., morphological dilation and morphological
erosion) are specific cases of rank filters [1]_.
In this example, we will see how to filter a gray-level image using some of the
linear and non-linear filters available in skimage. We use the ``camera`` image
from `skimage.data` for all comparisons.
.. [1] Pierre Soille, On morphological operators based on rank filters, Pattern
Recognition 35 (2002) 527-535, :DOI:`10.1016/S0031-3203(01)00047-4`
"""
import numpy as np
import matplotlib.pyplot as plt
from skimage.util import img_as_ubyte
from skimage import data
from skimage.exposure import histogram
noisy_image = img_as_ubyte(data.camera())
hist, hist_centers = histogram(noisy_image)
fig, ax = plt.subplots(ncols=2, figsize=(10, 5))
ax[0].imshow(noisy_image, cmap=plt.cm.gray)
ax[0].axis('off')
ax[1].plot(hist_centers, hist, lw=2)
ax[1].set_title('Gray-level histogram')
plt.tight_layout()
######################################################################
#
# Noise removal
# =============
#
# Some noise is added to the image: 1% of pixels are randomly set to 255, 1%
# are randomly set to 0. The **median** filter is applied to remove the
# noise.
from skimage.filters.rank import median
from skimage.morphology import disk, ball
noise = np.random.random(noisy_image.shape)
noisy_image = img_as_ubyte(data.camera())
noisy_image[noise > 0.99] = 255
noisy_image[noise < 0.01] = 0
fig, axes = plt.subplots(2, 2, figsize=(10, 10), sharex=True, sharey=True)
ax = axes.ravel()
ax[0].imshow(noisy_image, vmin=0, vmax=255, cmap=plt.cm.gray)
ax[0].set_title('Noisy image')
ax[1].imshow(median(noisy_image, disk(1)), vmin=0, vmax=255, cmap=plt.cm.gray)
ax[1].set_title('Median $r=1$')
ax[2].imshow(median(noisy_image, disk(5)), vmin=0, vmax=255, cmap=plt.cm.gray)
ax[2].set_title('Median $r=5$')
ax[3].imshow(median(noisy_image, disk(20)), vmin=0, vmax=255, cmap=plt.cm.gray)
ax[3].set_title('Median $r=20$')
for a in ax:
a.axis('off')
plt.tight_layout()
######################################################################
#
# The added noise is efficiently removed: since the image defects are small
# (one pixel wide), a small filter radius is sufficient. As the radius
# increases, larger objects, such as the camera tripod, get filtered as well.
# The median filter is often used for noise removal because it preserves
# borders. For example, consider noise located on only a few pixels of the
# entire image, as is the case with salt-and-pepper noise [2]_: the median
# filter will ignore the noisy pixels, since they appear as outliers; thus,
# unlike a moving-average filter, it will not significantly shift the median
# of a group of local pixels (see the numeric sketch below).
#
# .. [2] https://en.wikipedia.org/wiki/Salt-and-pepper_noise
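# A one-line numeric illustration of this robustness: a single extreme value
# (e.g., 255) barely moves the median of a neighborhood but drags the mean up.
vals = np.array([10, 11, 12, 13, 255])
print(np.median(vals), vals.mean())  # 12.0 vs. 60.2
######################################################################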
#
# Image smoothing
# ===============
#
# The example hereunder shows how a local **mean** filter smooths the camera
# man image.
from skimage.filters.rank import mean
loc_mean = mean(noisy_image, disk(10))
fig, ax = plt.subplots(ncols=2, figsize=(10, 5), sharex=True, sharey=True)
ax[0].imshow(noisy_image, vmin=0, vmax=255, cmap=plt.cm.gray)
ax[0].set_title('Original')
ax[1].imshow(loc_mean, vmin=0, vmax=255, cmap=plt.cm.gray)
ax[1].set_title('Local mean $r=10$')
for a in ax:
a.axis('off')
plt.tight_layout()
######################################################################
#
# One may be interested in smoothing an image while preserving important
# borders (median filters already achieved this). Here, we use the
# **bilateral** filter that restricts the local neighborhood to pixels with
# gray levels similar to the central one.
#
# .. note::
#
# A different implementation is available for color images in
# :func:`skimage.restoration.denoise_bilateral`.
from skimage.filters.rank import mean_bilateral
noisy_image = img_as_ubyte(data.camera())
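# s0 and s1 bound the gray-level interval [g - s0, g + s1] around the center
# pixel's value g; only neighbors whose gray level falls in this interval
# contribute to the mean.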
bilat = mean_bilateral(noisy_image.astype(np.uint16), disk(20), s0=10, s1=10)
fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(10, 10),
sharex='row', sharey='row')
ax = axes.ravel()
ax[0].imshow(noisy_image, cmap=plt.cm.gray)
ax[0].set_title('Original')
ax[1].imshow(bilat, cmap=plt.cm.gray)
ax[1].set_title('Bilateral mean')
ax[2].imshow(noisy_image[100:250, 350:450], cmap=plt.cm.gray)
ax[3].imshow(bilat[100:250, 350:450], cmap=plt.cm.gray)
for a in ax:
a.axis('off')
plt.tight_layout()
######################################################################
# One can see that the large continuous areas of the image (e.g., the sky)
# are smoothed, whereas the other details are preserved.
#
# Contrast enhancement
# ====================
#
# We compare here global histogram equalization with its local counterpart.
#
# Histogram equalization [3]_ yields an image whose cumulative distribution
# function is roughly linear; its local version [4]_ enforces this within
# each pixel neighborhood and thus emphasizes every local gray-level
# variation.
#
# .. [3] https://en.wikipedia.org/wiki/Histogram_equalization
# .. [4] https://en.wikipedia.org/wiki/Adaptive_histogram_equalization
from skimage import exposure
from skimage.filters import rank
noisy_image = img_as_ubyte(data.camera())
# equalize globally and locally
glob = exposure.equalize_hist(noisy_image) * 255
loc = rank.equalize(noisy_image, disk(20))
# extract histogram for each image
hist = np.histogram(noisy_image, bins=np.arange(0, 256))
glob_hist = np.histogram(glob, bins=np.arange(0, 256))
loc_hist = np.histogram(loc, bins=np.arange(0, 256))
fig, axes = plt.subplots(nrows=3, ncols=2, figsize=(12, 12))
ax = axes.ravel()
ax[0].imshow(noisy_image, cmap=plt.cm.gray)
ax[0].axis('off')
ax[1].plot(hist[1][:-1], hist[0], lw=2)
ax[1].set_title('Histogram of gray values')
ax[2].imshow(glob, cmap=plt.cm.gray)
ax[2].axis('off')
ax[3].plot(glob_hist[1][:-1], glob_hist[0], lw=2)
ax[3].set_title('Histogram of gray values')
ax[4].imshow(loc, cmap=plt.cm.gray)
ax[4].axis('off')
ax[5].plot(loc_hist[1][:-1], loc_hist[0], lw=2)
ax[5].set_title('Histogram of gray values')
plt.tight_layout()
######################################################################
# Another way to maximize the number of gray levels used in an image is to
# apply local auto-leveling, i.e., the gray value of a pixel is
# proportionally remapped between the local minimum and local maximum.
#
# The following example shows how local auto-level enhances the camera man
# picture.
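# A sketch of that remapping, assuming the definition above (built from the
# local minimum and maximum rank filters; the library implementation may
# differ in rounding and edge handling):
from skimage.filters.rank import minimum, maximum
lo = minimum(noisy_image, disk(20)).astype(float)
hi = maximum(noisy_image, disk(20)).astype(float)
with np.errstate(divide='ignore', invalid='ignore'):
    manual_autolevel = np.where(hi > lo,
                                (noisy_image - lo) * 255 / (hi - lo), lo)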
from skimage.filters.rank import autolevel
noisy_image = img_as_ubyte(data.camera())
auto = autolevel(noisy_image.astype(np.uint16), disk(20))
fig, ax = plt.subplots(ncols=2, figsize=(10, 5), sharex=True, sharey=True)
ax[0].imshow(noisy_image, cmap=plt.cm.gray)
ax[0].set_title('Original')
ax[1].imshow(auto, cmap=plt.cm.gray)
ax[1].set_title('Local autolevel')
for a in ax:
a.axis('off')
plt.tight_layout()
######################################################################
# This filter is very sensitive to local outliers. One can
# moderate this using the percentile version of the auto-level filter,
# which uses given percentiles (one lower, one upper) in place
# of the local minimum and maximum. The example below illustrates how the
# percentile parameters influence the local auto-level result.
from skimage.filters.rank import autolevel_percentile
image = data.camera()
selem = disk(20)
loc_autolevel = autolevel(image, selem=selem)
loc_perc_autolevel0 = autolevel_percentile(image, selem=selem, p0=.01, p1=.99)
loc_perc_autolevel1 = autolevel_percentile(image, selem=selem, p0=.05, p1=.95)
loc_perc_autolevel2 = autolevel_percentile(image, selem=selem, p0=.1, p1=.9)
loc_perc_autolevel3 = autolevel_percentile(image, selem=selem, p0=.15, p1=.85)
fig, axes = plt.subplots(nrows=3, ncols=2, figsize=(10, 10),
sharex=True, sharey=True)
ax = axes.ravel()
title_list = ['Original',
'auto_level',
'auto-level 1%',
'auto-level 5%',
'auto-level 10%',
'auto-level 15%']
image_list = [image,
loc_autolevel,
loc_perc_autolevel0,
loc_perc_autolevel1,
loc_perc_autolevel2,
loc_perc_autolevel3]
for i in range(0, len(image_list)):
ax[i].imshow(image_list[i], cmap=plt.cm.gray, vmin=0, vmax=255)
ax[i].set_title(title_list[i])
ax[i].axis('off')
plt.tight_layout()
######################################################################
# The morphological contrast enhancement filter replaces the central pixel by
# the local maximum if the original pixel value is closer to the local
# maximum, and by the local minimum otherwise.
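# A sketch of that rule, assuming the description above (the exact
# tie-breaking in ``enhance_contrast`` may differ):
from skimage.filters.rank import minimum, maximum
lo = minimum(noisy_image, disk(5))
hi = maximum(noisy_image, disk(5))
manual_enh = np.where(hi - noisy_image < noisy_image - lo, hi, lo)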
from skimage.filters.rank import enhance_contrast
noisy_image = img_as_ubyte(data.camera())
enh = enhance_contrast(noisy_image, disk(5))
fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(10, 10),
sharex='row', sharey='row')
ax = axes.ravel()
ax[0].imshow(noisy_image, cmap=plt.cm.gray)
ax[0].set_title('Original')
ax[1].imshow(enh, cmap=plt.cm.gray)
ax[1].set_title('Local morphological contrast enhancement')
ax[2].imshow(noisy_image[100:250, 350:450], cmap=plt.cm.gray)
ax[3].imshow(enh[100:250, 350:450], cmap=plt.cm.gray)
for a in ax:
a.axis('off')
plt.tight_layout()
######################################################################
# The percentile version of the local morphological contrast enhancement uses
# percentile *p0* and *p1* instead of the local minimum and maximum.
from skimage.filters.rank import enhance_contrast_percentile
noisy_image = img_as_ubyte(data.camera())
penh = enhance_contrast_percentile(noisy_image, disk(5), p0=.1, p1=.9)
fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(10, 10),
sharex='row', sharey='row')
ax = axes.ravel()
ax[0].imshow(noisy_image, cmap=plt.cm.gray)
ax[0].set_title('Original')
ax[1].imshow(penh, cmap=plt.cm.gray)
ax[1].set_title('Local percentile morphological\n contrast enhancement')
ax[2].imshow(noisy_image[100:250, 350:450], cmap=plt.cm.gray)
ax[3].imshow(penh[100:250, 350:450], cmap=plt.cm.gray)
for a in ax:
a.axis('off')
plt.tight_layout()
######################################################################
#
# Image threshold
# ===============
#
# The Otsu threshold method [5]_ can be applied locally using the local
# gray-level distribution. In the example below, for each pixel, an "optimal"
# threshold is determined by maximizing the variance between two classes of
# pixels of the local neighborhood defined by a structuring element.
#
# These algorithms can be used on both 2D and 3D images.
#
# The example compares local thresholding with global thresholding, which is
# provided by :func:`skimage.filters.threshold_otsu`. Note that the former is
# much slower than the latter.
#
# .. [5] https://en.wikipedia.org/wiki/Otsu's_method
from skimage.filters.rank import otsu
from skimage.filters import threshold_otsu
from skimage import exposure
p8 = data.page()
radius = 10
selem = disk(radius)
# t_loc_otsu is an image
t_loc_otsu = otsu(p8, selem)
loc_otsu = p8 >= t_loc_otsu
# t_glob_otsu is a scalar
t_glob_otsu = threshold_otsu(p8)
glob_otsu = p8 >= t_glob_otsu
fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(12, 12),
sharex=True, sharey=True)
ax = axes.ravel()
fig.colorbar(ax[0].imshow(p8, cmap=plt.cm.gray), ax=ax[0])
ax[0].set_title('Original')
fig.colorbar(ax[1].imshow(t_loc_otsu, cmap=plt.cm.gray), ax=ax[1])
ax[1].set_title('Local Otsu ($r=%d$)' % radius)
ax[2].imshow(p8 >= t_loc_otsu, cmap=plt.cm.gray)
ax[2].set_title('Original >= local Otsu')
ax[3].imshow(glob_otsu, cmap=plt.cm.gray)
ax[3].set_title('Global Otsu ($t=%d$)' % t_glob_otsu)
for a in ax:
a.axis('off')
plt.tight_layout()
######################################################################
# The example below performs the same comparison, using a 3D image this time.
brain = exposure.rescale_intensity(data.brain().astype(float))
radius = 5
neighborhood = ball(radius)
# t_loc_otsu is an image
t_loc_otsu = rank.otsu(brain, neighborhood)
loc_otsu = brain >= t_loc_otsu
# t_glob_otsu is a scalar
t_glob_otsu = threshold_otsu(brain)
glob_otsu = brain >= t_glob_otsu
fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(12, 12),
sharex=True, sharey=True)
ax = axes.ravel()
slice_index = 3
fig.colorbar(ax[0].imshow(brain[slice_index], cmap=plt.cm.gray), ax=ax[0])
ax[0].set_title('Original')
fig.colorbar(ax[1].imshow(t_loc_otsu[slice_index], cmap=plt.cm.gray), ax=ax[1])
ax[1].set_title('Local Otsu ($r=%d$)' % radius)
ax[2].imshow(brain[slice_index] >= t_loc_otsu[slice_index], cmap=plt.cm.gray)
ax[2].set_title('Original >= local Otsu')
ax[3].imshow(glob_otsu[slice_index], cmap=plt.cm.gray)
ax[3].set_title('Global Otsu ($t=%d$)' % t_glob_otsu)
for a in ax:
a.axis('off')
fig.tight_layout()
######################################################################
# The following example shows how local Otsu thresholding handles a global
# level shift applied to a synthetic image.
n = 100
theta = np.linspace(0, 10 * np.pi, n)
x = np.sin(theta)
m = (np.tile(x, (n, 1)) * np.linspace(0.1, 1, n) * 128 + 128).astype(np.uint8)
radius = 10
t = rank.otsu(m, disk(radius))
fig, ax = plt.subplots(ncols=2, figsize=(10, 5),
sharex=True, sharey=True)
ax[0].imshow(m, cmap=plt.cm.gray)
ax[0].set_title('Original')
ax[1].imshow(m >= t, cmap=plt.cm.gray)
ax[1].set_title('Local Otsu ($r=%d$)' % radius)
for a in ax:
a.axis('off')
plt.tight_layout()
######################################################################
# Image morphology
# ================
#
# Local maximum and local minimum are the base operators for gray-level
# morphology.
#
# Here is an example of the classical morphological gray-level filters:
# opening, closing and morphological gradient.
from skimage.filters.rank import maximum, minimum, gradient
noisy_image = img_as_ubyte(data.camera())
closing = maximum(minimum(noisy_image, disk(5)), disk(5))
opening = minimum(maximum(noisy_image, disk(5)), disk(5))
grad = gradient(noisy_image, disk(5))
# display results
fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(10, 10),
sharex=True, sharey=True)
ax = axes.ravel()
ax[0].imshow(noisy_image, cmap=plt.cm.gray)
ax[0].set_title('Original')
ax[1].imshow(closing, cmap=plt.cm.gray)
ax[1].set_title('Gray-level closing')
ax[2].imshow(opening, cmap=plt.cm.gray)
ax[2].set_title('Gray-level opening')
ax[3].imshow(grad, cmap=plt.cm.gray)
ax[3].set_title('Morphological gradient')
for a in ax:
a.axis('off')
plt.tight_layout()
######################################################################
#
# Feature extraction
# ===================
#
# Local histograms can be exploited to compute local entropy, which is
# related to the local image complexity. Entropy is computed using the base-2
# logarithm, i.e., the filter returns the minimum number of bits needed to
# encode the local gray-level distribution.
#
# :func:`skimage.filters.rank.entropy` returns the local entropy on a given
# structuring element. The following example applies this filter
# on 8- and 16-bit images.
#
# .. note::
#
#     To make better use of the available image bit depth, the function
#     returns 10x entropy for 8-bit images and 1000x entropy for 16-bit
#     images.
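# As a quick illustration of the unit, the Shannon entropy (base 2) of a
# two-value, 50/50 gray-level distribution is exactly 1 bit:
import numpy as np
p = np.array([0.5, 0.5])
print(-(p * np.log2(p)).sum())  # 1.0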
from skimage import data
from skimage.filters.rank import entropy
from skimage.morphology import disk
import numpy as np
import matplotlib.pyplot as plt
image = data.camera()
fig, ax = plt.subplots(ncols=2, figsize=(12, 6), sharex=True, sharey=True)
fig.colorbar(ax[0].imshow(image, cmap=plt.cm.gray), ax=ax[0])
ax[0].set_title('Image')
fig.colorbar(ax[1].imshow(entropy(image, disk(5)), cmap=plt.cm.gray), ax=ax[1])
ax[1].set_title('Entropy')
for a in ax:
a.axis('off')
plt.tight_layout()
######################################################################
#
# Implementation
# ==============
#
# The central part of the `skimage.filters.rank` filters is built on a sliding
# window that updates the local gray-level histogram incrementally as the
# window moves. This approach limits the algorithm complexity to O(n), where n
# is the number of image pixels, and keeps the cost per pixel nearly
# independent of the structuring element size.
#
# In the following, we first sketch the sliding-histogram idea in 1D, then
# compare the performance of the different implementations available in
# `skimage`.
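# A 1D sketch of the sliding-histogram idea (illustrative only; the library
# works on 2D/3D images and is implemented in compiled code): the histogram is
# updated in O(1) per sample entering or leaving the window, and the median is
# read off the cumulative histogram, so the cost per pixel barely grows with
# the window length.
def sliding_median_1d(sig, radius):
    """Median-filter a 1D uint8 signal with an incrementally updated
    256-bin histogram (nearest-edge padding)."""
    n = len(sig)
    half = (2 * radius + 1) // 2 + 1
    hist = np.zeros(256, dtype=int)
    for i in np.clip(np.arange(-radius, radius + 1), 0, n - 1):
        hist[sig[i]] += 1
    out = np.empty(n, dtype=sig.dtype)
    for i in range(n):
        # median = smallest gray value whose cumulative count reaches half
        out[i] = np.searchsorted(np.cumsum(hist), half)
        if i < n - 1:  # slide: drop the leftmost sample, add the next one
            hist[sig[max(i - radius, 0)]] -= 1
            hist[sig[min(i + radius + 1, n - 1)]] += 1
    return out

print(sliding_median_1d(np.array([10, 10, 255, 10, 10], dtype=np.uint8), 1))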
from time import time
from scipy.ndimage import percentile_filter
from skimage.morphology import dilation
from skimage.filters.rank import median, maximum
def exec_and_timeit(func):
"""Decorator that returns both function results and execution time."""
def wrapper(*arg):
t1 = time()
res = func(*arg)
t2 = time()
ms = (t2 - t1) * 1000.0
return (res, ms)
return wrapper
@exec_and_timeit
def cr_med(image, selem):
return median(image=image, selem=selem)
@exec_and_timeit
def cr_max(image, selem):
return maximum(image=image, selem=selem)
@exec_and_timeit
def cm_dil(image, selem):
return dilation(image=image, selem=selem)
@exec_and_timeit
def ndi_med(image, n):
return percentile_filter(image, 50, size=n * 2 - 1)
######################################################################
# Comparison between
#
# * `skimage.filters.rank.maximum`
# * `skimage.morphology.dilation`
#
# on increasing structuring element size:
a = data.camera()
rec = []
e_range = range(1, 20, 2)
for r in e_range:
elem = disk(r + 1)
rc, ms_rc = cr_max(a, elem)
rcm, ms_rcm = cm_dil(a, elem)
rec.append((ms_rc, ms_rcm))
rec = np.asarray(rec)
fig, ax = plt.subplots(figsize=(10, 10), sharey=True)
ax.set_title('Performance with respect to element size')
ax.set_ylabel('Time (ms)')
ax.set_xlabel('Element radius')
ax.plot(e_range, rec)
ax.legend(['filters.rank.maximum', 'morphology.dilation'])
plt.tight_layout()
######################################################################
# and increasing image size:
r = 9
elem = disk(r + 1)
rec = []
s_range = range(100, 1000, 100)
for s in s_range:
a = (np.random.random((s, s)) * 256).astype(np.uint8)
(rc, ms_rc) = cr_max(a, elem)
(rcm, ms_rcm) = cm_dil(a, elem)
rec.append((ms_rc, ms_rcm))
rec = np.asarray(rec)
fig, ax = plt.subplots()
ax.set_title('Performance with respect to image size')
ax.set_ylabel('Time (ms)')
ax.set_xlabel('Image size')
ax.plot(s_range, rec)
ax.legend(['filters.rank.maximum', 'morphology.dilation'])
plt.tight_layout()
######################################################################
# Comparison between:
#
# * `skimage.filters.rank.median`
# * `scipy.ndimage.percentile_filter`
#
# on increasing structuring element size:
a = data.camera()
rec = []
e_range = range(2, 30, 4)
for r in e_range:
elem = disk(r + 1)
rc, ms_rc = cr_med(a, elem)
rndi, ms_ndi = ndi_med(a, r)
rec.append((ms_rc, ms_ndi))
rec = np.asarray(rec)
fig, ax = plt.subplots()
ax.set_title('Performance with respect to element size')
ax.plot(e_range, rec)
ax.legend(['filters.rank.median', 'scipy.ndimage.percentile'])
ax.set_ylabel('Time (ms)')
ax.set_xlabel('Element radius')
######################################################################
# Comparison of outcome of the two methods:
fig, ax = plt.subplots(ncols=2, figsize=(10, 5), sharex=True, sharey=True)
ax[0].set_title('filters.rank.median')
ax[0].imshow(rc, cmap=plt.cm.gray)
ax[1].set_title('scipy.ndimage.percentile')
ax[1].imshow(rndi, cmap=plt.cm.gray)
for a in ax:
a.axis('off')
plt.tight_layout()
######################################################################
# on increasing image size:
r = 9
elem = disk(r + 1)
rec = []
s_range = [100, 200, 500, 1000]
for s in s_range:
a = (np.random.random((s, s)) * 256).astype(np.uint8)
(rc, ms_rc) = cr_med(a, elem)
rndi, ms_ndi = ndi_med(a, r)
rec.append((ms_rc, ms_ndi))
rec = np.asarray(rec)
fig, ax = plt.subplots()
ax.set_title('Performance with respect to image size')
ax.plot(s_range, rec)
ax.legend(['filters.rank.median', 'scipy.ndimage.percentile'])
ax.set_ylabel('Time (ms)')
ax.set_xlabel('Image size')
plt.tight_layout()
plt.show()

View File

@ -0,0 +1,232 @@
"""
============
Thresholding
============
Thresholding is used to create a binary image from a grayscale image [1]_.
It is the simplest way to segment objects from a background.
Thresholding algorithms implemented in scikit-image can be separated into two
categories:
- Histogram-based. The histogram of the pixels' intensity is used, and
  certain assumptions are made about the properties of this histogram (e.g.,
  bimodality).
- Local. To process a pixel, only the neighboring pixels are used.
  These algorithms often require more computation time.
If you are not familiar with the details of the different algorithms and the
underlying assumptions, it is often difficult to know which algorithm will give
the best results. Therefore, scikit-image includes a function to evaluate
the thresholding algorithms provided by the library. At a glance, you can
select the best algorithm for your data without a deep understanding of their
mechanisms.
.. [1] https://en.wikipedia.org/wiki/Thresholding_%28image_processing%29
.. seealso::
Presentation on
:ref:`sphx_glr_auto_examples_applications_plot_rank_filters.py`.
"""
import matplotlib.pyplot as plt
from skimage import data
from skimage.filters import try_all_threshold
img = data.page()
fig, ax = try_all_threshold(img, figsize=(10, 8), verbose=False)
plt.show()
######################################################################
# How to apply a threshold?
# =========================
#
# Now, we illustrate how to apply one of these thresholding algorithms.
# This example uses the mean value of pixel intensities. It is a simple
# and naive threshold value, which is sometimes used as a guess value.
#
from skimage.filters import threshold_mean
image = data.camera()
thresh = threshold_mean(image)
binary = image > thresh
fig, axes = plt.subplots(ncols=2, figsize=(8, 3))
ax = axes.ravel()
ax[0].imshow(image, cmap=plt.cm.gray)
ax[0].set_title('Original image')
ax[1].imshow(binary, cmap=plt.cm.gray)
ax[1].set_title('Result')
for a in ax:
a.axis('off')
plt.show()
######################################################################
# Bimodal histogram
# =================
#
# For pictures with a bimodal histogram, more specific algorithms can be used.
# For instance, the minimum algorithm takes a histogram of the image and smooths it
# repeatedly until there are only two peaks in the histogram.
from skimage.filters import threshold_minimum
image = data.camera()
thresh_min = threshold_minimum(image)
binary_min = image > thresh_min
fig, ax = plt.subplots(2, 2, figsize=(10, 10))
ax[0, 0].imshow(image, cmap=plt.cm.gray)
ax[0, 0].set_title('Original')
ax[0, 1].hist(image.ravel(), bins=256)
ax[0, 1].set_title('Histogram')
ax[1, 0].imshow(binary_min, cmap=plt.cm.gray)
ax[1, 0].set_title('Thresholded (min)')
ax[1, 1].hist(image.ravel(), bins=256)
ax[1, 1].axvline(thresh_min, color='r')
for a in ax[:, 0]:
a.axis('off')
plt.show()
######################################################################
# Otsu's method [2]_ calculates an "optimal" threshold (marked by a red line in the
# histogram below) by maximizing the variance between two classes of pixels,
# which are separated by the threshold. Equivalently, this threshold minimizes
# the intra-class variance.
#
# .. [2] https://en.wikipedia.org/wiki/Otsu's_method
#
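# A brute-force sketch of Otsu's criterion (illustrative only; the library's
# ``threshold_otsu`` below works on the histogram and is much faster): choose
# the threshold t that minimizes the count-weighted sum of the two class
# variances. For the camera image it should land close to the library value.
import numpy as np

def otsu_brute_force(img):
    vals = img.ravel()
    scores = []
    for t in range(1, 256):
        lo, hi = vals[vals < t], vals[vals >= t]
        if lo.size == 0 or hi.size == 0:
            scores.append(np.inf)
            continue
        # intra-class variance, weighted by the class sizes
        scores.append(lo.size * lo.var() + hi.size * hi.var())
    return 1 + int(np.argmin(scores))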
from skimage.filters import threshold_otsu
image = data.camera()
thresh = threshold_otsu(image)
binary = image > thresh
fig, axes = plt.subplots(ncols=3, figsize=(8, 2.5))
ax = axes.ravel()
ax[0] = plt.subplot(1, 3, 1)
ax[1] = plt.subplot(1, 3, 2)
ax[2] = plt.subplot(1, 3, 3, sharex=ax[0], sharey=ax[0])
ax[0].imshow(image, cmap=plt.cm.gray)
ax[0].set_title('Original')
ax[0].axis('off')
ax[1].hist(image.ravel(), bins=256)
ax[1].set_title('Histogram')
ax[1].axvline(thresh, color='r')
ax[2].imshow(binary, cmap=plt.cm.gray)
ax[2].set_title('Thresholded')
ax[2].axis('off')
plt.show()
######################################################################
# Local thresholding
# ==================
#
# If the image background is relatively uniform, then you can use a global
# threshold value as presented above. However, if there is large variation in the
# background intensity, adaptive thresholding (a.k.a. local or dynamic
# thresholding) may produce better results. Note that local thresholding is
# much slower than global thresholding.
#
# Here, we binarize an image using the `threshold_local` function, which
# calculates thresholds in regions with a characteristic size `block_size` surrounding
# each pixel (i.e. local neighborhoods). Each threshold value is the weighted mean
# of the local neighborhood minus an offset value.
#
from skimage.filters import threshold_otsu, threshold_local
image = data.page()
global_thresh = threshold_otsu(image)
binary_global = image > global_thresh
block_size = 35
local_thresh = threshold_local(image, block_size, offset=10)
binary_local = image > local_thresh
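# Cross-check of the definition above (illustrative): with ``method='mean'``,
# the local threshold reduces to a plain block mean minus the offset. The
# comparison assumes matching boundary handling ('reflect').
import numpy as np
from scipy.ndimage import uniform_filter
mean_thresh = threshold_local(image, block_size, method='mean', offset=10)
manual = uniform_filter(image.astype(float), size=block_size) - 10
print(np.abs(mean_thresh - manual).max())  # expected ~0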
fig, axes = plt.subplots(nrows=3, figsize=(7, 8))
ax = axes.ravel()
plt.gray()
ax[0].imshow(image)
ax[0].set_title('Original')
ax[1].imshow(binary_global)
ax[1].set_title('Global thresholding')
ax[2].imshow(binary_local)
ax[2].set_title('Local thresholding')
for a in ax:
a.axis('off')
plt.show()
######################################################################
# Now, we show how Otsu's threshold [2]_ method can be applied locally. For
# each pixel, an "optimal" threshold is determined by maximizing the variance
# between two classes of pixels of the local neighborhood defined by a
# structuring element.
#
# The example compares the local threshold with the global threshold.
#
from skimage.morphology import disk
from skimage.filters import threshold_otsu, rank
from skimage.util import img_as_ubyte
img = img_as_ubyte(data.page())
radius = 15
selem = disk(radius)
local_otsu = rank.otsu(img, selem)
threshold_global_otsu = threshold_otsu(img)
global_otsu = img >= threshold_global_otsu
fig, axes = plt.subplots(2, 2, figsize=(8, 5), sharex=True, sharey=True)
ax = axes.ravel()
plt.tight_layout()
fig.colorbar(ax[0].imshow(img, cmap=plt.cm.gray),
ax=ax[0], orientation='horizontal')
ax[0].set_title('Original')
ax[0].axis('off')
fig.colorbar(ax[1].imshow(local_otsu, cmap=plt.cm.gray),
ax=ax[1], orientation='horizontal')
ax[1].set_title('Local Otsu (radius=%d)' % radius)
ax[1].axis('off')
ax[2].imshow(img >= local_otsu, cmap=plt.cm.gray)
ax[2].set_title('Original >= Local Otsu')
ax[2].axis('off')
ax[3].imshow(global_otsu, cmap=plt.cm.gray)
ax[3].set_title('Global Otsu (threshold = %d)' % threshold_global_otsu)
ax[3].axis('off')
plt.show()

View File

@ -0,0 +1,2 @@
Manipulating exposure and color channels
----------------------------------------

View File

@ -0,0 +1,245 @@
"""
==================================
3D adaptive histogram equalization
==================================
Adaptive histogram equalization (AHE) can be used to improve the local
contrast of an image [1]_. Specifically, AHE can be useful for normalizing
intensities across images. This example compares the results of applying
global histogram equalization and AHE to a 3D image and a synthetically
degraded version of it.
.. [1] https://en.wikipedia.org/wiki/Histogram_equalization
"""
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from matplotlib import cm, colors
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
from skimage import exposure, util
import imageio as io
# Prepare data and apply histogram equalization
from skimage.data import cells3d
im_orig = util.img_as_float(cells3d()[:, 1, :, :]) # grab just the nuclei
# Reorder axis order from (z, y, x) to (x, y, z)
im_orig = im_orig.transpose()
# Clip to the 5th-95th percentile range, then rescale to [0, 1]
im_orig = np.clip(im_orig,
np.percentile(im_orig, 5),
np.percentile(im_orig, 95))
im_orig = (im_orig - im_orig.min()) / (im_orig.max() - im_orig.min())
# Degrade image by applying exponential intensity decay along x
sigmoid = np.exp(-3 * np.linspace(0, 1, im_orig.shape[0]))
im_degraded = (im_orig.T * sigmoid).T
# Set parameters for AHE
# Determine kernel sizes in each dim relative to image shape
kernel_size = (im_orig.shape[0] // 5,
im_orig.shape[1] // 5,
im_orig.shape[2] // 2)
kernel_size = np.array(kernel_size)
clip_limit = 0.9
# Perform histogram equalization
im_orig_he, im_degraded_he = \
[exposure.equalize_hist(im)
for im in [im_orig, im_degraded]]
im_orig_ahe, im_degraded_ahe = \
[exposure.equalize_adapthist(im,
kernel_size=kernel_size,
clip_limit=clip_limit)
for im in [im_orig, im_degraded]]
# Define functions to help plot the data
def scalars_to_rgba(scalars, cmap, vmin=0., vmax=1., alpha=0.2):
"""
Convert array of scalars into array of corresponding RGBA values.
"""
norm = colors.Normalize(vmin=vmin, vmax=vmax)
scalar_map = cm.ScalarMappable(norm=norm, cmap=cmap)
rgbas = scalar_map.to_rgba(scalars)
rgbas[:, 3] = alpha
return rgbas
def plt_render_volume(vol, fig_ax, cmap,
vmin=0, vmax=1,
bin_widths=None, n_levels=20):
"""
Render a volume in a 3D matplotlib scatter plot.
Better would be to use napari.
"""
vol = np.clip(vol, vmin, vmax)
xs, ys, zs = np.mgrid[0:vol.shape[0]:bin_widths[0],
0:vol.shape[1]:bin_widths[1],
0:vol.shape[2]:bin_widths[2]]
vol_scaled = vol[::bin_widths[0],
::bin_widths[1],
::bin_widths[2]].flatten()
# Define alpha transfer function
levels = np.linspace(vmin, vmax, n_levels)
alphas = np.linspace(0, .7, n_levels)
alphas = alphas ** 11
alphas = (alphas - alphas.min()) / (alphas.max() - alphas.min())
alphas *= 0.8
# Group pixels by intensity and plot separately,
# as 3D scatter does not accept arrays of alpha values
for il in range(1, len(levels)):
sel = (vol_scaled >= levels[il - 1])
sel *= (vol_scaled <= levels[il])
if not np.max(sel):
continue
c = scalars_to_rgba(vol_scaled[sel], cmap,
vmin=vmin, vmax=vmax, alpha=alphas[il - 1])
fig_ax.scatter(xs.flatten()[sel],
ys.flatten()[sel],
zs.flatten()[sel],
c=c, s=0.5 * np.mean(bin_widths),
marker='o', linewidth=0)
# Create figure with subplots
cmap = 'Blues'
fig = plt.figure(figsize=(10, 6))
axs = [fig.add_subplot(2, 3, i + 1,
projection=Axes3D.name, facecolor="none")
for i in range(6)]
ims = [im_orig, im_orig_he, im_orig_ahe,
im_degraded, im_degraded_he, im_degraded_ahe]
# Prepare lines for the various boxes to be plotted
verts = np.array([[i, j, k] for i in [0, 1]
for j in [0, 1] for k in [0, 1]]).astype(np.float32)
lines = [np.array([i, j]) for i in verts
for j in verts if np.allclose(np.linalg.norm(i - j), 1)]
# "render" volumetric data
for iax, ax in enumerate(axs[:]):
plt_render_volume(ims[iax], ax, cmap, 0, 1, [2, 2, 2], 20)
# plot 3D box
rect_shape = np.array(im_orig.shape) + 2
for line in lines:
ax.plot((line * rect_shape)[:, 0] - 1,
(line * rect_shape)[:, 1] - 1,
(line * rect_shape)[:, 2] - 1,
linewidth=1, color='grey')
# Add boxes illustrating the kernels
ns = np.array(im_orig.shape) // kernel_size - 1
for axis_ind, vertex_ind, box_shape in zip([1] + [2] * 4,
[[0, 0, 0],
[ns[0] - 1, ns[1], ns[2] - 1],
[ns[0], ns[1] - 1, ns[2] - 1],
[ns[0], ns[1], ns[2] - 1],
[ns[0], ns[1], ns[2]]],
[np.array(im_orig.shape)]
+ [kernel_size] * 4):
for line in lines:
axs[axis_ind].plot(((line + vertex_ind) * box_shape)[:, 0],
((line + vertex_ind) * box_shape)[:, 1],
((line + vertex_ind) * box_shape)[:, 2],
linewidth=1.2, color='crimson')
# Plot degradation function
axs[3].scatter(xs=np.arange(len(sigmoid)),
ys=np.zeros(len(sigmoid)) + im_orig.shape[1],
zs=sigmoid * im_orig.shape[2],
s=5,
c=scalars_to_rgba(sigmoid,
cmap=cmap, vmin=0, vmax=1, alpha=1.)[:, :3])
# Subplot aesthetics (optimized for matplotlib 3.3)
for iax, ax in enumerate(axs[:]):
# Get rid of panes and axis lines
for dim_ax in [ax.xaxis, ax.yaxis, ax.zaxis]:
dim_ax.set_pane_color((1., 1., 1., 0.))
dim_ax.line.set_color((1., 1., 1., 0.))
# Define 3D axes limits, see https://github.com/
# matplotlib/matplotlib/issues/17172#issuecomment-617546105
xyzlim = np.array([ax.get_xlim3d(),
ax.get_ylim3d(),
ax.get_zlim3d()]).T
XYZlim = np.asarray([min(xyzlim[0]), max(xyzlim[1])])
ax.set_xlim3d(XYZlim)
ax.set_ylim3d(XYZlim)
ax.set_zlim3d(XYZlim * 0.5)
try:
ax.set_aspect('equal')
except NotImplementedError:
pass
ax.set_xlabel('x', labelpad=-20)
ax.set_ylabel('y', labelpad=-20)
ax.text2D(0.63, 0.2, "z", transform=ax.transAxes)
ax.set_xticks([])
ax.set_yticks([])
ax.set_zticks([])
ax.grid(False)
ax.elev = 30
plt.subplots_adjust(left=0.05,
bottom=-0.1,
right=1.01,
top=1.1,
wspace=-0.1,
hspace=-0.45)
# Highlight AHE
rect_ax = fig.add_axes([0, 0, 1, 1], facecolor='none')
rect_ax.set_axis_off()
rect = patches.Rectangle((0.68, 0.01), 0.315, 0.98,
edgecolor='grey', facecolor='none',
linewidth=2, linestyle='--')
rect_ax.add_patch(rect)
# Add text
rect_ax.text(0.19, 0.34, '$I_{degr}(x,y,z) = e^{-x}I_{orig}(x,y,z)$',
fontsize=9, rotation=-15,
color=scalars_to_rgba([0.8], cmap='Blues', alpha=1.)[0])
fc = {'size': 14}
rect_ax.text(0.03, 0.58, r'$\it{Original}$' + '\ninput image',
rotation=90, fontdict=fc, horizontalalignment='center')
rect_ax.text(0.03, 0.16, r'$\it{Degraded}$' + '\ninput image',
rotation=90, fontdict=fc, horizontalalignment='center')
rect_ax.text(0.13, 0.91, 'Input volume:\n3D cell image', fontdict=fc)
rect_ax.text(0.51, 0.91, r'$\it{Global}$' + '\nhistogram equalization',
fontdict=fc, horizontalalignment='center')
rect_ax.text(0.84, 0.91,
r'$\it{Adaptive}$' + '\nhistogram equalization (AHE)',
fontdict=fc, horizontalalignment='center')
rect_ax.text(0.58, 0.82, 'non-local', fontsize=12, color='crimson')
rect_ax.text(0.87, 0.82, 'local kernel', fontsize=12, color='crimson')
# Add colorbar
cbar_ax = fig.add_axes([0.12, 0.43, 0.008, 0.08])
cbar_ax.imshow(np.arange(256).reshape(256, 1)[::-1],
cmap=cmap, aspect="auto")
cbar_ax.set_xticks([])
cbar_ax.set_yticks([0, 255])
cbar_ax.set_xticklabels([])
cbar_ax.set_yticklabels([1., 0.])
plt.show()

View File

@ -0,0 +1,118 @@
"""
=========================================
Adapting gray-scale filters to RGB images
=========================================
There are many filters that are designed to work with gray-scale images but not
with color images. To simplify the process of creating functions that can adapt
to RGB images, scikit-image provides the ``adapt_rgb`` decorator.
To actually use the ``adapt_rgb`` decorator, you have to decide how you want to
adapt the RGB image for use with the gray-scale filter. There are two
pre-defined handlers:
``each_channel``
Pass each of the RGB channels to the filter one-by-one, and stitch the
results back into an RGB image.
``hsv_value``
Convert the RGB image to HSV and pass the value channel to the filter.
The filtered result is inserted back into the HSV image and converted
back to RGB.
Below, we demonstrate the use of ``adapt_rgb`` on a couple of gray-scale
filters:
"""
from skimage.color.adapt_rgb import adapt_rgb, each_channel, hsv_value
from skimage import filters
@adapt_rgb(each_channel)
def sobel_each(image):
return filters.sobel(image)
@adapt_rgb(hsv_value)
def sobel_hsv(image):
return filters.sobel(image)
######################################################################
# We can use these functions as we would normally use them, but now they work
# with both gray-scale and color images. Let's plot the results with a color
# image:
from skimage import data
from skimage.exposure import rescale_intensity
import matplotlib.pyplot as plt
image = data.astronaut()
fig, (ax_each, ax_hsv) = plt.subplots(ncols=2, figsize=(14, 7))
# We use 1 - sobel_each(image) but this won't work if image is not normalized
ax_each.imshow(rescale_intensity(1 - sobel_each(image)))
ax_each.set_xticks([]), ax_each.set_yticks([])
ax_each.set_title("Sobel filter computed\n on individual RGB channels")
# We use 1 - sobel_hsv(image) but this won't work if image is not normalized
ax_hsv.imshow(rescale_intensity(1 - sobel_hsv(image)))
ax_hsv.set_xticks([]), ax_hsv.set_yticks([])
ax_hsv.set_title("Sobel filter computed\n on (V)alue converted image (HSV)")
######################################################################
# Notice that the result for the value-filtered image preserves the color of
# the original image, whereas the channel-filtered image combines the
# per-channel results in a more surprising way. In other common cases, such
# as smoothing, the channel-filtered image will produce a better result than
# the value-filtered image.
#
# You can also create your own handler functions for ``adapt_rgb``. To do so,
# just create a function with the following signature::
#
# def handler(image_filter, image, *args, **kwargs):
# # Manipulate RGB image here...
# image = image_filter(image, *args, **kwargs)
# # Manipulate filtered image here...
# return image
#
# Note that ``adapt_rgb`` handlers are written for filters where the image is
# the first argument.
#
# As a very simple example, we can just convert any RGB image to grayscale
# and then return the filtered result:
from skimage.color import rgb2gray
def as_gray(image_filter, image, *args, **kwargs):
gray_image = rgb2gray(image)
return image_filter(gray_image, *args, **kwargs)
######################################################################
# It's important to create a signature that uses ``*args`` and ``**kwargs``
# to pass arguments along to the filter so that the decorated function is
# allowed to have any number of positional and keyword arguments.
#
# Finally, we can use this handler with ``adapt_rgb`` just as before:
@adapt_rgb(as_gray)
def sobel_gray(image):
return filters.sobel(image)
fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(7, 7))
# We use 1 - sobel_gray(image) but this won't work if image is not normalized
ax.imshow(rescale_intensity(1 - sobel_gray(image)), cmap=plt.cm.gray)
ax.set_xticks([]), ax.set_yticks([])
ax.set_title("Sobel filter computed\n on the converted grayscale image")
plt.show()
######################################################################
#
# .. note::
#
# A very simple check of the array shape is used for detecting RGB
# images, so ``adapt_rgb`` is not recommended for functions that support
# 3D volumes or color images in non-RGB spaces.

View File

@ -0,0 +1,102 @@
"""
======================
Histogram Equalization
======================
This example enhances an image with low contrast, using a method called
*histogram equalization*, which "spreads out the most frequent intensity
values" in an image [1]_. The equalized image has a roughly linear cumulative
distribution function.
While histogram equalization has the advantage that it requires no parameters,
it sometimes yields unnatural looking images. An alternative method is
*contrast stretching*, where the image is rescaled to include all intensities
that fall within the 2nd and 98th percentiles [2]_.
.. [1] https://en.wikipedia.org/wiki/Histogram_equalization
.. [2] http://homepages.inf.ed.ac.uk/rbf/HIPR2/stretch.htm
"""
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from skimage import data, img_as_float
from skimage import exposure
matplotlib.rcParams['font.size'] = 8
def plot_img_and_hist(image, axes, bins=256):
"""Plot an image along with its histogram and cumulative histogram.
"""
image = img_as_float(image)
ax_img, ax_hist = axes
ax_cdf = ax_hist.twinx()
# Display image
ax_img.imshow(image, cmap=plt.cm.gray)
ax_img.set_axis_off()
# Display histogram
ax_hist.hist(image.ravel(), bins=bins, histtype='step', color='black')
ax_hist.ticklabel_format(axis='y', style='scientific', scilimits=(0, 0))
ax_hist.set_xlabel('Pixel intensity')
ax_hist.set_xlim(0, 1)
ax_hist.set_yticks([])
# Display cumulative distribution
img_cdf, bins = exposure.cumulative_distribution(image, bins)
ax_cdf.plot(bins, img_cdf, 'r')
ax_cdf.set_yticks([])
return ax_img, ax_hist, ax_cdf
# Load an example image
img = data.moon()
# Contrast stretching
p2, p98 = np.percentile(img, (2, 98))
img_rescale = exposure.rescale_intensity(img, in_range=(p2, p98))
# Equalization
img_eq = exposure.equalize_hist(img)
# Adaptive Equalization
img_adapteq = exposure.equalize_adapthist(img, clip_limit=0.03)
# Display results
fig = plt.figure(figsize=(8, 5))
axes = np.zeros((2, 4), dtype=object)
axes[0, 0] = fig.add_subplot(2, 4, 1)
for i in range(1, 4):
axes[0, i] = fig.add_subplot(2, 4, 1+i, sharex=axes[0,0], sharey=axes[0,0])
for i in range(0, 4):
axes[1, i] = fig.add_subplot(2, 4, 5+i)
ax_img, ax_hist, ax_cdf = plot_img_and_hist(img, axes[:, 0])
ax_img.set_title('Low contrast image')
y_min, y_max = ax_hist.get_ylim()
ax_hist.set_ylabel('Number of pixels')
ax_hist.set_yticks(np.linspace(0, y_max, 5))
ax_img, ax_hist, ax_cdf = plot_img_and_hist(img_rescale, axes[:, 1])
ax_img.set_title('Contrast stretching')
ax_img, ax_hist, ax_cdf = plot_img_and_hist(img_eq, axes[:, 2])
ax_img.set_title('Histogram equalization')
ax_img, ax_hist, ax_cdf = plot_img_and_hist(img_adapteq, axes[:, 3])
ax_img.set_title('Adaptive equalization')
ax_cdf.set_ylabel('Fraction of total intensity')
ax_cdf.set_yticks(np.linspace(0, 1, 5))
# prevent overlap of y-axis labels
fig.tight_layout()
plt.show()

View File

@ -0,0 +1,67 @@
"""
==================
Histogram matching
==================
This example demonstrates the feature of histogram matching. It manipulates the
pixels of an input image so that its histogram matches the histogram of the
reference image. If the images have multiple channels, the matching is done
independently for each channel, as long as the number of channels is equal in
the input image and the reference.
Histogram matching can be used as a lightweight normalisation for image
processing, such as feature matching, especially in circumstances where the
images have been taken from different sources or under different conditions
(e.g., lighting).
"""
import matplotlib.pyplot as plt
from skimage import data
from skimage import exposure
from skimage.exposure import match_histograms
reference = data.coffee()
image = data.chelsea()
matched = match_histograms(image, reference, multichannel=True)
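# Under the hood (a sketch, not the library's exact implementation): for each
# channel, every source pixel is replaced by the reference value found at the
# same quantile, via sort-based quantile mapping.
import numpy as np

def match_channel(source, template):
    src_idx = np.argsort(source.ravel())
    ref_sorted = np.sort(template.ravel())
    # interpolate the reference quantiles onto the source size, then scatter
    # the values back to the original pixel positions
    out = np.empty(source.size, dtype=float)
    out[src_idx] = np.interp(np.linspace(0, 1, source.size),
                             np.linspace(0, 1, template.size), ref_sorted)
    return out.reshape(source.shape)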
fig, (ax1, ax2, ax3) = plt.subplots(nrows=1, ncols=3, figsize=(8, 3),
sharex=True, sharey=True)
for aa in (ax1, ax2, ax3):
aa.set_axis_off()
ax1.imshow(image)
ax1.set_title('Source')
ax2.imshow(reference)
ax2.set_title('Reference')
ax3.imshow(matched)
ax3.set_title('Matched')
plt.tight_layout()
plt.show()
######################################################################
# To illustrate the effect of the histogram matching, we plot for each
# RGB channel, the histogram and the cumulative histogram. Clearly,
# the matched image has the same cumulative histogram as the reference
# image for each channel.
fig, axes = plt.subplots(nrows=3, ncols=3, figsize=(8, 8))
for i, img in enumerate((image, reference, matched)):
for c, c_color in enumerate(('red', 'green', 'blue')):
img_hist, bins = exposure.histogram(img[..., c], source_range='dtype')
axes[c, i].plot(bins, img_hist / img_hist.max())
img_cdf, bins = exposure.cumulative_distribution(img[..., c])
axes[c, i].plot(bins, img_cdf)
axes[c, 0].set_ylabel(c_color)
axes[0, 0].set_title('Source')
axes[0, 1].set_title('Reference')
axes[0, 2].set_title('Matched')
plt.tight_layout()
plt.show()

View File

@ -0,0 +1,81 @@
"""
==============================================
Immunohistochemical staining colors separation
==============================================
Color deconvolution consists of the separation of features by their colors.
In this example we separate the immunohistochemical (IHC) staining from the
hematoxylin counterstaining. The separation is achieved with the method
described in [1]_, known as "color deconvolution".
The IHC staining expression of the FHL2 protein is here revealed with
Diaminobenzidine (DAB) which gives a brown color.
.. [1] A. C. Ruifrok and D. A. Johnston, "Quantification of histochemical
staining by color deconvolution.," Analytical and quantitative
cytology and histology / the International Academy of Cytology [and]
American Society of Cytology, vol. 23, no. 4, pp. 291-9, Aug. 2001.
"""
import numpy as np
import matplotlib.pyplot as plt
from skimage import data
from skimage.color import rgb2hed, hed2rgb
# Example IHC image
ihc_rgb = data.immunohistochemistry()
# Separate the stains from the IHC image
ihc_hed = rgb2hed(ihc_rgb)
# Create an RGB image for each of the stains
null = np.zeros_like(ihc_hed[:, :, 0])
ihc_h = hed2rgb(np.stack((ihc_hed[:, :, 0], null, null), axis=-1))
ihc_e = hed2rgb(np.stack((null, ihc_hed[:, :, 1], null), axis=-1))
ihc_d = hed2rgb(np.stack((null, null, ihc_hed[:, :, 2]), axis=-1))
# Display
fig, axes = plt.subplots(2, 2, figsize=(7, 6), sharex=True, sharey=True)
ax = axes.ravel()
ax[0].imshow(ihc_rgb)
ax[0].set_title("Original image")
ax[1].imshow(ihc_h)
ax[1].set_title("Hematoxylin")
ax[2].imshow(ihc_e)
ax[2].set_title("Eosin") # Note that there is no Eosin stain in this image
ax[3].imshow(ihc_d)
ax[3].set_title("DAB")
for a in ax.ravel():
a.axis('off')
fig.tight_layout()
######################################################################
# Now we can easily manipulate the hematoxylin and DAB "channels":
from skimage.exposure import rescale_intensity
# Rescale hematoxylin and DAB signals and give them a fluorescence look
h = rescale_intensity(ihc_hed[:, :, 0], out_range=(0, 1),
in_range=(0, np.percentile(ihc_hed[:, :, 0], 99)))
d = rescale_intensity(ihc_hed[:, :, 2], out_range=(0, 1),
in_range=(0, np.percentile(ihc_hed[:, :, 2], 99)))
# Put the two channels into an RGB image as green and blue channels
zdh = np.dstack((null, d, h))
fig = plt.figure()
axis = plt.subplot(1, 1, 1, sharex=ax[0], sharey=ax[0])
axis.imshow(zdh)
axis.set_title("Stain separated image (rescaled)")
axis.axis('off')
plt.show()

View File

@ -0,0 +1,171 @@
"""
============================
Local Histogram Equalization
============================
This example enhances an image with low contrast, using a method called *local
histogram equalization*, which spreads out the most frequent intensity values
in an image.
The equalized image [1]_ has a roughly linear cumulative distribution function
for each pixel neighborhood.
The local version [2]_ of histogram equalization emphasizes every local
gray-level variation.
These algorithms can be used on both 2D and 3D images.
References
----------
.. [1] https://en.wikipedia.org/wiki/Histogram_equalization
.. [2] https://en.wikipedia.org/wiki/Adaptive_histogram_equalization
"""
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from skimage import data
from skimage.util.dtype import dtype_range
from skimage.util import img_as_ubyte
from skimage import exposure
from skimage.morphology import disk
from skimage.morphology import ball
from skimage.filters import rank
matplotlib.rcParams['font.size'] = 9
def plot_img_and_hist(image, axes, bins=256):
"""Plot an image along with its histogram and cumulative histogram.
"""
ax_img, ax_hist = axes
ax_cdf = ax_hist.twinx()
# Display image
ax_img.imshow(image, cmap=plt.cm.gray)
ax_img.set_axis_off()
# Display histogram
ax_hist.hist(image.ravel(), bins=bins)
ax_hist.ticklabel_format(axis='y', style='scientific', scilimits=(0, 0))
ax_hist.set_xlabel('Pixel intensity')
xmin, xmax = dtype_range[image.dtype.type]
ax_hist.set_xlim(xmin, xmax)
# Display cumulative distribution
img_cdf, bins = exposure.cumulative_distribution(image, bins)
ax_cdf.plot(bins, img_cdf, 'r')
return ax_img, ax_hist, ax_cdf
# Load an example image
img = img_as_ubyte(data.moon())
# Global equalize
img_rescale = exposure.equalize_hist(img)
# Equalization
selem = disk(30)
img_eq = rank.equalize(img, selem=selem)
# Display results
fig = plt.figure(figsize=(8, 5))
axes = np.zeros((2, 3), dtype=object)
axes[0, 0] = plt.subplot(2, 3, 1)
axes[0, 1] = plt.subplot(2, 3, 2, sharex=axes[0, 0], sharey=axes[0, 0])
axes[0, 2] = plt.subplot(2, 3, 3, sharex=axes[0, 0], sharey=axes[0, 0])
axes[1, 0] = plt.subplot(2, 3, 4)
axes[1, 1] = plt.subplot(2, 3, 5)
axes[1, 2] = plt.subplot(2, 3, 6)
ax_img, ax_hist, ax_cdf = plot_img_and_hist(img, axes[:, 0])
ax_img.set_title('Low contrast image')
ax_hist.set_ylabel('Number of pixels')
ax_img, ax_hist, ax_cdf = plot_img_and_hist(img_rescale, axes[:, 1])
ax_img.set_title('Global equalize')
ax_img, ax_hist, ax_cdf = plot_img_and_hist(img_eq, axes[:, 2])
ax_img.set_title('Local equalize')
ax_cdf.set_ylabel('Fraction of total intensity')
# prevent overlap of y-axis labels
fig.tight_layout()
######################################################################
#
# 3D Equalization
# ===============
#
# 3D Volumes can also be equalized in a similar fashion.
# Here the histograms are collected from the entire 3D image, but
# only a single slice is shown for visual inspection.
matplotlib.rcParams['font.size'] = 9
def plot_img_and_hist(image, axes, bins=256):
"""Plot an image along with its histogram and cumulative histogram.
"""
ax_img, ax_hist = axes
ax_cdf = ax_hist.twinx()
# Display Slice of Image
ax_img.imshow(image[0], cmap=plt.cm.gray)
ax_img.set_axis_off()
# Display histogram
ax_hist.hist(image.ravel(), bins=bins)
ax_hist.ticklabel_format(axis='y', style='scientific', scilimits=(0, 0))
ax_hist.set_xlabel('Pixel intensity')
xmin, xmax = dtype_range[image.dtype.type]
ax_hist.set_xlim(xmin, xmax)
# Display cumulative distribution
img_cdf, bins = exposure.cumulative_distribution(image, bins)
ax_cdf.plot(bins, img_cdf, 'r')
return ax_img, ax_hist, ax_cdf
# Load an example image
img = img_as_ubyte(data.brain())
# Global equalization
img_rescale = exposure.equalize_hist(img)
# Local equalization
neighborhood = ball(3)
img_eq = rank.equalize(img, selem=neighborhood)
# Display results
fig, axes = plt.subplots(2, 3, figsize=(8, 5))
axes[0, 1] = plt.subplot(2, 3, 2, sharex=axes[0, 0], sharey=axes[0, 0])
axes[0, 2] = plt.subplot(2, 3, 3, sharex=axes[0, 0], sharey=axes[0, 0])
ax_img, ax_hist, ax_cdf = plot_img_and_hist(img, axes[:, 0])
ax_img.set_title('Low contrast image')
ax_hist.set_ylabel('Number of pixels')
ax_img, ax_hist, ax_cdf = plot_img_and_hist(img_rescale, axes[:, 1])
ax_img.set_title('Global equalize')
ax_img, ax_hist, ax_cdf = plot_img_and_hist(img_eq, axes[:, 2])
ax_img.set_title('Local equalize')
ax_cdf.set_ylabel('Fraction of total intensity')
# prevent overlap of y-axis labels
fig.tight_layout()
plt.show()

View File

@ -0,0 +1,84 @@
"""
=================================
Gamma and log contrast adjustment
=================================
This example adjusts image contrast by performing a Gamma and a Logarithmic
correction on the input image.
"""
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from skimage import data, img_as_float
from skimage import exposure
matplotlib.rcParams['font.size'] = 8
def plot_img_and_hist(image, axes, bins=256):
"""Plot an image along with its histogram and cumulative histogram.
"""
image = img_as_float(image)
ax_img, ax_hist = axes
ax_cdf = ax_hist.twinx()
# Display image
ax_img.imshow(image, cmap=plt.cm.gray)
ax_img.set_axis_off()
# Display histogram
ax_hist.hist(image.ravel(), bins=bins, histtype='step', color='black')
ax_hist.ticklabel_format(axis='y', style='scientific', scilimits=(0, 0))
ax_hist.set_xlabel('Pixel intensity')
ax_hist.set_xlim(0, 1)
ax_hist.set_yticks([])
# Display cumulative distribution
img_cdf, bins = exposure.cumulative_distribution(image, bins)
ax_cdf.plot(bins, img_cdf, 'r')
ax_cdf.set_yticks([])
return ax_img, ax_hist, ax_cdf
# Load an example image
img = data.moon()
# Gamma
gamma_corrected = exposure.adjust_gamma(img, 2)
# Logarithmic
logarithmic_corrected = exposure.adjust_log(img, 1)
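# For a uint8 input I in [0, 255], the two mappings above are, up to the
# final dtype cast (formulas assumed from the exposure docs):
#   gamma correction:       O = 255 * (I / 255) ** gamma
#   logarithmic correction: O = gain * 255 * log2(1 + I / 255)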
# Display results
fig = plt.figure(figsize=(8, 5))
axes = np.zeros((2, 3), dtype=object)
axes[0, 0] = plt.subplot(2, 3, 1)
axes[0, 1] = plt.subplot(2, 3, 2, sharex=axes[0, 0], sharey=axes[0, 0])
axes[0, 2] = plt.subplot(2, 3, 3, sharex=axes[0, 0], sharey=axes[0, 0])
axes[1, 0] = plt.subplot(2, 3, 4)
axes[1, 1] = plt.subplot(2, 3, 5)
axes[1, 2] = plt.subplot(2, 3, 6)
ax_img, ax_hist, ax_cdf = plot_img_and_hist(img, axes[:, 0])
ax_img.set_title('Low contrast image')
y_min, y_max = ax_hist.get_ylim()
ax_hist.set_ylabel('Number of pixels')
ax_hist.set_yticks(np.linspace(0, y_max, 5))
ax_img, ax_hist, ax_cdf = plot_img_and_hist(gamma_corrected, axes[:, 1])
ax_img.set_title('Gamma correction')
ax_img, ax_hist, ax_cdf = plot_img_and_hist(logarithmic_corrected, axes[:, 2])
ax_img.set_title('Logarithmic correction')
ax_cdf.set_ylabel('Fraction of total intensity')
ax_cdf.set_yticks(np.linspace(0, 1, 5))
# prevent overlap of y-axis labels
fig.tight_layout()
plt.show()

View File

@ -0,0 +1,109 @@
"""
=========================
Filtering regional maxima
=========================
Here, we use morphological reconstruction to create a background image, which
we can subtract from the original image to isolate bright features (regional
maxima).
First we try reconstruction by dilation starting at the edges of the image. We
initialize a seed image to the minimum intensity of the image, and set its
border to be the pixel values in the original image. These maximal pixels will
get dilated in order to reconstruct the background image.
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage import gaussian_filter
from skimage import data
from skimage import img_as_float
from skimage.morphology import reconstruction
# Convert to float: Important for subtraction later which won't work with uint8
image = img_as_float(data.coins())
image = gaussian_filter(image, 1)
seed = np.copy(image)
seed[1:-1, 1:-1] = image.min()
mask = image
dilated = reconstruction(seed, mask, method='dilation')
######################################################################
# Subtracting the dilated image leaves an image with just the coins and a
# flat, black background, as shown below.
fig, (ax0, ax1, ax2) = plt.subplots(nrows=1,
ncols=3,
figsize=(8, 2.5),
sharex=True,
sharey=True)
ax0.imshow(image, cmap='gray')
ax0.set_title('original image')
ax0.axis('off')
ax1.imshow(dilated, vmin=image.min(), vmax=image.max(), cmap='gray')
ax1.set_title('dilated')
ax1.axis('off')
ax2.imshow(image - dilated, cmap='gray')
ax2.set_title('image - dilated')
ax2.axis('off')
fig.tight_layout()
######################################################################
# Although the features (i.e. the coins) are clearly isolated, the coins
# surrounded by a bright background in the original image are dimmer in the
# subtracted image. We can attempt to correct this using a different seed
# image.
#
# Instead of creating a seed image with maxima along the image border, we can
# use the features of the image itself to seed the reconstruction process.
# Here, the seed image is the original image minus a fixed value, ``h``.
h = 0.4
seed = image - h
dilated = reconstruction(seed, mask, method='dilation')
hdome = image - dilated
######################################################################
# To get a feel for the reconstruction process, we plot the intensity of the
# mask, seed, and dilated images along a slice of the image (indicated by the
# red line).
fig, (ax0, ax1, ax2) = plt.subplots(nrows=1, ncols=3, figsize=(8, 2.5))
yslice = 197
ax0.plot(mask[yslice], '0.5', label='mask')
ax0.plot(seed[yslice], 'k', label='seed')
ax0.plot(dilated[yslice], 'r', label='dilated')
ax0.set_ylim(-0.2, 2)
ax0.set_title('image slice')
ax0.set_xticks([])
ax0.legend()
ax1.imshow(dilated, vmin=image.min(), vmax=image.max(), cmap='gray')
ax1.axhline(yslice, color='r', alpha=0.4)
ax1.set_title('dilated')
ax1.axis('off')
ax2.imshow(hdome, cmap='gray')
ax2.axhline(yslice, color='r', alpha=0.4)
ax2.set_title('image - dilated')
ax2.axis('off')
fig.tight_layout()
plt.show()
######################################################################
# As you can see in the image slice, each coin is given a different baseline
# intensity in the reconstructed image; this is because we used the local
# intensity (shifted by ``h``) as a seed value. As a result, the coins in the
# subtracted image have similar pixel intensities. The final result is known
# as the h-dome of an image since this tends to isolate regional maxima of
# height ``h``. This operation is particularly useful when your images are
# unevenly illuminated.

View File

@ -0,0 +1,39 @@
"""
================
RGB to grayscale
================
This example converts an image with RGB channels into an image with a single
grayscale channel.
The value of each grayscale pixel is calculated as the weighted sum of the
corresponding red, green and blue pixels as::
Y = 0.2125 R + 0.7154 G + 0.0721 B
These weights are derived from CRT phosphor luminances: they represent human
perception of red, green and blue better than equal weights do. [1]_
References
----------
.. [1] http://poynton.ca/PDFs/ColorFAQ.pdf
"""
import matplotlib.pyplot as plt
from skimage import data
from skimage.color import rgb2gray
original = data.astronaut()
grayscale = rgb2gray(original)
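# Sanity check (illustrative): rgb2gray first rescales the uint8 input to
# [0, 1] and then applies exactly the weighted sum given above.
import numpy as np
manual = original @ np.array([0.2125, 0.7154, 0.0721]) / 255
print(np.allclose(manual, grayscale))  # expect True, up to rounding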
fig, axes = plt.subplots(1, 2, figsize=(8, 4))
ax = axes.ravel()
ax[0].imshow(original)
ax[0].set_title("Original")
ax[1].imshow(grayscale, cmap=plt.cm.gray)
ax[1].set_title("Grayscale")
fig.tight_layout()
plt.show()

View File

@ -0,0 +1,82 @@
"""
==========
RGB to HSV
==========
This example illustrates how RGB to HSV (Hue, Saturation, Value) conversion
[1]_ can be used to facilitate segmentation processes.
Usually, objects in images have distinct colors (hues) and luminosities, so
that these features can be used to separate different areas of the image.
In the RGB representation the hue and the luminosity are expressed as a linear
combination of the R,G,B channels, whereas they correspond to single channels
of the HSV image (the Hue and the Value channels). A simple segmentation of the
image can then be effectively performed by a mere thresholding of the HSV
channels.
.. [1] https://en.wikipedia.org/wiki/HSL_and_HSV
"""
import matplotlib.pyplot as plt
from skimage import data
from skimage.color import rgb2hsv
##############################################################################
# We first load the RGB image and extract the Hue and Value channels:
rgb_img = data.coffee()
hsv_img = rgb2hsv(rgb_img)
hue_img = hsv_img[:, :, 0]
value_img = hsv_img[:, :, 2]
fig, (ax0, ax1, ax2) = plt.subplots(ncols=3, figsize=(8, 2))
ax0.imshow(rgb_img)
ax0.set_title("RGB image")
ax0.axis('off')
ax1.imshow(hue_img, cmap='hsv')
ax1.set_title("Hue channel")
ax1.axis('off')
ax2.imshow(value_img)
ax2.set_title("Value channel")
ax2.axis('off')
fig.tight_layout()
##############################################################################
# We then set a threshold on the Hue channel to separate the cup from the
# background:
hue_threshold = 0.04
binary_img = hue_img > hue_threshold
fig, (ax0, ax1) = plt.subplots(ncols=2, figsize=(8, 3))
ax0.hist(hue_img.ravel(), 512)
ax0.set_title("Histogram of the Hue channel with threshold")
ax0.axvline(x=hue_threshold, color='r', linestyle='dashed', linewidth=2)
ax0.set_xbound(0, 0.12)
ax1.imshow(binary_img)
ax1.set_title("Hue-thresholded image")
ax1.axis('off')
fig.tight_layout()
##############################################################################
# We finally perform an additional thresholding on the Value channel to partly
# remove the shadow of the cup:
fig, ax0 = plt.subplots(figsize=(4, 3))
value_threshold = 0.10
binary_img = (hue_img > hue_threshold) | (value_img < value_threshold)
ax0.imshow(binary_img)
ax0.set_title("Hue and value thresholded image")
ax0.axis('off')
fig.tight_layout()
plt.show()

View File

@ -0,0 +1,145 @@
"""
=========================
Tinting gray-scale images
=========================
It can be useful to artificially tint an image with some color, either to
highlight particular regions of an image or maybe just to liven up a grayscale
image. This example demonstrates image-tinting by scaling RGB values and by
adjusting colors in the HSV color-space.
In 2D, color images are often represented in RGB---3 layers of 2D arrays, where
the 3 layers represent (R)ed, (G)reen and (B)lue channels of the image. The
simplest way of getting a tinted image is to set each RGB channel to the
grayscale image scaled by a different multiplier for each channel. For example,
multiplying the green and blue channels by 0 leaves only the red channel and
produces a bright red image. Similarly, zeroing-out the blue channel leaves
only the red and green channels, which combine to form yellow.
"""
import matplotlib.pyplot as plt
from skimage import data
from skimage import color
from skimage import img_as_float
grayscale_image = img_as_float(data.camera()[::2, ::2])
image = color.gray2rgb(grayscale_image)
red_multiplier = [1, 0, 0]
yellow_multiplier = [1, 1, 0]
fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(8, 4),
sharex=True, sharey=True)
ax1.imshow(red_multiplier * image)
ax2.imshow(yellow_multiplier * image)
######################################################################
# In many cases, dealing with RGB values may not be ideal. Because of that,
# there are many other `color spaces`_ in which you can represent a color
# image. One popular color space is called HSV, which represents hue (~the
# color), saturation (~colorfulness), and value (~brightness). For example, a
# color (hue) might be green, but its saturation is how intense that green is
# ---where olive is on the low end and neon on the high end.
#
# In some implementations, the hue in HSV goes from 0 to 360, since hues wrap
# around in a circle. In scikit-image, however, hues are float values from 0
# to 1, so that hue, saturation, and value all share the same scale.
#
# .. _color spaces:
# https://en.wikipedia.org/wiki/List_of_color_spaces_and_their_uses
#
# Below, we plot a linear gradient in the hue, with the saturation and value
# turned all the way up:
import numpy as np
hue_gradient = np.linspace(0, 1)
hsv = np.ones(shape=(1, len(hue_gradient), 3), dtype=float)
hsv[:, :, 0] = hue_gradient
all_hues = color.hsv2rgb(hsv)
fig, ax = plt.subplots(figsize=(5, 2))
# Set image extent so hues go from 0 to 1 and the image is a nice aspect ratio.
ax.imshow(all_hues, extent=(0, 1, 0, 0.2))
ax.set_axis_off()
######################################################################
# Notice how the colors at the far left and far right are the same. That
# reflects the fact that the hues wrap around like the color wheel (see HSV_
# for more info).
#
# .. _HSV: https://en.wikipedia.org/wiki/HSL_and_HSV
#
# Now, let's create a little utility function to take an RGB image and:
#
# 1. Transform the RGB image to HSV
# 2. Set the hue and saturation
# 3. Transform the HSV image back to RGB
def colorize(image, hue, saturation=1):
""" Add color of the given hue to an RGB image.
By default, set the saturation to 1 so that the colors pop!
"""
hsv = color.rgb2hsv(image)
hsv[:, :, 1] = saturation
hsv[:, :, 0] = hue
return color.hsv2rgb(hsv)
######################################################################
# Notice that we need to bump up the saturation; images with zero saturation
# are grayscale, so we need a non-zero value to actually see the color
# we've set.
#
# Using the function above, we plot six images with a linear gradient in the
# hue and a non-zero saturation:
hue_rotations = np.linspace(0, 1, 6)
fig, axes = plt.subplots(nrows=2, ncols=3, sharex=True, sharey=True)
for ax, hue in zip(axes.flat, hue_rotations):
# Turn down the saturation to give it that vintage look.
tinted_image = colorize(image, hue, saturation=0.3)
ax.imshow(tinted_image, vmin=0, vmax=1)
ax.set_axis_off()
fig.tight_layout()
######################################################################
# You can combine this tinting effect with numpy slicing and fancy-indexing
# to selectively tint your images. In the example below, we set the hue of
# some rectangles using slicing and scale the RGB values of some pixels found
# by thresholding. In practice, you might want to define a region for tinting
# based on segmentation results or blob detection methods.
from skimage.filters import rank
# Square regions defined as slices over the first two dimensions.
top_left = (slice(25),) * 2
bottom_right = (slice(-25, None),) * 2
sliced_image = image.copy()
sliced_image[top_left] = colorize(image[top_left], 0.82, saturation=0.5)
sliced_image[bottom_right] = colorize(image[bottom_right], 0.5, saturation=0.5)
# Create a mask selecting regions with interesting texture.
noisy = rank.entropy(grayscale_image, np.ones((9, 9)))
textured_regions = noisy > 4.25
# Note that using `colorize` here is a bit more difficult, since `rgb2hsv`
# expects an RGB image (height x width x channel), but fancy-indexing returns
# a set of RGB pixels (# pixels x channel).
masked_image = image.copy()
masked_image[textured_regions, :] *= red_multiplier
fig, (ax1, ax2) = plt.subplots(ncols=2, nrows=1, figsize=(8, 4),
sharex=True, sharey=True)
ax1.imshow(sliced_image)
ax2.imshow(masked_image)
plt.show()
######################################################################
# For coloring multiple regions, you may also be interested in
# `skimage.color.label2rgb <http://scikit-
# image.org/docs/0.9.x/api/skimage.color.html#label2rgb>`_.

View File

@ -0,0 +1,2 @@
Data
----

View File

@ -0,0 +1,26 @@
"""
==========================================
Datasets with 3 or more spatial dimensions
==========================================
Most scikit-image functions are compatible with 3D datasets, i.e., images with
3 spatial dimensions (to be distinguished from 2D multichannel images, which
are also arrays with
three axes). :func:`skimage.data.cells3d` returns a 3D fluorescence microscopy
image of cells. The returned dataset is a 3D multichannel image with dimensions
provided in ``(z, c, y, x)`` order. Channel 0 contains cell membranes, while channel
1 contains nuclei.
The example below shows how to explore this dataset. This 3D image can be used
to test the various functions of scikit-image.
"""
from skimage import data
import plotly
import plotly.express as px
img = data.cells3d()[20:]
fig = px.imshow(img, facet_col=1, animation_frame=0,
binary_string=True, binary_format='jpg')
fig.layout.annotations[0]['text'] = 'Cell membranes'
fig.layout.annotations[1]['text'] = 'Nuclei'
plotly.io.show(fig)
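######################################################################
# As a quick check of the layout described above, we can split the two
# channels along the ``c`` axis (the exact ``z`` size depends on the
# slicing applied above):
membranes = img[:, 0, :, :]
nuclei = img[:, 1, :, :]
print(img.shape, membranes.shape, nuclei.shape)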

View File

@ -0,0 +1,47 @@
"""
======================
General-purpose images
======================
The title of each image indicates the name of the function.
"""
import matplotlib.pyplot as plt
import matplotlib
from skimage import data
matplotlib.rcParams['font.size'] = 18
images = ('astronaut',
'binary_blobs',
'brick',
'colorwheel',
'camera',
'cat',
'checkerboard',
'clock',
'coffee',
'coins',
'eagle',
'grass',
'gravel',
'horse',
'logo',
'page',
'text',
'rocket',
)
for name in images:
caller = getattr(data, name)
image = caller()
plt.figure()
plt.title(name)
if image.ndim == 2:
plt.imshow(image, cmap=plt.cm.gray)
else:
plt.imshow(image)
plt.show()

View File

@ -0,0 +1,39 @@
"""
=================
Scientific images
=================
The title of each image indicates the name of the function.
"""
import matplotlib.pyplot as plt
import matplotlib
from skimage import data
matplotlib.rcParams['font.size'] = 18
images = ('hubble_deep_field',
'immunohistochemistry',
'lily',
'microaneurysms',
'moon',
'retina',
'shepp_logan_phantom',
'skin',
'cell',
'human_mitosis',
)
for name in images:
caller = getattr(data, name)
image = caller()
plt.figure()
plt.title(name)
if image.ndim == 2:
plt.imshow(image, cmap=plt.cm.gray)
else:
plt.imshow(image)
plt.show()

View File

@ -0,0 +1,63 @@
"""
===============
Specific images
===============
"""
import matplotlib.pyplot as plt
import matplotlib
from skimage import data
matplotlib.rcParams['font.size'] = 18
######################################################################
#
# Stereo images
# =============
fig, axes = plt.subplots(1, 2, figsize=(8, 4))
ax = axes.ravel()
images = data.stereo_motorcycle()
ax[0].imshow(images[0])
ax[1].imshow(images[1])
fig.tight_layout()
plt.show()
######################################################################
#
# PIV images
# =============
fig, axes = plt.subplots(1, 2, figsize=(8, 4))
ax = axes.ravel()
images = data.vortex()
ax[0].imshow(images[0])
ax[1].imshow(images[1])
fig.tight_layout()
plt.show()
######################################################################
#
# Faces and non-faces dataset
# ===========================
#
# A sample of 20 of the 200 images in the dataset is displayed.
fig, axes = plt.subplots(4, 5, figsize=(20, 20))
ax = axes.ravel()
images = data.lfw_subset()
for i in range(20):
ax[i].imshow(images[90+i], cmap=plt.cm.gray)
ax[i].axis('off')
fig.tight_layout()
plt.show()

View File

@ -0,0 +1,8 @@
Examples for developers
-----------------------
In this folder, we have examples for advanced topics, including detailed
explanations of the inner workings of certain algorithms.
These examples require some basic knowledge of image processing. They are
targeted at existing or would-be scikit-image developers wishing to develop
their knowledge of image processing algorithms.

View File

@ -0,0 +1,293 @@
"""
========
Max-tree
========
The max-tree is a hierarchical representation of an image that is the basis
for a large family of morphological filters.
If we apply a threshold operation to an image, we obtain a binary image
containing one or several connected components. If we apply a lower
threshold, all the connected components from the higher threshold are
contained in the connected components from the lower threshold. This
naturally defines a hierarchy of nested components that can be
represented by a tree. Whenever a connected component A obtained by
thresholding with threshold t1 is contained in a component B obtained by
thresholding with threshold t2 < t1, we say that B is the parent of A.
The resulting tree structure is called a component tree. The max-tree
is a compact representation of such a component tree. [1]_, [2]_, [3]_, [4]_
In this example we give an intuition of what a max-tree is.
References
----------
.. [1] Salembier, P., Oliveras, A., & Garrido, L. (1998). Antiextensive
Connected Operators for Image and Sequence Processing.
IEEE Transactions on Image Processing, 7(4), 555-570.
:DOI:`10.1109/83.663500`
.. [2] Berger, C., Geraud, T., Levillain, R., Widynski, N., Baillard, A.,
Bertin, E. (2007). Effective Component Tree Computation with
Application to Pattern Recognition in Astronomical Imaging.
In International Conference on Image Processing (ICIP) (pp. 41-44).
:DOI:`10.1109/ICIP.2007.4379949`
.. [3] Najman, L., & Couprie, M. (2006). Building the component tree in
quasi-linear time. IEEE Transactions on Image Processing, 15(11),
3531-3539.
:DOI:`10.1109/TIP.2006.877518`
.. [4] Carlinet, E., & Geraud, T. (2014). A Comparative Review of
Component Tree Computation Algorithms. IEEE Transactions on Image
Processing, 23(9), 3885-3895.
:DOI:`10.1109/TIP.2014.2336551`
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
from skimage.morphology import max_tree
import networkx as nx
#####################################################################
# Before we start : a few helper functions
def plot_img(ax, image, title, plot_text, image_values):
"""Plot an image, overlaying image values or indices."""
ax.imshow(image, cmap='gray', aspect='equal', vmin=0, vmax=np.max(image))
ax.set_title(title)
ax.set_yticks([])
ax.set_xticks([])
for x in np.arange(-0.5, image.shape[0], 1.0):
ax.add_artist(Line2D((x, x), (-0.5, image.shape[0] - 0.5),
color='blue', linewidth=2))
for y in np.arange(-0.5, image.shape[1], 1.0):
ax.add_artist(Line2D((-0.5, image.shape[1]), (y, y),
color='blue', linewidth=2))
if plot_text:
for i, j in np.ndindex(*image_values.shape):
ax.text(j, i, image_values[i, j], fontsize=8,
horizontalalignment='center',
verticalalignment='center',
color='red')
return
def prune(G, node, res):
"""Transform a canonical max tree to a max tree."""
value = G.nodes[node]['value']
res[node] = str(node)
preds = [p for p in G.predecessors(node)]
for p in preds:
if (G.nodes[p]['value'] == value):
res[node] += ', %i' % p
G.remove_node(p)
else:
prune(G, p, res)
G.nodes[node]['label'] = res[node]
return
def accumulate(G, node, res):
"""Transform a max tree to a component tree."""
total = G.nodes[node]['label']
parents = G.predecessors(node)
for p in parents:
total += ', ' + accumulate(G, p, res)
res[node] = total
return total
def position_nodes_for_max_tree(G, image_rav, root_x=4, delta_x=1.2):
"""Set the position of nodes of a max-tree.
This function helps to visually distinguish between nodes at the same
level of the hierarchy and nodes at different levels.
"""
pos = {}
    for node in reversed(list(nx.topological_sort(G))):
        value = G.nodes[node]['value']
        if G.out_degree(node) == 0:
            # root
            pos[node] = (root_x, value)
        in_nodes = [y for y in G.predecessors(node)]
        # place the nodes at the same level
        level_nodes = [y for y in
                       filter(lambda x: image_rav[x] == value, in_nodes)]
nb_level_nodes = len(level_nodes) + 1
c = nb_level_nodes // 2
i = - c
if (len(level_nodes) < 3):
hy = 0
m = 0
else:
hy = 0.25
m = hy / (c - 1)
for level_node in level_nodes:
            if i == 0:
i += 1
if (len(level_nodes) < 3):
pos[level_node] = (pos[node][0] + i * 0.6 * delta_x, value)
else:
pos[level_node] = (pos[node][0] + i * 0.6 * delta_x,
value + m * (2 * np.abs(i) - c - 1))
i += 1
# place the nodes at different levels
other_level_nodes = [y for y in
filter(lambda x: image_rav[x] > value, in_nodes)]
if (len(other_level_nodes) == 1):
i = 0
else:
i = - len(other_level_nodes) // 2
for other_level_node in other_level_nodes:
            if len(other_level_nodes) % 2 == 0 and i == 0:
i += 1
pos[other_level_node] = (pos[node][0] + i * delta_x,
image_rav[other_level_node])
i += 1
return pos
def plot_tree(graph, positions, ax, *, title='', labels=None,
font_size=8, text_size=8):
"""Plot max and component trees."""
nx.draw_networkx(graph, pos=positions, ax=ax,
node_size=40, node_shape='s', node_color='white',
font_size=font_size, labels=labels)
for v in range(image_rav.min(), image_rav.max() + 1):
ax.hlines(v - 0.5, -3, 10, linestyles='dotted')
ax.text(-3, v - 0.15, "val: %i" % v, fontsize=text_size)
ax.hlines(v + 0.5, -3, 10, linestyles='dotted')
ax.set_xlim(-3, 10)
ax.set_title(title)
ax.set_axis_off()
#####################################################################
# Image Definition
# ================
# We define a small test image.
# For clarity, we choose an example image, where image values cannot be
# confounded with indices (different range).
image = np.array([[40, 40, 39, 39, 38],
[40, 41, 39, 39, 39],
[30, 30, 30, 32, 32],
[33, 33, 30, 32, 35],
[30, 30, 30, 33, 36]], dtype=np.uint8)
#####################################################################
# Max-tree
# ========
# Next, we calculate the max-tree of this image.
# max-tree of the image
P, S = max_tree(image)
P_rav = P.ravel()
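# ``P`` gives, for each pixel, the (raveled) index of its parent in the
# canonical max-tree; ``S`` orders the pixels so that parents appear
# before their children, with ``S[0]`` being the root.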
#####################################################################
# Image plots
# ===========
# Then, we visualize the image and its raveled indices.
# Concretely, we plot the image with the following overlays:
# - the image values
# - the raveled indices (serve as pixel identifiers)
# - the output of the max_tree function
# raveled image
image_rav = image.ravel()
# raveled indices of the example image (for display purpose)
raveled_indices = np.arange(image.size).reshape(image.shape)
fig, (ax1, ax2, ax3) = plt.subplots(1, 3, sharey=True, figsize=(9, 3))
plot_img(ax1, image - image.min(), 'Image Values',
plot_text=True, image_values=image)
plot_img(ax2, image - image.min(), 'Raveled Indices',
plot_text=True, image_values=raveled_indices)
plot_img(ax3, image - image.min(), 'Max-tree indices',
plot_text=True, image_values=P)
#####################################################################
# Visualizing threshold operations
# ================================
# Now, we investigate the results of a series of threshold operations.
# The component tree (and max-tree) provide representations of the
# inclusion relationships between connected components at different
# levels.
fig, axes = plt.subplots(3, 3, sharey=True, sharex=True, figsize=(6, 6))
thresholds = np.unique(image)
for k, threshold in enumerate(thresholds):
bin_img = image >= threshold
plot_img(axes[(k // 3), (k % 3)], bin_img, 'Threshold : %i' % threshold,
plot_text=True, image_values=raveled_indices)
#####################################################################
# Max-tree plots
# ==============
# Now, we plot the component and max-trees. A component tree relates
# the different pixel sets resulting from all possible threshold operations
# to each other. There is an arrow in the graph, if a component at one level
# is included in the component of a lower level. The max-tree is just
# a different encoding of the pixel sets.
#
# 1. the component tree: pixel sets are explicitly written out. We see for
# instance that {6} (result of applying a threshold at 41) is the parent
# of {0, 1, 5, 6} (threshold at 40).
# 2. the max-tree: only pixels that come into the set at this level
# are explicitly written out. We therefore will write
# {6} -> {0,1,5} instead of {6} -> {0, 1, 5, 6}
# 3. the canonical max-tree: this is the representation which is given by
# our implementation. Here, every pixel is a node. Connected components
# of several pixels are represented by one of the pixels. We thus replace
# {6} -> {0,1,5} by {6} -> {5}, {1} -> {5}, {0} -> {5}
# This allows us to represent the graph by an image (top row, third column).
# the canonical max-tree graph
canonical_max_tree = nx.DiGraph()
canonical_max_tree.add_nodes_from(S)
for node in canonical_max_tree.nodes():
canonical_max_tree.nodes[node]['value'] = image_rav[node]
canonical_max_tree.add_edges_from([(n, P_rav[n]) for n in S[1:]])
# max-tree from the canonical max-tree
nx_max_tree = nx.DiGraph(canonical_max_tree)
labels = {}
prune(nx_max_tree, S[0], labels)
# component tree from the max-tree
labels_ct = {}
total = accumulate(nx_max_tree, S[0], labels_ct)
# positions of nodes : canonical max-tree (CMT)
pos_cmt = position_nodes_for_max_tree(canonical_max_tree, image_rav)
# positions of nodes : max-tree (MT)
pos_mt = dict(zip(nx_max_tree.nodes, [pos_cmt[node]
for node in nx_max_tree.nodes]))
# plot the trees with networkx and matplotlib
fig, (ax1, ax2, ax3) = plt.subplots(1, 3, sharey=True, figsize=(20, 8))
plot_tree(nx_max_tree, pos_mt, ax1, title='Component tree',
labels=labels_ct, font_size=6, text_size=8)
plot_tree(nx_max_tree, pos_mt, ax2, title='Max tree', labels=labels)
plot_tree(canonical_max_tree, pos_cmt, ax3, title='Canonical max tree')
fig.tight_layout()
plt.show()

View File

@ -0,0 +1,235 @@
"""
===============
Li thresholding
===============
In 1993, Li and Lee proposed a new criterion for finding the "optimal"
threshold to distinguish between the background and foreground of an image
[1]_. They proposed that minimizing the *cross-entropy* between the foreground
and the foreground mean, and the background and the background mean, would give
the best threshold in most situations.
Until 1998, though, the way to find this threshold was by trying all possible
thresholds and then choosing the one with the smallest cross-entropy. At that
point, Li and Tam implemented a new, iterative method to more quickly find the
optimum point by using the slope of the cross-entropy [2]_. This is the method
implemented in scikit-image's :func:`skimage.filters.threshold_li`.
Here, we demonstrate the cross-entropy and its optimization by Li's iterative
method. Note that we are using the private function `_cross_entropy`, which
should not be used in production code, as it could change.
.. [1] Li C.H. and Lee C.K. (1993) "Minimum Cross Entropy Thresholding"
Pattern Recognition, 26(4): 617-625
:DOI:`10.1016/0031-3203(93)90115-D`
.. [2] Li C.H. and Tam P.K.S. (1998) "An Iterative Algorithm for Minimum
Cross Entropy Thresholding" Pattern Recognition Letters, 18(8): 771-776
:DOI:`10.1016/S0167-8655(98)00057-9`
"""
import numpy as np
import matplotlib.pyplot as plt
from skimage import data
from skimage import filters
from skimage.filters.thresholding import _cross_entropy
cell = data.cell()
camera = data.camera()
###############################################################################
# First, let's plot the cross-entropy for the :func:`skimage.data.camera`
# image at all possible thresholds.
thresholds = np.arange(np.min(camera) + 1.5, np.max(camera) - 1.5)
entropies = [_cross_entropy(camera, t) for t in thresholds]
optimal_camera_threshold = thresholds[np.argmin(entropies)]
fig, ax = plt.subplots(1, 3, figsize=(8, 3))
ax[0].imshow(camera, cmap='gray')
ax[0].set_title('image')
ax[0].set_axis_off()
ax[1].imshow(camera > optimal_camera_threshold, cmap='gray')
ax[1].set_title('thresholded')
ax[1].set_axis_off()
ax[2].plot(thresholds, entropies)
ax[2].set_xlabel('thresholds')
ax[2].set_ylabel('cross-entropy')
ax[2].vlines(optimal_camera_threshold,
ymin=np.min(entropies) - 0.05 * np.ptp(entropies),
             ymax=np.max(entropies) + 0.05 * np.ptp(entropies))
ax[2].set_title('optimal threshold')
fig.tight_layout()
print('The brute force optimal threshold is:', optimal_camera_threshold)
print('The computed optimal threshold is:', filters.threshold_li(camera))
plt.show()
###############################################################################
# Next, let's use the ``iter_callback`` feature of ``threshold_li`` to examine
# the optimization process as it happens.
iter_thresholds = []
optimal_threshold = filters.threshold_li(camera,
iter_callback=iter_thresholds.append)
iter_entropies = [_cross_entropy(camera, t) for t in iter_thresholds]
print('Only', len(iter_thresholds), 'thresholds examined.')
fig, ax = plt.subplots()
ax.plot(thresholds, entropies, label='all threshold entropies')
ax.plot(iter_thresholds, iter_entropies, label='optimization path')
ax.scatter(iter_thresholds, iter_entropies, c='C1')
ax.legend(loc='upper right')
plt.show()
###############################################################################
# This is clearly much more efficient than the brute force approach. However,
# in some images, the cross-entropy is not *convex*, i.e., it does not have
# a single optimum. In this case, gradient descent could yield a threshold
# that is not optimal. In the following example, a bad initial guess for the
# optimization results in a poor threshold selection.
iter_thresholds2 = []
opt_threshold2 = filters.threshold_li(cell, initial_guess=64,
iter_callback=iter_thresholds2.append)
thresholds2 = np.arange(np.min(cell) + 1.5, np.max(cell) - 1.5)
entropies2 = [_cross_entropy(cell, t) for t in thresholds2]
iter_entropies2 = [_cross_entropy(cell, t) for t in iter_thresholds2]
fig, ax = plt.subplots(1, 3, figsize=(8, 3))
ax[0].imshow(cell, cmap='magma')
ax[0].set_title('image')
ax[0].set_axis_off()
ax[1].imshow(cell > opt_threshold2, cmap='gray')
ax[1].set_title('thresholded')
ax[1].set_axis_off()
ax[2].plot(thresholds2, entropies2, label='all threshold entropies')
ax[2].plot(iter_thresholds2, iter_entropies2, label='optimization path')
ax[2].scatter(iter_thresholds2, iter_entropies2, c='C1')
ax[2].legend(loc='upper right')
plt.show()
###############################################################################
# In this image, amazingly, the *default* initial guess, the mean image value,
# actually lies *right* on top of the peak between the two "valleys" of the
# objective function. Without supplying an initial guess, Li's thresholding
# method does nothing at all!
iter_thresholds3 = []
opt_threshold3 = filters.threshold_li(cell,
iter_callback=iter_thresholds3.append)
iter_entropies3 = [_cross_entropy(cell, t) for t in iter_thresholds3]
fig, ax = plt.subplots(1, 3, figsize=(8, 3))
ax[0].imshow(cell, cmap='magma')
ax[0].set_title('image')
ax[0].set_axis_off()
ax[1].imshow(cell > opt_threshold3, cmap='gray')
ax[1].set_title('thresholded')
ax[1].set_axis_off()
ax[2].plot(thresholds2, entropies2, label='all threshold entropies')
ax[2].plot(iter_thresholds3, iter_entropies3, label='optimization path')
ax[2].scatter(iter_thresholds3, iter_entropies3, c='C1')
ax[2].legend(loc='upper right')
plt.show()
###############################################################################
# To see what is going on, let's define a function, ``li_gradient``, that
# replicates the inner loop of the Li method and returns the *change* from the
# current threshold value to the next one. When this gradient is 0, we are at
# a so-called *stationary point* and Li returns this value. When it is
# negative, the next threshold guess will be lower, and when it is positive,
# the next guess will be higher.
#
# In the plot below, we show the cross-entropy and the Li update path when the
# initial guess is on the *right* side of that entropy peak. We overlay the
# threshold update gradient, marking the 0 gradient line and the default
# initial guess by ``threshold_li``.
def li_gradient(image, t):
"""Find the threshold update at a given threshold."""
foreground = image > t
mean_fore = np.mean(image[foreground])
mean_back = np.mean(image[~foreground])
t_next = ((mean_back - mean_fore) /
(np.log(mean_back) - np.log(mean_fore)))
dt = t_next - t
return dt
iter_thresholds4 = []
opt_threshold4 = filters.threshold_li(cell, initial_guess=68,
iter_callback=iter_thresholds4.append)
iter_entropies4 = [_cross_entropy(cell, t) for t in iter_thresholds4]
print(len(iter_thresholds4), 'examined, optimum:', opt_threshold4)
gradients = [li_gradient(cell, t) for t in thresholds2]
fig, ax1 = plt.subplots()
ax1.plot(thresholds2, entropies2)
ax1.plot(iter_thresholds4, iter_entropies4)
ax1.scatter(iter_thresholds4, iter_entropies4, c='C1')
ax1.set_xlabel('threshold')
ax1.set_ylabel('cross entropy', color='C0')
ax1.tick_params(axis='y', labelcolor='C0')
ax2 = ax1.twinx()
ax2.plot(thresholds2, gradients, c='C3')
ax2.hlines([0], xmin=thresholds2[0], xmax=thresholds2[-1],
colors='gray', linestyles='dashed')
ax2.vlines(np.mean(cell), ymin=np.min(gradients), ymax=np.max(gradients),
colors='gray', linestyles='dashed')
ax2.set_ylabel(r'$\Delta$(threshold)', color='C3')
ax2.tick_params(axis='y', labelcolor='C3')
fig.tight_layout()
plt.show()
###############################################################################
# In addition to allowing users to provide a number as an initial guess,
# :func:`skimage.filters.threshold_li` can receive a function that makes a
# guess from the image intensities, just like :func:`numpy.mean` does by
# default. This might be a good option when many images with different ranges
# need to be processed.
def quantile_95(image):
# you can use np.quantile(image, 0.95) if you have NumPy>=1.15
return np.percentile(image, 95)
iter_thresholds5 = []
opt_threshold5 = filters.threshold_li(cell, initial_guess=quantile_95,
iter_callback=iter_thresholds5.append)
iter_entropies5 = [_cross_entropy(cell, t) for t in iter_thresholds5]
print(len(iter_thresholds5), 'examined, optimum:', opt_threshold5)
fig, ax1 = plt.subplots()
ax1.plot(thresholds2, entropies2)
ax1.plot(iter_thresholds5, iter_entropies5)
ax1.scatter(iter_thresholds5, iter_entropies5, c='C1')
ax1.set_xlabel('threshold')
ax1.set_ylabel('cross entropy', color='C0')
ax1.tick_params(axis='y', labelcolor='C0')
plt.show()

View File

@ -0,0 +1,2 @@
Edges and lines
---------------

View File

@ -0,0 +1,78 @@
"""
====================
Active Contour Model
====================
The active contour model is a method to fit open or closed splines to lines or
edges in an image [1]_. It works by minimising an energy that is defined in
part by the image and in part by the spline's shape: length and smoothness. The
minimization is done implicitly in the shape energy and explicitly in the
image energy.
In the following two examples the active contour model is used (1) to segment
the face of a person from the rest of an image by fitting a closed curve
to the edges of the face and (2) to find the darkest curve between two fixed
points while obeying smoothness considerations. Typically it is a good idea to
smooth images a bit before analyzing, as done in the following examples.
We initialize a circle around the astronaut's face and use the default boundary
condition ``boundary_condition='periodic'`` to fit a closed curve. The default
parameters ``w_line=0, w_edge=1`` will make the curve search towards edges,
such as the boundaries of the face.
.. [1] *Snakes: Active contour models*. Kass, M.; Witkin, A.; Terzopoulos, D.
International Journal of Computer Vision 1 (4): 321 (1988).
:DOI:`10.1007/BF00133570`
"""
import numpy as np
import matplotlib.pyplot as plt
from skimage.color import rgb2gray
from skimage import data
from skimage.filters import gaussian
from skimage.segmentation import active_contour
img = data.astronaut()
img = rgb2gray(img)
s = np.linspace(0, 2*np.pi, 400)
r = 100 + 100*np.sin(s)
c = 220 + 100*np.cos(s)
init = np.array([r, c]).T
snake = active_contour(gaussian(img, 3),
init, alpha=0.015, beta=10, gamma=0.001)
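# ``alpha`` penalizes the snake's length (higher values make it contract
# faster), ``beta`` its curvature (higher values make it smoother), and
# ``gamma`` sets the explicit time step of the optimization.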
fig, ax = plt.subplots(figsize=(7, 7))
ax.imshow(img, cmap=plt.cm.gray)
ax.plot(init[:, 1], init[:, 0], '--r', lw=3)
ax.plot(snake[:, 1], snake[:, 0], '-b', lw=3)
ax.set_xticks([]), ax.set_yticks([])
ax.axis([0, img.shape[1], img.shape[0], 0])
plt.show()
######################################################################
# Here we initialize a straight line between two points, `(5, 136)` and
# `(424, 50)`, and require that the spline has its end points there by giving
# the boundary condition `boundary_condition='fixed'`. We furthermore
# make the algorithm search for dark lines by giving a negative `w_line` value.
img = data.text()
r = np.linspace(136, 50, 100)
c = np.linspace(5, 424, 100)
init = np.array([r, c]).T
snake = active_contour(gaussian(img, 1), init, boundary_condition='fixed',
alpha=0.1, beta=1.0, w_line=-5, w_edge=0, gamma=0.1)
fig, ax = plt.subplots(figsize=(9, 5))
ax.imshow(img, cmap=plt.cm.gray)
ax.plot(init[:, 1], init[:, 0], '--r', lw=3)
ax.plot(snake[:, 1], snake[:, 0], '-b', lw=3)
ax.set_xticks([]), ax.set_yticks([])
ax.axis([0, img.shape[1], img.shape[0], 0])
plt.show()

View File

@ -0,0 +1,55 @@
"""
===================
Canny edge detector
===================
The Canny filter is a multi-stage edge detector. It uses a filter based on the
derivative of a Gaussian in order to compute the intensity of the gradients. The
Gaussian reduces the effect of noise present in the image. Then, potential
edges are thinned down to 1-pixel curves by removing non-maximum pixels of the
gradient magnitude. Finally, edge pixels are kept or removed using hysteresis
thresholding on the gradient magnitude.
The Canny filter has three adjustable parameters: the width of the Gaussian (the
noisier the image, the greater the width), and the low and high threshold for
the hysteresis thresholding.
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy import ndimage as ndi
from skimage import feature
# Generate noisy image of a square
im = np.zeros((128, 128))
im[32:-32, 32:-32] = 1
im = ndi.rotate(im, 15, mode='constant')
im = ndi.gaussian_filter(im, 4)
im += 0.2 * np.random.random(im.shape)
# Compute the Canny filter for two values of sigma
edges1 = feature.canny(im)
edges2 = feature.canny(im, sigma=3)
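# The hysteresis thresholds can also be set explicitly; the values below are
# hand-picked for illustration, not tuned for this image (result not shown).
edges3 = feature.canny(im, sigma=3, low_threshold=0.05, high_threshold=0.2)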
# display results
fig, (ax1, ax2, ax3) = plt.subplots(nrows=1, ncols=3, figsize=(8, 3),
sharex=True, sharey=True)
ax1.imshow(im, cmap=plt.cm.gray)
ax1.axis('off')
ax1.set_title('noisy image', fontsize=20)
ax2.imshow(edges1, cmap=plt.cm.gray)
ax2.axis('off')
ax2.set_title(r'Canny filter, $\sigma=1$', fontsize=20)
ax3.imshow(edges2, cmap=plt.cm.gray)
ax3.axis('off')
ax3.set_title(r'Canny filter, $\sigma=3$', fontsize=20)
fig.tight_layout()
plt.show()

View File

@ -0,0 +1,139 @@
"""
========================================
Circular and Elliptical Hough Transforms
========================================
The Hough transform in its simplest form is a `method to detect
straight lines <https://en.wikipedia.org/wiki/Hough_transform>`__
but it can also be used to detect circles or ellipses.
The algorithm assumes that edges have already been detected, and it is robust
against noise and missing points.
Circle detection
================
In the following example, the Hough transform is used to detect
coin positions and match their edges. We provide a range of
plausible radii. For each radius, candidate circles are accumulated and
we finally keep the three most prominent candidates.
The result shows that coin positions are well-detected.
Algorithm overview
------------------
Given a black circle on a white background, we first guess its
radius (or a range of radii) to construct a new circle.
This circle is applied on each black pixel of the original picture
and the coordinates of this circle are voting in an accumulator.
From this geometrical construction, the original circle center
position receives the highest score.
Note that the accumulator size is built to be larger than the
original picture in order to detect centers outside the frame.
Its size is extended by two times the largest radius.
"""
import numpy as np
import matplotlib.pyplot as plt
from skimage import data, color
from skimage.transform import hough_circle, hough_circle_peaks
from skimage.feature import canny
from skimage.draw import circle_perimeter
from skimage.util import img_as_ubyte
# Load picture and detect edges
image = img_as_ubyte(data.coins()[160:230, 70:270])
edges = canny(image, sigma=3, low_threshold=10, high_threshold=50)
# Try a range of plausible radii
hough_radii = np.arange(20, 35, 2)
hough_res = hough_circle(edges, hough_radii)
# Select the most prominent 3 circles
accums, cx, cy, radii = hough_circle_peaks(hough_res, hough_radii,
total_num_peaks=3)
# Draw them
fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(10, 4))
image = color.gray2rgb(image)
for center_y, center_x, radius in zip(cy, cx, radii):
circy, circx = circle_perimeter(center_y, center_x, radius,
shape=image.shape)
image[circy, circx] = (220, 20, 20)
ax.imshow(image, cmap=plt.cm.gray)
plt.show()
######################################################################
# Ellipse detection
# =================
#
# In this second example, the aim is to detect the edge of a coffee cup.
# Basically, this is a projection of a circle, i.e. an ellipse. The problem
# to solve is much more difficult because five parameters have to be
# determined, instead of three for circles.
#
# Algorithm overview
# -------------------
#
# The algorithm takes two different points belonging to the ellipse and
# assumes that they define the major axis. A loop over all the other points
# determines how well an ellipse passes through them. A good match
# corresponds to high accumulator values.
#
# A full description of the algorithm can be found in reference [1]_.
#
# References
# ----------
# .. [1] Xie, Yonghong, and Qiang Ji. "A new efficient
# ellipse detection method." Pattern Recognition, 2002. Proceedings.
# 16th International Conference on. Vol. 2. IEEE, 2002
import matplotlib.pyplot as plt
from skimage import data, color, img_as_ubyte
from skimage.feature import canny
from skimage.transform import hough_ellipse
from skimage.draw import ellipse_perimeter
# Load picture, convert to grayscale and detect edges
image_rgb = data.coffee()[0:220, 160:420]
image_gray = color.rgb2gray(image_rgb)
edges = canny(image_gray, sigma=2.0,
low_threshold=0.55, high_threshold=0.8)
# Perform a Hough Transform
# The accuracy corresponds to the bin size of a major axis.
# The value is chosen in order to get a single high accumulator.
# The threshold eliminates low accumulators
result = hough_ellipse(edges, accuracy=20, threshold=250,
min_size=100, max_size=120)
result.sort(order='accumulator')
# Estimated parameters for the ellipse
best = list(result[-1])
yc, xc, a, b = [int(round(x)) for x in best[1:5]]
orientation = best[5]
# Draw the ellipse on the original image
cy, cx = ellipse_perimeter(yc, xc, a, b, orientation)
image_rgb[cy, cx] = (0, 0, 255)
# Draw the edge (white) and the resulting ellipse (red)
edges = color.gray2rgb(img_as_ubyte(edges))
edges[cy, cx] = (250, 0, 0)
fig2, (ax1, ax2) = plt.subplots(ncols=2, nrows=1, figsize=(8, 4),
sharex=True, sharey=True)
ax1.set_title('Original picture')
ax1.imshow(image_rgb)
ax2.set_title('Edge (white) and result (red)')
ax2.imshow(edges)
plt.show()

View File

@ -0,0 +1,41 @@
"""
===============
Contour finding
===============
We use a marching squares method to find constant valued contours in an image.
In ``skimage.measure.find_contours``, array values are linearly interpolated
to provide better precision of the output contours. Contours which intersect
the image edge are open; all others are closed.
The `marching squares algorithm
<http://users.polytech.unice.fr/~lingrand/MarchingCubes/algo.html>`__ is a
special case of the marching cubes algorithm (Lorensen, William and Harvey
E. Cline. Marching Cubes: A High Resolution 3D Surface Construction Algorithm.
Computer Graphics (SIGGRAPH 87 Proceedings), 21(4), July 1987, pp. 163-170).
"""
import numpy as np
import matplotlib.pyplot as plt
from skimage import measure
# Construct some test data
x, y = np.ogrid[-np.pi:np.pi:100j, -np.pi:np.pi:100j]
r = np.sin(np.exp((np.sin(x)**3 + np.cos(y)**2)))
# Find contours at a constant value of 0.8
contours = measure.find_contours(r, 0.8)
# Display the image and plot all contours found
fig, ax = plt.subplots()
ax.imshow(r, cmap=plt.cm.gray)
for contour in contours:
ax.plot(contour[:, 1], contour[:, 0], linewidth=2)
ax.axis('image')
ax.set_xticks([])
ax.set_yticks([])
plt.show()

View File

@ -0,0 +1,49 @@
"""
===========
Convex Hull
===========
The convex hull of a binary image is the set of pixels included in the
smallest convex polygon that surrounds all white pixels in the input.
A good overview of the algorithm is given on `Steve Eddins' blog
<https://blogs.mathworks.com/steve/2011/10/04/binary-image-convex-hull-algorithm-notes/>`__.
"""
import matplotlib.pyplot as plt
from skimage.morphology import convex_hull_image
from skimage import data, img_as_float
from skimage.util import invert
# The original image is inverted as the object must be white.
image = invert(data.horse())
chull = convex_hull_image(image)
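# ``chull`` is a boolean image of the same shape as ``image``, with True
# for all pixels inside the convex hull.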
fig, axes = plt.subplots(1, 2, figsize=(8, 4))
ax = axes.ravel()
ax[0].set_title('Original picture')
ax[0].imshow(image, cmap=plt.cm.gray)
ax[0].set_axis_off()
ax[1].set_title('Transformed picture')
ax[1].imshow(chull, cmap=plt.cm.gray)
ax[1].set_axis_off()
plt.tight_layout()
plt.show()
######################################################################
# We prepare a second plot to show the difference.
#
chull_diff = img_as_float(chull.copy())
chull_diff[image] = 2
fig, ax = plt.subplots()
ax.imshow(chull_diff, cmap=plt.cm.gray)
ax.set_title('Difference')
plt.show()

View File

@ -0,0 +1,173 @@
"""
==============
Edge operators
==============
Edge operators are used in image processing within edge detection algorithms.
They are discrete differentiation operators, computing an approximation of the
gradient of the image intensity function.
"""
import numpy as np
import matplotlib.pyplot as plt
from skimage import filters
from skimage.data import camera
from skimage.util import compare_images
image = camera()
edge_roberts = filters.roberts(image)
edge_sobel = filters.sobel(image)
fig, axes = plt.subplots(ncols=2, sharex=True, sharey=True,
figsize=(8, 4))
axes[0].imshow(edge_roberts, cmap=plt.cm.gray)
axes[0].set_title('Roberts Edge Detection')
axes[1].imshow(edge_sobel, cmap=plt.cm.gray)
axes[1].set_title('Sobel Edge Detection')
for ax in axes:
ax.axis('off')
plt.tight_layout()
plt.show()
######################################################################
# Different operators compute different finite-difference approximations of
# the gradient. For example, the Scharr filter results in less rotational
# variance than the Sobel filter, which in turn is better than the Prewitt
# filter [1]_ [2]_ [3]_. The difference between the Prewitt and Sobel filters
# and the Scharr filter is illustrated below with an image that is the
# discretization of a rotation-invariant continuous function. The
# discrepancy between the Prewitt and Sobel filters, and the Scharr filter is
# stronger for regions of the image where the direction of the gradient is
# close to diagonal, and for regions with high spatial frequencies. For the
# example image, the differences between the filter results are very small and
# visually almost indistinguishable.
#
# .. [1] https://en.wikipedia.org/wiki/Sobel_operator#Alternative_operators
#
# .. [2] B. Jaehne, H. Scharr, and S. Koerkel. Principles of filter design.
# In Handbook of Computer Vision and Applications. Academic Press,
# 1999.
#
# .. [3] https://en.wikipedia.org/wiki/Prewitt_operator
x, y = np.ogrid[:100, :100]
# Creating a rotation-invariant image with different spatial frequencies.
image_rot = np.exp(1j * np.hypot(x, y) ** 1.3 / 20.).real
edge_sobel = filters.sobel(image_rot)
edge_scharr = filters.scharr(image_rot)
edge_prewitt = filters.prewitt(image_rot)
diff_scharr_prewitt = compare_images(edge_scharr, edge_prewitt)
diff_scharr_sobel = compare_images(edge_scharr, edge_sobel)
max_diff = np.max(np.maximum(diff_scharr_prewitt, diff_scharr_sobel))
fig, axes = plt.subplots(nrows=2, ncols=2, sharex=True, sharey=True,
figsize=(8, 8))
axes = axes.ravel()
axes[0].imshow(image_rot, cmap=plt.cm.gray)
axes[0].set_title('Original image')
axes[1].imshow(edge_scharr, cmap=plt.cm.gray)
axes[1].set_title('Scharr Edge Detection')
axes[2].imshow(diff_scharr_prewitt, cmap=plt.cm.gray, vmax=max_diff)
axes[2].set_title('Scharr - Prewitt')
axes[3].imshow(diff_scharr_sobel, cmap=plt.cm.gray, vmax=max_diff)
axes[3].set_title('Scharr - Sobel')
for ax in axes:
ax.axis('off')
plt.tight_layout()
plt.show()
######################################################################
# As in the previous example, here we illustrate the rotational invariance of
# the filters. The top row shows a rotationally invariant image along with the
# angle of its analytical gradient. The other two rows contain the difference
# between the different gradient approximations (Sobel, Prewitt, Scharr &
# Farid) and analytical gradient.
#
# The Farid & Simoncelli derivative filters [4]_, [5]_ are the most
# rotationally invariant, but require a 5x5 kernel, which is computationally
# more intensive than a 3x3 kernel.
#
# .. [4] Farid, H. and Simoncelli, E. P., "Differentiation of discrete
# multidimensional signals", IEEE Transactions on Image Processing
# 13(4): 496-508, 2004. :DOI:`10.1109/TIP.2004.823819`
#
# .. [5] Wikipedia, "Farid and Simoncelli Derivatives." Available at:
# <https://en.wikipedia.org/wiki/Image_derivatives#Farid_and_Simoncelli_Derivatives>
x, y = np.mgrid[-10:10:255j, -10:10:255j]
image_rotinv = np.sin(x ** 2 + y ** 2)
image_x = 2 * x * np.cos(x ** 2 + y ** 2)
image_y = 2 * y * np.cos(x ** 2 + y ** 2)
def angle(dx, dy):
"""Calculate the angles between horizontal and vertical operators."""
return np.mod(np.arctan2(dy, dx), np.pi)
true_angle = angle(image_x, image_y)
angle_farid = angle(filters.farid_h(image_rotinv),
filters.farid_v(image_rotinv))
angle_sobel = angle(filters.sobel_h(image_rotinv),
filters.sobel_v(image_rotinv))
angle_scharr = angle(filters.scharr_h(image_rotinv),
filters.scharr_v(image_rotinv))
angle_prewitt = angle(filters.prewitt_h(image_rotinv),
filters.prewitt_v(image_rotinv))
def diff_angle(angle_1, angle_2):
"""Calculate the differences between two angles."""
return np.minimum(np.pi - np.abs(angle_1 - angle_2),
np.abs(angle_1 - angle_2))
diff_farid = diff_angle(true_angle, angle_farid)
diff_sobel = diff_angle(true_angle, angle_sobel)
diff_scharr = diff_angle(true_angle, angle_scharr)
diff_prewitt = diff_angle(true_angle, angle_prewitt)
fig, axes = plt.subplots(nrows=3, ncols=2, sharex=True, sharey=True,
figsize=(8, 8))
axes = axes.ravel()
axes[0].imshow(image_rotinv, cmap=plt.cm.gray)
axes[0].set_title('Original image')
axes[1].imshow(true_angle, cmap=plt.cm.hsv)
axes[1].set_title('Analytical gradient angle')
axes[2].imshow(diff_sobel, cmap=plt.cm.inferno, vmin=0, vmax=0.02)
axes[2].set_title('Sobel error')
axes[3].imshow(diff_prewitt, cmap=plt.cm.inferno, vmin=0, vmax=0.02)
axes[3].set_title('Prewitt error')
axes[4].imshow(diff_scharr, cmap=plt.cm.inferno, vmin=0, vmax=0.02)
axes[4].set_title('Scharr error')
color_ax = axes[5].imshow(diff_farid, cmap=plt.cm.inferno, vmin=0, vmax=0.02)
axes[5].set_title('Farid error')
fig.subplots_adjust(right=0.8)
colorbar_ax = fig.add_axes([0.90, 0.10, 0.02, 0.50])
fig.colorbar(color_ax, cax=colorbar_ax, ticks=[0, 0.01, 0.02])
for ax in axes:
ax.axis('off')
plt.show()

View File

@ -0,0 +1,149 @@
"""
=============================
Straight line Hough transform
=============================
The Hough transform in its simplest form is a method to detect straight lines
[1]_.
In the following example, we construct an image with a line intersection. We
then use the `Hough transform <https://en.wikipedia.org/wiki/Hough_transform>`__
to explore a parameter space for straight lines that may run through the image.
Algorithm overview
------------------
Usually, lines are parameterised as :math:`y = mx + c`, with a gradient
:math:`m` and y-intercept :math:`c`. However, this would mean that :math:`m` goes to
infinity for vertical lines. Instead, we therefore construct a segment
perpendicular to the line, leading to the origin. The line is represented by
the length of that segment, :math:`r`, and the angle it makes with the x-axis,
:math:`\\theta`.
The Hough transform constructs a histogram array representing the parameter
space (i.e., an :math:`M \\times N` matrix, for :math:`M` different values of
the radius and :math:`N` different values of :math:`\\theta`). For each
parameter combination, :math:`r` and :math:`\\theta`, we then find the number
of non-zero pixels in the input image that would fall close to the
corresponding line, and increment the array at position :math:`(r, \\theta)`
appropriately.
We can think of each non-zero pixel "voting" for potential line candidates. The
local maxima in the resulting histogram indicate the parameters of the most
probable lines. In our example, the maxima occur at 45 and 135 degrees,
corresponding to the normal vector angles of each line.
Another approach is the Progressive Probabilistic Hough Transform [2]_. It is
based on the assumption that using a random subset of voting points gives a good
approximation to the actual result, and that lines can be extracted during the
voting process by walking along connected components. This returns the
beginning and end of each line segment, which is useful when the actual
segments, rather than infinite lines, are needed.
The function `probabilistic_hough` has three parameters: a general threshold
that is applied to the Hough accumulator, a minimum line length and the line
gap that influences line merging. In the example below, we find lines longer
than 5 pixels with a gap of less than 3 pixels.
References
----------
.. [1] Duda, R. O. and P. E. Hart, "Use of the Hough Transformation to
Detect Lines and Curves in Pictures," Comm. ACM, Vol. 15,
pp. 11-15 (January, 1972)
.. [2] C. Galamhos, J. Matas and J. Kittler,"Progressive probabilistic
Hough transform for line detection", in IEEE Computer Society
Conference on Computer Vision and Pattern Recognition, 1999.
"""
######################
# Line Hough Transform
# ====================
import numpy as np
from skimage.transform import hough_line, hough_line_peaks
from skimage.feature import canny
from skimage import data
import matplotlib.pyplot as plt
from matplotlib import cm
# Constructing test image
image = np.zeros((200, 200))
idx = np.arange(25, 175)
image[idx[::-1], idx] = 255
image[idx, idx] = 255
# Classic straight-line Hough transform
# Set a precision of 0.5 degree.
tested_angles = np.linspace(-np.pi / 2, np.pi / 2, 360, endpoint=False)
h, theta, d = hough_line(image, theta=tested_angles)
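# ``h`` is the accumulator array, with one row per distance bin and one
# column per tested angle; ``theta`` and ``d`` hold the corresponding
# angle and distance values.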
# Generating figure 1
fig, axes = plt.subplots(1, 3, figsize=(15, 6))
ax = axes.ravel()
ax[0].imshow(image, cmap=cm.gray)
ax[0].set_title('Input image')
ax[0].set_axis_off()
ax[1].imshow(np.log(1 + h),
extent=[np.rad2deg(theta[-1]), np.rad2deg(theta[0]), d[-1], d[0]],
cmap=cm.gray, aspect=1/1.5)
ax[1].set_title('Hough transform')
ax[1].set_xlabel('Angles (degrees)')
ax[1].set_ylabel('Distance (pixels)')
ax[1].axis('image')
ax[2].imshow(image, cmap=cm.gray)
origin = np.array((0, image.shape[1]))
for _, angle, dist in zip(*hough_line_peaks(h, theta, d)):
y0, y1 = (dist - origin * np.cos(angle)) / np.sin(angle)
ax[2].plot(origin, (y0, y1), '-r')
ax[2].set_xlim(origin)
ax[2].set_ylim((image.shape[0], 0))
ax[2].set_axis_off()
ax[2].set_title('Detected lines')
plt.tight_layout()
plt.show()
###############################
# Probabilistic Hough Transform
# =============================
from skimage.transform import probabilistic_hough_line
# Line finding using the Probabilistic Hough Transform
image = data.camera()
edges = canny(image, 2, 1, 25)
lines = probabilistic_hough_line(edges, threshold=10, line_length=5,
line_gap=3)
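# ``lines`` is a list of detected segments, each given by its two end
# points ((x0, y0), (x1, y1)).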
# Generating figure 2
fig, axes = plt.subplots(1, 3, figsize=(15, 5), sharex=True, sharey=True)
ax = axes.ravel()
ax[0].imshow(image, cmap=cm.gray)
ax[0].set_title('Input image')
ax[1].imshow(edges, cmap=cm.gray)
ax[1].set_title('Canny edges')
ax[2].imshow(edges * 0)
for line in lines:
p0, p1 = line
ax[2].plot((p0[0], p1[0]), (p0[1], p1[1]))
ax[2].set_xlim((0, image.shape[1]))
ax[2].set_ylim((image.shape[0], 0))
ax[2].set_title('Probabilistic Hough')
for a in ax:
a.set_axis_off()
plt.tight_layout()
plt.show()

View File

@ -0,0 +1,58 @@
"""
==============
Marching Cubes
==============
Marching cubes is an algorithm to extract a 2D surface mesh from a 3D volume.
This can be conceptualized as a 3D generalization of isolines on topographical
or weather maps. It works by iterating across the volume, looking for regions
which cross the level of interest. If such regions are found, triangulations
are generated and added to an output mesh. The final result is a set of
vertices and a set of triangular faces.
The algorithm requires a data volume and an isosurface value. For example, in
CT imaging Hounsfield units of +700 to +3000 represent bone. So, one potential
input would be a reconstructed CT set of data and the value +700, to extract
a mesh for regions of bone or bone-like density.
This implementation also works correctly on anisotropic datasets, where the
voxel spacing is not equal for every spatial dimension, through use of the
`spacing` kwarg.
"""
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
from skimage import measure
from skimage.draw import ellipsoid
# Generate a level set about zero of two identical ellipsoids in 3D
ellip_base = ellipsoid(6, 10, 16, levelset=True)
ellip_double = np.concatenate((ellip_base[:-1, ...],
ellip_base[2:, ...]), axis=0)
# Use marching cubes to obtain the surface mesh of these ellipsoids
verts, faces, normals, values = measure.marching_cubes(ellip_double, 0)
# Display resulting triangular mesh using Matplotlib. This can also be done
# with mayavi (see skimage.measure.marching_cubes_lewiner docstring).
fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(111, projection='3d')
# Fancy indexing: `verts[faces]` to generate a collection of triangles
mesh = Poly3DCollection(verts[faces])
mesh.set_edgecolor('k')
ax.add_collection3d(mesh)
ax.set_xlabel("x-axis: a = 6 per ellipsoid")
ax.set_ylabel("y-axis: b = 10")
ax.set_zlabel("z-axis: c = 16")
ax.set_xlim(0, 24) # a = 6 (times two for 2nd ellipsoid)
ax.set_ylim(0, 20) # b = 10
ax.set_zlim(0, 32) # c = 16
plt.tight_layout()
plt.show()
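######################################################################
# For anisotropic data, the voxel spacing can be passed via the `spacing`
# kwarg mentioned above so that vertices come out in physical coordinates.
# A minimal sketch, assuming a z-step twice the in-plane pixel size:
verts_a, faces_a, _, _ = measure.marching_cubes(ellip_double, 0,
                                                spacing=(2.0, 1.0, 1.0))
print(verts_a.max(axis=0))  # the first (z) extent is doubled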

View File

@ -0,0 +1,79 @@
"""
==================================
Approximate and subdivide polygons
==================================
This example shows how to approximate (Douglas-Peucker algorithm) and subdivide
(B-Splines) polygonal chains.
"""
import numpy as np
import matplotlib.pyplot as plt
from skimage.draw import ellipse
from skimage.measure import find_contours, approximate_polygon, \
subdivide_polygon
hand = np.array([[1.64516129, 1.16145833],
[1.64516129, 1.59375],
[1.35080645, 1.921875],
[1.375, 2.18229167],
[1.68548387, 1.9375],
[1.60887097, 2.55208333],
[1.68548387, 2.69791667],
[1.76209677, 2.56770833],
[1.83064516, 1.97395833],
[1.89516129, 2.75],
[1.9516129, 2.84895833],
[2.01209677, 2.76041667],
[1.99193548, 1.99479167],
[2.11290323, 2.63020833],
[2.2016129, 2.734375],
[2.25403226, 2.60416667],
[2.14919355, 1.953125],
[2.30645161, 2.36979167],
[2.39112903, 2.36979167],
[2.41532258, 2.1875],
[2.1733871, 1.703125],
[2.07782258, 1.16666667]])
# subdivide polygon using 2nd degree B-Splines
new_hand = hand.copy()
for _ in range(5):
new_hand = subdivide_polygon(new_hand, degree=2, preserve_ends=True)
# approximate subdivided polygon with Douglas-Peucker algorithm
appr_hand = approximate_polygon(new_hand, tolerance=0.02)
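# ``tolerance`` is the maximum distance allowed between the original chain
# and its approximation; larger values yield coarser polygons.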
print("Number of coordinates:", len(hand), len(new_hand), len(appr_hand))
fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(9, 4))
ax1.plot(hand[:, 0], hand[:, 1])
ax1.plot(new_hand[:, 0], new_hand[:, 1])
ax1.plot(appr_hand[:, 0], appr_hand[:, 1])
# create two ellipses in image
img = np.zeros((800, 800), 'int32')
rr, cc = ellipse(250, 250, 180, 230, img.shape)
img[rr, cc] = 1
rr, cc = ellipse(600, 600, 150, 90, img.shape)
img[rr, cc] = 1
plt.gray()
ax2.imshow(img)
# approximate / simplify coordinates of the two ellipses
for contour in find_contours(img, 0):
coords = approximate_polygon(contour, tolerance=2.5)
ax2.plot(coords[:, 1], coords[:, 0], '-r', linewidth=2)
coords2 = approximate_polygon(contour, tolerance=39.5)
ax2.plot(coords2[:, 1], coords2[:, 0], '-g', linewidth=2)
print("Number of coordinates:", len(contour), len(coords), len(coords2))
ax2.axis((0, 800, 0, 800))
plt.show()

View File

@ -0,0 +1,59 @@
"""
=============
Random Shapes
=============
Example of generating random shapes with particular properties.
"""
import matplotlib.pyplot as plt
from skimage.draw import random_shapes
# Let's start simple and generate a 128x128 image
# with a single grayscale rectangle.
result = random_shapes((128, 128), max_shapes=1, shape='rectangle',
multichannel=False, random_seed=0)
# We get back a tuple consisting of (1) the image with the generated shapes
# and (2) a list of label tuples with the kind of shape (e.g. circle,
# rectangle) and ((r0, r1), (c0, c1)) coordinates.
image, labels = result
print(f"Image shape: {image.shape}\nLabels: {labels}")
# We can visualize the images.
fig, axes = plt.subplots(nrows=2, ncols=3)
ax = axes.ravel()
ax[0].imshow(image, cmap='gray')
ax[0].set_title('Grayscale shape')
# The generated images can be much more complex. For example, let's try many
# shapes of any color. If we want the colors to be particularly light, we can
# set the `intensity_range` to an upper subrange of (0,255).
image1, _ = random_shapes((128, 128), max_shapes=10,
intensity_range=((100, 255),))
# More examples, with different intensity ranges:
image2, _ = random_shapes((128, 128), max_shapes=10,
intensity_range=((200, 255),))
image3, _ = random_shapes((128, 128), max_shapes=10,
intensity_range=((50, 255),))
image4, _ = random_shapes((128, 128), max_shapes=10,
intensity_range=((0, 255),))
for i, image in enumerate([image1, image2, image3, image4], 1):
ax[i].imshow(image)
ax[i].set_title(f"Colored shapes, #{i-1}")
# These shapes are well suited for testing segmentation algorithms. Often,
# we also want the shapes to overlap to stress the algorithm; this is
# possible too:
image, _ = random_shapes((128, 128), min_shapes=5, max_shapes=10,
min_size=20, allow_overlap=True)
ax[5].imshow(image)
ax[5].set_title('Overlapping shapes')
for a in ax:
a.set_xticklabels([])
a.set_yticklabels([])
plt.show()

View File

@ -0,0 +1,78 @@
"""
===============
Ridge operators
===============
Ridge filters can be used to detect ridge-like structures, such as neurites
[1]_, tubes [2]_, vessels [3]_, wrinkles [4]_ or rivers.
Different ridge filters may be suited for detecting different structures,
e.g., depending on contrast or noise level.
The present class of ridge filters relies on the eigenvalues of
the Hessian matrix of image intensities to detect ridge structures where the
intensity changes perpendicular but not along the structure.
Note that, due to edge effects, results for Meijering and Frangi filters
are cropped by 4 pixels on each edge to get a proper rendering.
References
----------
.. [1] Meijering, E., Jacob, M., Sarria, J. C., Steiner, P., Hirling, H.,
Unser, M. (2004). Design and validation of a tool for neurite tracing
and analysis in fluorescence microscopy images. Cytometry Part A, 58(2),
167-176.
:DOI:`10.1002/cyto.a.20022`
.. [2] Sato, Y., Nakajima, S., Shiraga, N., Atsumi, H., Yoshida, S.,
Koller, T., ..., Kikinis, R. (1998). Three-dimensional multi-scale line
filter for segmentation and visualization of curvilinear structures in
medical images. Medical image analysis, 2(2), 143-168.
:DOI:`10.1016/S1361-8415(98)80009-1`
.. [3] Frangi, A. F., Niessen, W. J., Vincken, K. L., & Viergever, M. A. (1998,
October). Multiscale vessel enhancement filtering. In International
Conference on Medical Image Computing and Computer-Assisted Intervention
(pp. 130-137). Springer Berlin Heidelberg.
:DOI:`10.1007/BFb0056195`
.. [4] Ng, C. C., Yap, M. H., Costen, N., & Li, B. (2014, November). Automatic
wrinkle detection using hybrid Hessian filter. In Asian Conference on
Computer Vision (pp. 609-622). Springer International Publishing.
:DOI:`10.1007/978-3-319-16811-1_40`
"""
from skimage import data
from skimage import color
from skimage.filters import meijering, sato, frangi, hessian
import matplotlib.pyplot as plt
def identity(image, **kwargs):
"""Return the original image, ignoring any kwargs."""
return image
image = color.rgb2gray(data.retina())[300:700, 700:900]
cmap = plt.cm.gray
kwargs = {'sigmas': [1], 'mode': 'reflect'}
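# A minimal sketch of the principle stated in the docstring (illustrative
# only, not the filters' internal code): compute the Hessian eigenvalues at
# a single scale. Along a ridge, one eigenvalue stays close to zero while
# the other one has a large magnitude.
from skimage.feature import hessian_matrix, hessian_matrix_eigvals
H_elems = hessian_matrix(image, sigma=1, order='rc')
eigvals = hessian_matrix_eigvals(H_elems)  # shape: (2, rows, cols)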
fig, axes = plt.subplots(2, 5)
for i, black_ridges in enumerate([1, 0]):
for j, func in enumerate([identity, meijering, sato, frangi, hessian]):
kwargs['black_ridges'] = black_ridges
        result = func(image, **kwargs)
        if func in (meijering, frangi):
            # As noted in the docstring, crop the Meijering and Frangi
            # results by 4 pixels on each edge to suppress edge effects.
            result = result[4:-4, 4:-4]
axes[i, j].imshow(result, cmap=cmap, aspect='auto')
if i == 0:
axes[i, j].set_title(['Original\nimage', 'Meijering\nneuriteness',
'Sato\ntubeness', 'Frangi\nvesselness',
'Hessian\nvesselness'][j])
if j == 0:
axes[i, j].set_ylabel('black_ridges = ' + str(bool(black_ridges)))
axes[i, j].set_xticks([])
axes[i, j].set_yticks([])
plt.tight_layout()
plt.show()

View File

@ -0,0 +1,97 @@
"""
======
Shapes
======
This example shows how to draw several different shapes:
- line
- Bezier curve
- polygon
- disk
- ellipse
Anti-aliased drawing for:
- line
- circle
"""
import math
import numpy as np
import matplotlib.pyplot as plt
from skimage.draw import (line, polygon, disk,
circle_perimeter,
ellipse, ellipse_perimeter,
bezier_curve)
fig, (ax1, ax2) = plt.subplots(ncols=2, nrows=1, figsize=(10, 6))
img = np.zeros((500, 500, 3), dtype=np.double)
# draw line
rr, cc = line(120, 123, 20, 400)
img[rr, cc, 0] = 1  # float RGB image: channel values live in [0, 1]
# fill polygon
poly = np.array((
(300, 300),
(480, 320),
(380, 430),
(220, 590),
(300, 300),
))
rr, cc = polygon(poly[:, 0], poly[:, 1], img.shape)
img[rr, cc, 1] = 1
# fill disk
rr, cc = disk((200, 200), 100, shape=img.shape)
img[rr, cc, :] = (1, 1, 0)
# fill ellipse
rr, cc = ellipse(300, 300, 100, 200, img.shape)
img[rr, cc, 2] = 1
# circle
rr, cc = circle_perimeter(120, 400, 15)
img[rr, cc, :] = (1, 0, 0)
# Bezier curve
rr, cc = bezier_curve(70, 100, 10, 10, 150, 100, 1)
img[rr, cc, :] = (1, 0, 0)
# ellipses
rr, cc = ellipse_perimeter(120, 400, 60, 20, orientation=math.pi / 4.)
img[rr, cc, :] = (1, 0, 1)
rr, cc = ellipse_perimeter(120, 400, 60, 20, orientation=-math.pi / 4.)
img[rr, cc, :] = (0, 0, 1)
rr, cc = ellipse_perimeter(120, 400, 60, 20, orientation=math.pi / 2.)
img[rr, cc, :] = (1, 1, 1)
ax1.imshow(img)
ax1.set_title('No anti-aliasing')
ax1.axis('off')
from skimage.draw import line_aa, circle_perimeter_aa
img = np.zeros((100, 100), dtype=np.double)
# anti-aliased line
rr, cc, val = line_aa(12, 12, 20, 50)
img[rr, cc] = val
# anti-aliased circle
rr, cc, val = circle_perimeter_aa(60, 40, 30)
img[rr, cc] = val
ax2.imshow(img, cmap=plt.cm.gray)
ax2.set_title('Anti-aliasing')
ax2.axis('off')
plt.show()

View File

@ -0,0 +1,197 @@
"""
===========
Skeletonize
===========
Skeletonization reduces binary objects to 1-pixel-wide representations. This
can be useful for feature extraction and for representing an object's
topology.
``skeletonize`` works by making successive passes of the image. On each pass,
border pixels are identified and removed on the condition that they do not
break the connectivity of the corresponding object.
"""
from skimage.morphology import skeletonize
from skimage import data
import matplotlib.pyplot as plt
from skimage.util import invert
# Invert the horse image
image = invert(data.horse())
# perform skeletonization
skeleton = skeletonize(image)
# display results
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(8, 4),
sharex=True, sharey=True)
ax = axes.ravel()
ax[0].imshow(image, cmap=plt.cm.gray)
ax[0].axis('off')
ax[0].set_title('original', fontsize=20)
ax[1].imshow(skeleton, cmap=plt.cm.gray)
ax[1].axis('off')
ax[1].set_title('skeleton', fontsize=20)
fig.tight_layout()
plt.show()
######################################################################
# **Zhang's method vs Lee's method**
#
# ``skeletonize`` [Zha84]_ works by making successive passes of
# the image, removing pixels on object borders. This continues until no
# more pixels can be removed. The image is correlated with a
# mask that assigns each pixel a number in the range [0...255]
# corresponding to each possible pattern of its 8 neighbouring
# pixels. A lookup table is then used to assign each pixel a
# value of 0, 1, 2 or 3, and pixels are selectively removed during
# the iterations based on that value.
#
# ``skeletonize(..., method='lee')`` [Lee94]_ uses an octree data structure
# to examine a 3x3x3 neighborhood of a pixel. The algorithm proceeds by
# iteratively sweeping over the image, and removing pixels at each iteration
# until the image stops changing. Each iteration consists of two steps: first,
# a list of candidates for removal is assembled; then pixels from this list
# are rechecked sequentially, to better preserve connectivity of the image.
#
# Note that Lee's method [Lee94]_ is designed to be used on 3-D images, and
# is selected automatically for those. For illustrative purposes, we apply
# this algorithm to a 2-D image.
#
# .. [Zha84] A fast parallel algorithm for thinning digital patterns,
# T. Y. Zhang and C. Y. Suen, Communications of the ACM,
# March 1984, Volume 27, Number 3.
#
# .. [Lee94] T.-C. Lee, R.L. Kashyap and C.-N. Chu, Building skeleton models
# via 3-D medial surface/axis thinning algorithms.
# Computer Vision, Graphics, and Image Processing, 56(6):462-478,
# 1994.
#
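# Illustrative sketch of the pattern-indexing idea above (an assumption for
# demonstration, not skimage's internal code): weighting the 8 neighbours of
# each pixel by powers of two maps every neighbourhood configuration to a
# number in [0, 255], which can then index a lookup table.
import numpy as np
from scipy import ndimage as ndi
neighbour_weights = np.array([[1,   2,  4],
                              [128, 0,  8],
                              [64, 32, 16]])
pattern_index = ndi.correlate(image.astype(np.uint8), neighbour_weights,
                              mode='constant')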
import matplotlib.pyplot as plt
from skimage.morphology import skeletonize
blobs = data.binary_blobs(200, blob_size_fraction=.2,
volume_fraction=.35, seed=1)
skeleton = skeletonize(blobs)
skeleton_lee = skeletonize(blobs, method='lee')
fig, axes = plt.subplots(1, 3, figsize=(8, 4), sharex=True, sharey=True)
ax = axes.ravel()
ax[0].imshow(blobs, cmap=plt.cm.gray)
ax[0].set_title('original')
ax[0].axis('off')
ax[1].imshow(skeleton, cmap=plt.cm.gray)
ax[1].set_title('skeletonize')
ax[1].axis('off')
ax[2].imshow(skeleton_lee, cmap=plt.cm.gray)
ax[2].set_title('skeletonize (Lee 94)')
ax[2].axis('off')
fig.tight_layout()
plt.show()
######################################################################
# **Medial axis skeletonization**
#
# The medial axis of an object is the set of all points having more than one
# closest point on the object's boundary. It is often called the *topological
# skeleton*, because it is a 1-pixel wide skeleton of the object, with the same
# connectivity as the original object.
#
# Here, we use the medial axis transform to compute the width of the foreground
# objects. As the function ``medial_axis`` returns the distance transform in
# addition to the medial axis (with the keyword argument ``return_distance=True``),
# it is possible to compute the distance to the background for all points of
# the medial axis with this function. This gives an estimate of the local width
# of the objects.
#
# For a skeleton with fewer branches, ``skeletonize`` should be preferred.
from skimage.morphology import medial_axis, skeletonize
# Generate the data
blobs = data.binary_blobs(200, blob_size_fraction=.2,
volume_fraction=.35, seed=1)
# Compute the medial axis (skeleton) and the distance transform
skel, distance = medial_axis(blobs, return_distance=True)
# Compare with other skeletonization algorithms
skeleton = skeletonize(blobs)
skeleton_lee = skeletonize(blobs, method='lee')
# Distance to the background for pixels of the skeleton
dist_on_skel = distance * skel
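# The distance transform measures the distance to the background, so the
# local width of an object at a skeleton point is roughly twice that value.
local_width = 2 * dist_on_skel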
fig, axes = plt.subplots(2, 2, figsize=(8, 8), sharex=True, sharey=True)
ax = axes.ravel()
ax[0].imshow(blobs, cmap=plt.cm.gray)
ax[0].set_title('original')
ax[0].axis('off')
ax[1].imshow(dist_on_skel, cmap='magma')
ax[1].contour(blobs, [0.5], colors='w')
ax[1].set_title('medial_axis')
ax[1].axis('off')
ax[2].imshow(skeleton, cmap=plt.cm.gray)
ax[2].set_title('skeletonize')
ax[2].axis('off')
ax[3].imshow(skeleton_lee, cmap=plt.cm.gray)
ax[3].set_title("skeletonize (Lee 94)")
ax[3].axis('off')
fig.tight_layout()
plt.show()
######################################################################
# **Morphological thinning**
#
# Morphological thinning, implemented in the `thin` function, works on the
# same principle as `skeletonize`: remove pixels from the borders at each
# iteration until none can be removed without altering the connectivity. The
# different rules of removal can speed up skeletonization and result in
# different final skeletons.
#
# The `thin` function also takes an optional `max_iter` keyword argument to
# limit the number of thinning iterations and thus produce a thicker,
# partially thinned result.
from skimage.morphology import skeletonize, thin
skeleton = skeletonize(image)
thinned = thin(image)
thinned_partial = thin(image, max_iter=25)
fig, axes = plt.subplots(2, 2, figsize=(8, 8), sharex=True, sharey=True)
ax = axes.ravel()
ax[0].imshow(image, cmap=plt.cm.gray)
ax[0].set_title('original')
ax[0].axis('off')
ax[1].imshow(skeleton, cmap=plt.cm.gray)
ax[1].set_title('skeleton')
ax[1].axis('off')
ax[2].imshow(thinned, cmap=plt.cm.gray)
ax[2].set_title('thinned')
ax[2].axis('off')
ax[3].imshow(thinned_partial, cmap=plt.cm.gray)
ax[3].set_title('partially thinned')
ax[3].axis('off')
fig.tight_layout()
plt.show()

View File

@ -0,0 +1,2 @@
Detection of features and objects
---------------------------------

View File

@ -0,0 +1,79 @@
"""
==============
Blob Detection
==============
Blobs are bright on dark or dark on bright regions in an image. In
this example, blobs are detected using 3 algorithms. The image used
in this case is the Hubble eXtreme Deep Field. Each bright dot in the
image is a star or a galaxy.
Laplacian of Gaussian (LoG)
-----------------------------
This is the most accurate but slowest approach. It computes the Laplacian
of Gaussian of the image with successively increasing standard deviation and
stacks the results up in a cube. Blobs are local maxima in this cube.
Detecting larger blobs is especially slow because of the larger kernel sizes
used during convolution. Only bright blobs on dark backgrounds are detected.
See :py:meth:`skimage.feature.blob_log` for usage.
Difference of Gaussian (DoG)
----------------------------
This is a faster approximation of the LoG approach. Here the image is
blurred with increasing standard deviations, and the differences between
successively blurred images are stacked up in a cube. This method shares
the LoG approach's disadvantage when detecting larger blobs. Blobs are
again assumed to be bright on dark. See
:py:meth:`skimage.feature.blob_dog` for usage.
Determinant of Hessian (DoH)
----------------------------
This is the fastest approach. It detects blobs by finding maxima in the
matrix of the determinant of the Hessian of the image. The detection speed
is independent of the blob size because the implementation internally uses
box filters instead of convolutions. Bright-on-dark as well as dark-on-bright
blobs are detected. The downside is that small blobs (< 3 px) are not
detected accurately. See :py:meth:`skimage.feature.blob_doh` for usage.
"""
from math import sqrt
from skimage import data
from skimage.feature import blob_dog, blob_log, blob_doh
from skimage.color import rgb2gray
import matplotlib.pyplot as plt
image = data.hubble_deep_field()[0:500, 0:500]
image_gray = rgb2gray(image)
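# A hedged sketch of the LoG construction described in the docstring
# (illustrative; blob_log's internals differ in detail): filter with a
# Laplacian of Gaussian at increasing sigma, scale-normalize by sigma**2,
# and stack the responses into a cube whose local maxima are blob
# candidates. The minus sign makes bright-on-dark blobs respond positively.
import numpy as np
from scipy import ndimage as ndi
sigmas = np.linspace(1, 30, 10)
log_cube = np.stack([-s ** 2 * ndi.gaussian_laplace(image_gray, s)
                     for s in sigmas], axis=-1)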
blobs_log = blob_log(image_gray, max_sigma=30, num_sigma=10, threshold=.1)
# Compute radii in the 3rd column.
blobs_log[:, 2] = blobs_log[:, 2] * sqrt(2)
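# (For a 2-D Gaussian blob, the radius of maximum LoG response is roughly
# sqrt(2) times the detected standard deviation, hence the factor above.)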
blobs_dog = blob_dog(image_gray, max_sigma=30, threshold=.1)
blobs_dog[:, 2] = blobs_dog[:, 2] * sqrt(2)
blobs_doh = blob_doh(image_gray, max_sigma=30, threshold=.01)
blobs_list = [blobs_log, blobs_dog, blobs_doh]
colors = ['yellow', 'lime', 'red']
titles = ['Laplacian of Gaussian', 'Difference of Gaussian',
'Determinant of Hessian']
sequence = zip(blobs_list, colors, titles)
fig, axes = plt.subplots(1, 3, figsize=(9, 3), sharex=True, sharey=True)
ax = axes.ravel()
for idx, (blobs, color, title) in enumerate(sequence):
ax[idx].set_title(title)
ax[idx].imshow(image)
for blob in blobs:
y, x, r = blob
c = plt.Circle((x, y), r, color=color, linewidth=2, fill=False)
ax[idx].add_patch(c)
ax[idx].set_axis_off()
plt.tight_layout()
plt.show()

View File

@ -0,0 +1,64 @@
"""
=======================
BRIEF binary descriptor
=======================
This example demonstrates the BRIEF binary description algorithm. The
descriptor consists of relatively few bits and can be computed using a set
of intensity-difference tests. This short binary descriptor yields a low
memory footprint and very efficient matching based on the Hamming distance
metric. BRIEF does not provide rotation invariance; scale invariance can be
achieved by detecting and extracting features at different scales.
"""
from skimage import data
from skimage import transform
from skimage.feature import (match_descriptors, corner_peaks, corner_harris,
plot_matches, BRIEF)
from skimage.color import rgb2gray
import matplotlib.pyplot as plt
img1 = rgb2gray(data.astronaut())
tform = transform.AffineTransform(scale=(1.2, 1.2), translation=(0, -100))
img2 = transform.warp(img1, tform)
img3 = transform.rotate(img1, 25)
keypoints1 = corner_peaks(corner_harris(img1), min_distance=5,
threshold_rel=0.1)
keypoints2 = corner_peaks(corner_harris(img2), min_distance=5,
threshold_rel=0.1)
keypoints3 = corner_peaks(corner_harris(img3), min_distance=5,
threshold_rel=0.1)
extractor = BRIEF()
extractor.extract(img1, keypoints1)
keypoints1 = keypoints1[extractor.mask]
descriptors1 = extractor.descriptors
extractor.extract(img2, keypoints2)
keypoints2 = keypoints2[extractor.mask]
descriptors2 = extractor.descriptors
extractor.extract(img3, keypoints3)
keypoints3 = keypoints3[extractor.mask]
descriptors3 = extractor.descriptors
matches12 = match_descriptors(descriptors1, descriptors2, cross_check=True)
matches13 = match_descriptors(descriptors1, descriptors3, cross_check=True)
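# Hedged illustration of the Hamming-distance matching mentioned in the
# docstring: BRIEF descriptors are boolean arrays, so the distance between
# two descriptors is simply the number of differing bits.
import numpy as np
hamming12 = np.count_nonzero(
    descriptors1[matches12[:, 0]] != descriptors2[matches12[:, 1]], axis=1)
print("Mean Hamming distance of accepted matches:", hamming12.mean())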
fig, ax = plt.subplots(nrows=2, ncols=1)
plt.gray()
plot_matches(ax[0], img1, img2, keypoints1, keypoints2, matches12)
ax[0].axis('off')
ax[0].set_title("Original Image vs. Transformed Image")
plot_matches(ax[1], img1, img3, keypoints1, keypoints3, matches13)
ax[1].axis('off')
ax[1].set_title("Original Image vs. Transformed Image")
plt.show()

View File

@ -0,0 +1,46 @@
"""
========================
CENSURE feature detector
========================
The CENSURE (CENter SURround Extremas) feature detector is a scale-invariant
center-surround detector that claims to outperform other detectors while
being capable of real-time implementation.
"""
from skimage import data
from skimage import transform
from skimage.feature import CENSURE
from skimage.color import rgb2gray
import matplotlib.pyplot as plt
img_orig = rgb2gray(data.astronaut())
tform = transform.AffineTransform(scale=(1.5, 1.5), rotation=0.5,
translation=(150, -200))
img_warp = transform.warp(img_orig, tform)
detector = CENSURE()
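# After calling detect(), the detector exposes the keypoint coordinates in
# detector.keypoints and the scale at which each was found in
# detector.scales; the scatter calls below use the scales as marker sizes.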
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(12, 6))
detector.detect(img_orig)
ax[0].imshow(img_orig, cmap=plt.cm.gray)
ax[0].scatter(detector.keypoints[:, 1], detector.keypoints[:, 0],
2 ** detector.scales, facecolors='none', edgecolors='r')
ax[0].set_title("Original Image")
detector.detect(img_warp)
ax[1].imshow(img_warp, cmap=plt.cm.gray)
ax[1].scatter(detector.keypoints[:, 1], detector.keypoints[:, 0],
2 ** detector.scales, facecolors='none', edgecolors='r')
ax[1].set_title('Transformed Image')
for a in ax:
a.axis('off')
plt.tight_layout()
plt.show()

View File

@ -0,0 +1,41 @@
"""
================
Corner detection
================
Detect corner points using the Harris corner detector and determine the
subpixel position of corners ([1]_, [2]_).
.. [1] https://en.wikipedia.org/wiki/Corner_detection
.. [2] https://en.wikipedia.org/wiki/Interest_point_detection
"""
from matplotlib import pyplot as plt
from skimage import data
from skimage.feature import corner_harris, corner_subpix, corner_peaks
from skimage.transform import warp, AffineTransform
from skimage.draw import ellipse
# Sheared checkerboard
tform = AffineTransform(scale=(1.3, 1.1), rotation=1, shear=0.7,
translation=(110, 30))
image = warp(data.checkerboard()[:90, :90], tform.inverse,
output_shape=(200, 310))
# Ellipse
rr, cc = ellipse(160, 175, 10, 100)
image[rr, cc] = 1
# Two squares
image[30:80, 200:250] = 1
image[80:130, 250:300] = 1
coords = corner_peaks(corner_harris(image), min_distance=5, threshold_rel=0.02)
coords_subpix = corner_subpix(image, coords, window_size=13)
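# corner_subpix refines each detected corner within a local window; rows of
# the returned array are NaN where no reliable subpixel estimate exists,
# and matplotlib simply skips those points when plotting.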
fig, ax = plt.subplots()
ax.imshow(image, cmap=plt.cm.gray)
ax.plot(coords[:, 1], coords[:, 0], color='cyan', marker='o',
linestyle='None', markersize=6)
ax.plot(coords_subpix[:, 1], coords_subpix[:, 0], '+r', markersize=15)
ax.axis((0, 310, 200, 0))
plt.show()

View File

@ -0,0 +1,28 @@
"""
===============================
Dense DAISY feature description
===============================
The DAISY local image descriptor is based on gradient orientation histograms,
similar to the SIFT descriptor. It is formulated in a way that allows for
fast, dense extraction, which is useful for, e.g., bag-of-features image
representations.
In this example, a limited number of DAISY descriptors are extracted at a
large scale for illustrative purposes.
"""
from skimage.feature import daisy
from skimage import data
import matplotlib.pyplot as plt
img = data.camera()
descs, descs_img = daisy(img, step=180, radius=58, rings=2, histograms=6,
orientations=8, visualize=True)
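# With visualize=True, daisy() also returns a rendering of the descriptors;
# descs itself is a (rows, cols, P) grid holding one P-dimensional
# descriptor per sampled location, which the count below relies on.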
fig, ax = plt.subplots()
ax.axis('off')
ax.imshow(descs_img)
descs_num = descs.shape[0] * descs.shape[1]
ax.set_title('%i DAISY descriptors extracted:' % descs_num)
plt.show()

View File

@ -0,0 +1,130 @@
"""
=============================================
Gabor filter banks for texture classification
=============================================
In this example, we will see how to classify textures based on Gabor filter
banks. Frequency and orientation representations of the Gabor filter are
similar to those of the human visual system.
The images are filtered using the real parts of various different Gabor filter
kernels. The mean and variance of the filtered images are then used as features
for classification, which is based on the least squared error for simplicity.
"""
import matplotlib.pyplot as plt
import numpy as np
from scipy import ndimage as ndi
from skimage import data
from skimage.util import img_as_float
from skimage.filters import gabor_kernel
def compute_feats(image, kernels):
feats = np.zeros((len(kernels), 2), dtype=np.double)
for k, kernel in enumerate(kernels):
filtered = ndi.convolve(image, kernel, mode='wrap')
feats[k, 0] = filtered.mean()
feats[k, 1] = filtered.var()
return feats
def match(feats, ref_feats):
min_error = np.inf
min_i = None
for i in range(ref_feats.shape[0]):
error = np.sum((feats - ref_feats[i, :])**2)
if error < min_error:
min_error = error
min_i = i
return min_i
# prepare filter bank kernels
kernels = []
for theta in range(4):
theta = theta / 4. * np.pi
for sigma in (1, 3):
for frequency in (0.05, 0.25):
kernel = np.real(gabor_kernel(frequency, theta=theta,
sigma_x=sigma, sigma_y=sigma))
kernels.append(kernel)
shrink = (slice(0, None, 3), slice(0, None, 3))
brick = img_as_float(data.brick())[shrink]
grass = img_as_float(data.grass())[shrink]
gravel = img_as_float(data.gravel())[shrink]
image_names = ('brick', 'grass', 'gravel')
images = (brick, grass, gravel)
# prepare reference features
ref_feats = np.zeros((3, len(kernels), 2), dtype=np.double)
ref_feats[0, :, :] = compute_feats(brick, kernels)
ref_feats[1, :, :] = compute_feats(grass, kernels)
ref_feats[2, :, :] = compute_feats(gravel, kernels)
print('Rotated images matched against references using Gabor filter banks:')
print('original: brick, rotated: 190deg, match result: ', end='')
feats = compute_feats(ndi.rotate(brick, angle=190, reshape=False), kernels)
print(image_names[match(feats, ref_feats)])
print('original: brick, rotated: 70deg, match result: ', end='')
feats = compute_feats(ndi.rotate(brick, angle=70, reshape=False), kernels)
print(image_names[match(feats, ref_feats)])
print('original: grass, rotated: 145deg, match result: ', end='')
feats = compute_feats(ndi.rotate(grass, angle=145, reshape=False), kernels)
print(image_names[match(feats, ref_feats)])
def power(image, kernel):
# Normalize images for better comparison.
image = (image - image.mean()) / image.std()
return np.sqrt(ndi.convolve(image, np.real(kernel), mode='wrap')**2 +
ndi.convolve(image, np.imag(kernel), mode='wrap')**2)
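# (After normalizing the image, power() returns the modulus of the complex
# Gabor response: convolving with the real and imaginary kernel parts gives
# a quadrature pair, and the square root of the sum of their squares is the
# response magnitude.)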
# Plot a selection of the filter bank kernels and their responses.
results = []
kernel_params = []
for theta in (0, 1):
theta = theta / 4. * np.pi
for frequency in (0.1, 0.4):
kernel = gabor_kernel(frequency, theta=theta)
params = 'theta=%d,\nfrequency=%.2f' % (theta * 180 / np.pi, frequency)
kernel_params.append(params)
# Save kernel and the power image for each image
results.append((kernel, [power(img, kernel) for img in images]))
fig, axes = plt.subplots(nrows=5, ncols=4, figsize=(5, 6))
plt.gray()
fig.suptitle('Image responses for Gabor filter kernels', fontsize=12)
axes[0][0].axis('off')
# Plot original images
for label, img, ax in zip(image_names, images, axes[0][1:]):
ax.imshow(img)
ax.set_title(label, fontsize=9)
ax.axis('off')
for label, (kernel, powers), ax_row in zip(kernel_params, results, axes[1:]):
# Plot Gabor kernel
ax = ax_row[0]
ax.imshow(np.real(kernel))
ax.set_ylabel(label, fontsize=7)
ax.set_xticks([])
ax.set_yticks([])
# Plot Gabor responses with the contrast normalized for each filter
vmin = np.min(powers)
vmax = np.max(powers)
for patch, ax in zip(powers, ax_row[1:]):
ax.imshow(patch, vmin=vmin, vmax=vmax)
ax.axis('off')
plt.show()

View File

@ -0,0 +1,91 @@
"""
============================================================
Gabors / Primary Visual Cortex "Simple Cells" from an Image
============================================================
How do you build a (bio-plausible) *sparse* dictionary (or 'codebook', or
'filterbank') for e.g. image classification without any fancy math and
with just standard Python scientific libraries?
Please find below a short answer ;-)
This simple example shows how to get Gabor-like filters [1]_ using just
a simple image. In our example, we use a photograph of the astronaut Eileen
Collins. Gabor filters are good approximations of the "Simple Cells" [2]_
receptive fields [3]_ found in the mammalian primary visual cortex (V1)
(for details, see e.g. the Nobel-prize winning work of Hubel & Wiesel done
in the 60s [4]_ [5]_).
Here we use MacQueen's 'k-means' algorithm [6]_ as a simple, biologically
plausible, Hebbian-like learning rule, and we apply it (a) to patches of
the original image (retinal projection), and (b) to patches of an
LGN-like [7]_ image obtained with a simple difference of Gaussians (DoG)
approximation.
Enjoy ;-) And keep in mind that getting Gabors on natural image patches
is not rocket science.
.. [1] https://en.wikipedia.org/wiki/Gabor_filter
.. [2] https://en.wikipedia.org/wiki/Simple_cell
.. [3] https://en.wikipedia.org/wiki/Receptive_field
.. [4] D. H. Hubel and T. N. Wiesel, Receptive Fields of Single Neurones
       in the Cat's Striate Cortex, J. Physiol. 148, pp. 574-591, 1959
.. [5] D. H. Hubel and T. N. Wiesel, Receptive Fields, Binocular
       Interaction, and Functional Architecture in the Cat's Visual Cortex,
       J. Physiol. 160, pp. 106-154, 1962
.. [6] https://en.wikipedia.org/wiki/K-means_clustering
.. [7] https://en.wikipedia.org/wiki/Lateral_geniculate_nucleus
"""
import numpy as np
from scipy.cluster.vq import kmeans2
from scipy import ndimage as ndi
import matplotlib.pyplot as plt
from skimage import data
from skimage import color
from skimage.util.shape import view_as_windows
from skimage.util import montage
np.random.seed(42)
patch_shape = 8, 8
n_filters = 49
astro = color.rgb2gray(data.astronaut())
# -- filterbank1 on original image
patches1 = view_as_windows(astro, patch_shape)
patches1 = patches1.reshape(-1, patch_shape[0] * patch_shape[1])[::8]
fb1, _ = kmeans2(patches1, n_filters, minit='points')
fb1 = fb1.reshape((-1,) + patch_shape)
fb1_montage = montage(fb1, rescale_intensity=True)
# -- filterbank2 LGN-like image
astro_dog = ndi.gaussian_filter(astro, .5) - ndi.gaussian_filter(astro, 1)
patches2 = view_as_windows(astro_dog, patch_shape)
patches2 = patches2.reshape(-1, patch_shape[0] * patch_shape[1])[::8]
fb2, _ = kmeans2(patches2, n_filters, minit='points')
fb2 = fb2.reshape((-1,) + patch_shape)
fb2_montage = montage(fb2, rescale_intensity=True)
# -- plotting
fig, axes = plt.subplots(2, 2, figsize=(7, 6))
ax = axes.ravel()
ax[0].imshow(astro, cmap=plt.cm.gray)
ax[0].set_title("Image (original)")
ax[1].imshow(fb1_montage, cmap=plt.cm.gray)
ax[1].set_title("K-means filterbank (codebook)\non original image")
ax[2].imshow(astro_dog, cmap=plt.cm.gray)
ax[2].set_title("Image (LGN-like DoG)")
ax[3].imshow(fb2_montage, cmap=plt.cm.gray)
ax[3].set_title("K-means filterbank (codebook)\non LGN-like DoG image")
for a in ax.ravel():
a.axis('off')
fig.tight_layout()
plt.show()

View File

@ -0,0 +1,104 @@
"""
=====================
GLCM Texture Features
=====================
This example illustrates texture classification using grey level
co-occurrence matrices (GLCMs) [1]_. A GLCM is a histogram of co-occurring
greyscale values at a given offset over an image.
In this example, samples of two different textures are extracted from
an image: grassy areas and sky areas. For each patch, a GLCM with
a horizontal offset of 5 (`distances=[5]` and `angles=[0]`) is computed.
Next, two features of the GLCM matrices are computed: dissimilarity and
correlation. These are plotted to illustrate that the classes form
clusters in feature space.
In a typical classification problem, the final step (not included in
this example) would be to train a classifier, such as logistic
regression, to label image patches from new images.
References
----------
.. [1] Haralick, R. M.; Shanmugam, K.; Dinstein, I.,
       "Textural features for image classification",
       IEEE Transactions on Systems, Man, and Cybernetics SMC-3 (6),
       1973, pp. 610-621.
:DOI:`10.1109/TSMC.1973.4309314`
"""
import matplotlib.pyplot as plt
from skimage.feature import greycomatrix, greycoprops
from skimage import data
PATCH_SIZE = 21
# open the camera image
image = data.camera()
# select some patches from grassy areas of the image
grass_locations = [(280, 454), (342, 223), (444, 192), (455, 455)]
grass_patches = []
for loc in grass_locations:
grass_patches.append(image[loc[0]:loc[0] + PATCH_SIZE,
loc[1]:loc[1] + PATCH_SIZE])
# select some patches from sky areas of the image
sky_locations = [(38, 34), (139, 28), (37, 437), (145, 379)]
sky_patches = []
for loc in sky_locations:
sky_patches.append(image[loc[0]:loc[0] + PATCH_SIZE,
loc[1]:loc[1] + PATCH_SIZE])
# compute some GLCM properties for each patch
xs = []
ys = []
for patch in (grass_patches + sky_patches):
glcm = greycomatrix(patch, distances=[5], angles=[0], levels=256,
symmetric=True, normed=True)
xs.append(greycoprops(glcm, 'dissimilarity')[0, 0])
ys.append(greycoprops(glcm, 'correlation')[0, 0])
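# Hedged aside on interpretation: entry glcm[i, j, d, a] computed above is
# the normalized frequency with which a pixel of grey level i has a pixel of
# grey level j at distances[d] pixels along angles[a]; here, 5 pixels to the
# right. For example, for the last patch processed:
print("P(level 100 next to level 110):", glcm[100, 110, 0, 0])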
# create the figure
fig = plt.figure(figsize=(8, 8))
# display original image with locations of patches
ax = fig.add_subplot(3, 2, 1)
ax.imshow(image, cmap=plt.cm.gray,
vmin=0, vmax=255)
for (y, x) in grass_locations:
ax.plot(x + PATCH_SIZE / 2, y + PATCH_SIZE / 2, 'gs')
for (y, x) in sky_locations:
ax.plot(x + PATCH_SIZE / 2, y + PATCH_SIZE / 2, 'bs')
ax.set_xlabel('Original Image')
ax.set_xticks([])
ax.set_yticks([])
ax.axis('image')
# for each patch, plot (dissimilarity, correlation)
ax = fig.add_subplot(3, 2, 2)
ax.plot(xs[:len(grass_patches)], ys[:len(grass_patches)], 'go',
label='Grass')
ax.plot(xs[len(grass_patches):], ys[len(grass_patches):], 'bo',
label='Sky')
ax.set_xlabel('GLCM Dissimilarity')
ax.set_ylabel('GLCM Correlation')
ax.legend()
# display the image patches
for i, patch in enumerate(grass_patches):
ax = fig.add_subplot(3, len(grass_patches), len(grass_patches)*1 + i + 1)
ax.imshow(patch, cmap=plt.cm.gray,
vmin=0, vmax=255)
ax.set_xlabel('Grass %d' % (i + 1))
for i, patch in enumerate(sky_patches):
ax = fig.add_subplot(3, len(sky_patches), len(sky_patches)*2 + i + 1)
ax.imshow(patch, cmap=plt.cm.gray,
vmin=0, vmax=255)
ax.set_xlabel('Sky %d' % (i + 1))
# display the patches and plot
fig.suptitle('Grey level co-occurrence matrix features', fontsize=14, y=1.05)
plt.tight_layout()
plt.show()

Some files were not shown because too many files have changed in this diff