├── notebooks ├── guide.mplstyle ├── photometry │ ├── 02.03-Both.ipynb │ ├── 04.00-PSF-Photometry.ipynb │ ├── 03.05-Error-estimation.ipynb │ ├── 04.05-Error-estimation.ipynb │ ├── 03.00-Aperture-Photometry.ipynb │ ├── 03.03-Background-subtraction.ipynb │ ├── 04.03-Background-subtraction.ipynb │ ├── 02.02-Remove-local-background.ipynb │ ├── 03.06-Instrumental-magnitudes.ipynb │ ├── 04.06-Instrumental-magnitudes.ipynb │ ├── 02.01-Remove-smoothed-background.ipynb │ ├── 03.02-Choosing-the-aperture-size.ipynb │ ├── 03.04-Performing-the-photometry.ipynb │ ├── 04.04-Performing-the-photometry.ipynb │ ├── 04.01-When-NOT-to-use-PSF-photometry.ipynb │ ├── 04.02-Determining-an-instrument's-PSF.ipynb │ ├── 02.00-Background-removal-for-photometry.ipynb │ ├── 03.01-When-NOT-to-use-aperture-photometry.ipynb │ ├── 05.02-Evaluating-the-quality-of-the-transforms.ipynb │ ├── 05.00-Transforming-to-the-standard-magnitude-system.ipynb │ ├── 05.01-Removing-atmospheric-and-instrumental-effects-in-one-step.ipynb │ ├── 01.03-SExtractor-like:-sep-and-photutils.ipynb │ ├── 00.00-Preface.ipynb │ ├── 01.02-IRAF-like:-photutils.ipynb │ └── 01.00-Source-detection.ipynb ├── 07-02-Combination-with-alignment-via-WCS.ipynb ├── 07-03-Combination-with-alignment-based-on-star-positions-in-the-image.ipynb ├── wrap_script.py ├── 08-00-Image-masking.ipynb ├── download_data.py ├── link_fix.py ├── 07-00-Combining-images.ipynb ├── add_matplotlib_style.py ├── 03-00-Dark-current-and-hot-pixels.ipynb ├── 01-09-Calibration-choices-you-need-to-make.ipynb ├── Artificial-image-explorer.ipynb ├── 03-04-Handling-overscan-and-bias-for-dark-frames.ipynb ├── old-and-new-names.csv ├── README.md ├── wrap_notebook_lines.py ├── add_style_cell.ipynb ├── process_for_book.py ├── 01-00-Understanding-an-astronomical-CCD-image.ipynb ├── 05-00-Flat-corrections.ipynb ├── 08-05-incorporating-masks-into-calibrated-science-images.ipynb ├── 01-04-Nonuniform-sensitivity.ipynb ├── 00-00-Preface.ipynb ├── 02-00-Handling-overscan-trimming-and-bias-subtraction.ipynb ├── 02-04-Combine-bias-images-to-make-master.ipynb ├── convenience_functions.py ├── 03-05-Calibrate-dark-images.ipynb ├── 07-01-Creating-a-sky-flat.ipynb ├── 03-06-Combine-darks-for-use-in-later-calibration-steps.ipynb ├── test-toc.rst ├── 01-05-Calibration-overview.ipynb ├── add_github_links.py ├── image_sim.py └── 01-11-reading-images.ipynb ├── environment.yml ├── make-book.md ├── .gitignore ├── README.md ├── massey-photometry-outilne.md ├── LICENSE ├── my-ccd-guide.md ├── massey-ccd-outline.md └── generate_notebooks_from_toc.py /notebooks/guide.mplstyle: -------------------------------------------------------------------------------- 1 | axes.titlesize : 24 2 | axes.labelsize : 20 3 | 4 | xtick.labelsize : 16 5 | ytick.labelsize : 16 6 | 7 | legend.fontsize: xx-large 8 | 9 | figure.figsize : 10.0, 10.0 10 | figure.dpi : 100 11 | -------------------------------------------------------------------------------- /notebooks/photometry/02.03-Both.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Both\n" 8 | ] 9 | } 10 | ], 11 | "metadata": {}, 12 | "nbformat": 4, 13 | "nbformat_minor": 2 14 | } 15 | -------------------------------------------------------------------------------- /notebooks/photometry/04.00-PSF-Photometry.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | 
"metadata": {}, 6 | "source": [ 7 | "# PSF Photometry\n" 8 | ] 9 | } 10 | ], 11 | "metadata": {}, 12 | "nbformat": 4, 13 | "nbformat_minor": 2 14 | } 15 | -------------------------------------------------------------------------------- /notebooks/photometry/03.05-Error-estimation.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Error estimation\n" 8 | ] 9 | } 10 | ], 11 | "metadata": {}, 12 | "nbformat": 4, 13 | "nbformat_minor": 2 14 | } 15 | -------------------------------------------------------------------------------- /notebooks/photometry/04.05-Error-estimation.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Error estimation\n" 8 | ] 9 | } 10 | ], 11 | "metadata": {}, 12 | "nbformat": 4, 13 | "nbformat_minor": 2 14 | } 15 | -------------------------------------------------------------------------------- /notebooks/photometry/03.00-Aperture-Photometry.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Aperture Photometry\n" 8 | ] 9 | } 10 | ], 11 | "metadata": {}, 12 | "nbformat": 4, 13 | "nbformat_minor": 2 14 | } 15 | -------------------------------------------------------------------------------- /notebooks/photometry/03.03-Background-subtraction.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Background subtraction\n" 8 | ] 9 | } 10 | ], 11 | "metadata": {}, 12 | "nbformat": 4, 13 | "nbformat_minor": 2 14 | } 15 | -------------------------------------------------------------------------------- /notebooks/photometry/04.03-Background-subtraction.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Background subtraction\n" 8 | ] 9 | } 10 | ], 11 | "metadata": {}, 12 | "nbformat": 4, 13 | "nbformat_minor": 2 14 | } 15 | -------------------------------------------------------------------------------- /notebooks/photometry/02.02-Remove-local-background.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Remove local background\n" 8 | ] 9 | } 10 | ], 11 | "metadata": {}, 12 | "nbformat": 4, 13 | "nbformat_minor": 2 14 | } 15 | -------------------------------------------------------------------------------- /notebooks/photometry/03.06-Instrumental-magnitudes.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Instrumental magnitudes\n" 8 | ] 9 | } 10 | ], 11 | "metadata": {}, 12 | "nbformat": 4, 13 | "nbformat_minor": 2 14 | } 15 | -------------------------------------------------------------------------------- /notebooks/photometry/04.06-Instrumental-magnitudes.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# 
Instrumental magnitudes\n" 8 | ] 9 | } 10 | ], 11 | "metadata": {}, 12 | "nbformat": 4, 13 | "nbformat_minor": 2 14 | } 15 | -------------------------------------------------------------------------------- /notebooks/photometry/02.01-Remove-smoothed-background.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Remove smoothed background\n" 8 | ] 9 | } 10 | ], 11 | "metadata": {}, 12 | "nbformat": 4, 13 | "nbformat_minor": 2 14 | } 15 | -------------------------------------------------------------------------------- /notebooks/photometry/03.02-Choosing-the-aperture-size.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Choosing the aperture size\n" 8 | ] 9 | } 10 | ], 11 | "metadata": {}, 12 | "nbformat": 4, 13 | "nbformat_minor": 2 14 | } 15 | -------------------------------------------------------------------------------- /notebooks/photometry/03.04-Performing-the-photometry.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Performing the photometry\n" 8 | ] 9 | } 10 | ], 11 | "metadata": {}, 12 | "nbformat": 4, 13 | "nbformat_minor": 2 14 | } 15 | -------------------------------------------------------------------------------- /notebooks/photometry/04.04-Performing-the-photometry.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Performing the photometry\n" 8 | ] 9 | } 10 | ], 11 | "metadata": {}, 12 | "nbformat": 4, 13 | "nbformat_minor": 2 14 | } 15 | -------------------------------------------------------------------------------- /notebooks/07-02-Combination-with-alignment-via-WCS.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Combination with alignment via WCS\n" 8 | ] 9 | } 10 | ], 11 | "metadata": {}, 12 | "nbformat": 4, 13 | "nbformat_minor": 2 14 | } 15 | -------------------------------------------------------------------------------- /notebooks/photometry/04.01-When-NOT-to-use-PSF-photometry.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# When NOT to use PSF photometry\n" 8 | ] 9 | } 10 | ], 11 | "metadata": {}, 12 | "nbformat": 4, 13 | "nbformat_minor": 2 14 | } 15 | -------------------------------------------------------------------------------- /notebooks/photometry/04.02-Determining-an-instrument's-PSF.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Determining an instrument's PSF\n" 8 | ] 9 | } 10 | ], 11 | "metadata": {}, 12 | "nbformat": 4, 13 | "nbformat_minor": 2 14 | } 15 | -------------------------------------------------------------------------------- /notebooks/photometry/02.00-Background-removal-for-photometry.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 
| { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Background removal for photometry\n" 8 | ] 9 | } 10 | ], 11 | "metadata": {}, 12 | "nbformat": 4, 13 | "nbformat_minor": 2 14 | } 15 | -------------------------------------------------------------------------------- /notebooks/photometry/03.01-When-NOT-to-use-aperture-photometry.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# When NOT to use aperture photometry\n" 8 | ] 9 | } 10 | ], 11 | "metadata": {}, 12 | "nbformat": 4, 13 | "nbformat_minor": 2 14 | } 15 | -------------------------------------------------------------------------------- /notebooks/photometry/05.02-Evaluating-the-quality-of-the-transforms.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Evaluating the quality of the transforms\n" 8 | ] 9 | } 10 | ], 11 | "metadata": {}, 12 | "nbformat": 4, 13 | "nbformat_minor": 2 14 | } 15 | -------------------------------------------------------------------------------- /notebooks/photometry/05.00-Transforming-to-the-standard-magnitude-system.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Transforming to the standard magnitude system\n" 8 | ] 9 | } 10 | ], 11 | "metadata": {}, 12 | "nbformat": 4, 13 | "nbformat_minor": 2 14 | } 15 | -------------------------------------------------------------------------------- /environment.yml: -------------------------------------------------------------------------------- 1 | name: ccd-guide 2 | channels: 3 | - astropy 4 | - conda-forge 5 | dependencies: 6 | - python=3.8 7 | - astropy 8 | - astroquery 9 | - matplotlib 10 | - ipython 11 | - jupyter 12 | - ipywidgets 13 | - scipy 14 | - nose 15 | - mock 16 | - funcsigs 17 | - pillow 18 | - ccdproc 19 | - scikit-image 20 | - dask 21 | - ginga 22 | - photutils 23 | -------------------------------------------------------------------------------- /notebooks/photometry/05.01-Removing-atmospheric-and-instrumental-effects-in-one-step.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Removing atmospheric and instrumental effects in one step\n" 8 | ] 9 | } 10 | ], 11 | "metadata": {}, 12 | "nbformat": 4, 13 | "nbformat_minor": 2 14 | } 15 | -------------------------------------------------------------------------------- /notebooks/07-03-Combination-with-alignment-based-on-star-positions-in-the-image.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Combination with alignment based on star positions in the image\n" 8 | ] 9 | } 10 | ], 11 | "metadata": {}, 12 | "nbformat": 4, 13 | "nbformat_minor": 2 14 | } 15 | -------------------------------------------------------------------------------- /notebooks/wrap_script.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | 3 | import nbformat as nbf 4 | 5 | from wrap_notebook_lines import wrap_notebook_markdown 6 | 7 | notebooks = 
Path('.').glob('??-??-*.ipynb') 8 | 9 | for notebook in notebooks: 10 | print(f'Wrapping {notebook}...') 11 | new_source = wrap_notebook_markdown(notebook, wrap_at=80) 12 | with open(notebook, 'w') as f: 13 | nbf.write(new_source, f, version=4) 14 | -------------------------------------------------------------------------------- /make-book.md: -------------------------------------------------------------------------------- 1 | # Converting this to [`jupyter-book`](https://jupyter.org/jupyter-book/intro.html) 2 | 3 | 1. Run [`nbconvert`](https://nbconvert.readthedocs.io/en/latest/) on notebooks to generate output. 4 | 2. Change filenames to 5 | + replace periods with dash except for the one before file extension. 6 | + replace parentheses with nothing 7 | 4. Move renamed notebooks to `content` folder in `ccd-as-book` with 8 | 5. Update TOC in `ccd-as-book` if needed. 9 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | notebooks/__pycache__ 2 | resources 3 | .nbgrader.log 4 | .ipynb_checkpoints 5 | __pycache__ 6 | notebooks/foo 7 | notebooks/python_imred_data 8 | notebooks/reduced 9 | notebooks/converted 10 | notebooks/path 11 | notebooks/example-thermo-electric 12 | notebooks/example1-reduced 13 | notebooks/example2-reduced 14 | notebooks/example3-reduced 15 | notebooks/example-cryo-LFC 16 | notebooks/2018-07-23 17 | notebooks/all_data 18 | notebooks/sky_flat_good_raw 19 | notebooks/sky_flat_good_working 20 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # A guide to CCD data reduction and stellar photometry using astropy and affiliated packages 2 | 3 | The inspiration for this work is two guides and a book: 4 | 5 | + [A User's Guide to Stellar CCD Photometry with IRAF](http://iraf.noao.edu/iraf/ftp/iraf/docs/daophot2.ps.Z) Massey and Davis (1992) 6 | + [A User's Guide to CCD Reductions with IRAF](A User's Guide to CCD Reductions with IRAF) Massey (1997) 7 | 8 | ## Try out the notebooks in the cloud: click the button below 9 | 10 | [![Binder](https://mybinder.org/badge_logo.svg)](https://mybinder.org/v2/gh/astropy/ccd-reduction-and-photometry-guide/master) 11 | -------------------------------------------------------------------------------- /notebooks/photometry/01.03-SExtractor-like:-sep-and-photutils.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# SExtractor-like: sep and photutils\n", 8 | "+ [sep: Python-wrapped innards of SExtractor](#sep:-Python-wrapped-innards-of-SExtractor)\n", 9 | "+ [photutils: image segmentation](#photutils:-image-segmentation)" 10 | ] 11 | }, 12 | { 13 | "cell_type": "markdown", 14 | "metadata": {}, 15 | "source": [ 16 | "## sep: Python-wrapped innards of SExtractor" 17 | ] 18 | }, 19 | { 20 | "cell_type": "markdown", 21 | "metadata": {}, 22 | "source": [ 23 | "## photutils: image segmentation" 24 | ] 25 | } 26 | ], 27 | "metadata": {}, 28 | "nbformat": 4, 29 | "nbformat_minor": 2 30 | } 31 | -------------------------------------------------------------------------------- /notebooks/08-00-Image-masking.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | 
"source": [ 7 | "# Image masking\n", 8 | "\n", 9 | "There are several reasons a particular pixel in a specific image might not be\n", 10 | "useful:\n", 11 | "\n", 12 | "+ The pixel could be \"hot,\" meaning it isn't possible to remove the dark current\n", 13 | "from the pixel.\n", 14 | "+ The pixel could be bad in the sense that it does not respond to light the way\n", 15 | "the other pixels do.\n", 16 | "+ A cosmic ray can hit the pixel during the imaging.\n", 17 | "\n", 18 | "The following notebooks walk through identifying each of those types of bad\n", 19 | "pixels and how to create a mask for them." 20 | ] 21 | } 22 | ], 23 | "metadata": { 24 | "kernelspec": { 25 | "display_name": "Python 3", 26 | "language": "python", 27 | "name": "python3" 28 | }, 29 | "language_info": { 30 | "codemirror_mode": { 31 | "name": "ipython", 32 | "version": 3 33 | }, 34 | "file_extension": ".py", 35 | "mimetype": "text/x-python", 36 | "name": "python", 37 | "nbconvert_exporter": "python", 38 | "pygments_lexer": "ipython3", 39 | "version": "3.6.8" 40 | } 41 | }, 42 | "nbformat": 4, 43 | "nbformat_minor": 4 44 | } 45 | -------------------------------------------------------------------------------- /notebooks/download_data.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | import tarfile 3 | 4 | from astropy.utils.data import download_file 5 | 6 | # Get single images 7 | url = 'https://zenodo.org/record/3320113/files/combined_bias_100_images.fit.bz2?download=1' 8 | download = download_file(url, show_progress=True) 9 | p = Path(download) 10 | p.rename('combined_bias_100_images.fit.bz2') 11 | 12 | url = 'https://zenodo.org/record/3312535/files/dark-test-0002d1000.fit.bz2?download=1' 13 | download = download_file(url, show_progress=True) 14 | p = Path(download) 15 | p.rename('dark-test-0002d1000.fit.bz2') 16 | 17 | url = 'https://zenodo.org/record/3332818/files/combined_dark_300.000.fits.bz2?download=1' 18 | download = download_file(url, show_progress=True) 19 | p = Path(download) 20 | p.rename('combined_dark_300.000.fits.bz2') 21 | 22 | 23 | # Get the tarball for the smaller example 24 | url = 'https://zenodo.org/record/3254683/files/example-cryo-LFC.tar.bz2?download=1' 25 | download = download_file(url, show_progress=True, cache=True) 26 | tarball = tarfile.open(download) 27 | tarball.extractall('.') 28 | 29 | # Get the tarball for the bigger example 30 | url = 'https://zenodo.org/record/3245296/files/example-thermo-electric.tar.bz2?download=1' 31 | download = download_file(url, show_progress=True, cache=True) 32 | tarball = tarfile.open(download) 33 | tarball.extractall('.') 34 | -------------------------------------------------------------------------------- /notebooks/link_fix.py: -------------------------------------------------------------------------------- 1 | import nbformat as nbf 2 | from astropy.table import Table 3 | 4 | #oof = nbf.read('magical_transofrms.ipynb', as_version=4) 5 | 6 | # gool = '06.01-Initial-reduction.ipynb' 7 | 8 | gool = 'magical_transofrms.ipynb' 9 | 10 | def markdown_cells(nb): 11 | """ 12 | Iterator for markdown cells in notebook. 13 | """ 14 | for cell in nb['cells']: 15 | if cell['cell_type'] == "markdown": 16 | yield cell 17 | 18 | 19 | def link_fix(text, name_dict): 20 | """ 21 | Replace old file names with new in markdown links. 
22 | """ 23 | new_text = text 24 | for old, new in name_dict.items(): 25 | new_text = new_text.replace(f']({old})', f']({new})') 26 | return new_text 27 | 28 | 29 | if __name__ == '__main__': 30 | names = {k: v for k, v in Table.read('old-and-new-names.csv')} 31 | 32 | for notebook_name in names.values(): 33 | try: 34 | notebook = nbf.read(notebook_name, as_version=4) 35 | except FileNotFoundError: 36 | continue 37 | 38 | for cell in markdown_cells(notebook): 39 | new_source = link_fix(cell['source'], names) 40 | if new_source != cell['source']: 41 | print(f'fixed link in {notebook_name}') 42 | cell['source'] = new_source 43 | 44 | with open(notebook_name, 'w') as f: 45 | nbf.write(notebook, f) 46 | -------------------------------------------------------------------------------- /notebooks/photometry/00.00-Preface.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Preface\n" 8 | ] 9 | }, 10 | { 11 | "cell_type": "markdown", 12 | "metadata": {}, 13 | "source": [ 14 | "Photometry is the measurement of the amount of light from an image. There are, broadly speaking, two types of photometry. *Aperture photometry* measures the amount of light inside a region (the aperture) of fixed size. This is the kind of photometry done by photoelectric photometers. *PSF photometry* fits the image of an object like a star to the point spread function (PSF) of the camera. It is essential to doing photometry in crowded fields.\n", 15 | "\n", 16 | "### The notebooks in this part of the guide draw heavily from the excellent [photutils documentation](https://photutils.readthedocs.io/en/stable/)" 17 | ] 18 | } 19 | ], 20 | "metadata": { 21 | "kernelspec": { 22 | "display_name": "Python 3", 23 | "language": "python", 24 | "name": "python3" 25 | }, 26 | "language_info": { 27 | "codemirror_mode": { 28 | "name": "ipython", 29 | "version": 3 30 | }, 31 | "file_extension": ".py", 32 | "mimetype": "text/x-python", 33 | "name": "python", 34 | "nbconvert_exporter": "python", 35 | "pygments_lexer": "ipython3", 36 | "version": "3.6.4" 37 | } 38 | }, 39 | "nbformat": 4, 40 | "nbformat_minor": 2 41 | } 42 | -------------------------------------------------------------------------------- /massey-photometry-outilne.md: -------------------------------------------------------------------------------- 1 | --- 2 | note: Based on Massey and Davis, A User's Guide to Stellar CCD Photometry with IRAF 3 | --- 4 | 5 | # Intro 6 | 7 | # Getting started 8 | 9 | ## Fixing your headers 10 | 11 | ### Correcting the exposure time 12 | 13 | 14 | 15 | ### Computing the effective airmass 16 | 17 | # Standard Star Photometry and Reduction 18 | 19 | ## Obtaining Aperture Photometry of your standards 20 | ## Picking an aperture size 21 | ## Setting things up 22 | ## Doing it: Aperture photometry at last 23 | 24 | ### Automatic star finding 25 | ### Photometry by eye 26 | 27 | WTF? 
28 | 29 | ## Examining results: the power of txdump 30 | 31 | ## The Standard Star solutions 32 | ## Making the standard star catalog 33 | ## Making the standard star observations file 34 | ## Defining the transforms 35 | ## Solving the transformation equations 36 | 37 | # Crowded field photometry: IRAF/daophot 38 | ## Historical summary 39 | 40 | Yeah, this will need to be updated 41 | 42 | ## `daophot` overview 43 | ## How big is a star: a few useful definitions 44 | ## Setting up the parameter files "daopars" and "datapars" 45 | ## Finding stars: `daofind` and `tvmark` 46 | ## Aperture photometry with `phot` 47 | ## Making the PSF with `psf` 48 | ## Doing the PSF fitting: `allstar` 49 | ## Matching the frames 50 | ## Determining the aperture correction 51 | ## `daophot` summary 52 | 53 | # Transforming to the standard system. 54 | 55 | # imexamine: a useful tool 56 | 57 | -------------------------------------------------------------------------------- /notebooks/07-00-Combining-images.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Combining images\n", 8 | "\n", 9 | "An overview of combining images is in [Image combination](01-06-Image-combination.ipynb). The next\n", 10 | "sections discuss a couple of common cases not covered in the earlier section:\n", 11 | "\n", 12 | "+ Combining science images without aligning them to generate *sky flats*. These\n", 13 | "flats have the advantage that the light source exactly matches the spectrum of\n", 14 | "the night sky. They have the big disadvantage that the sky counts are typically\n", 15 | "very low, so many, many images need to be combined to produce a reasonably low\n", 16 | "noise flat frame.\n", 17 | "+ Combining science images by aligning them using WCS information if it is\n", 18 | "present in the headers of the images." 19 | ] 20 | } 21 | ], 22 | "metadata": { 23 | "kernelspec": { 24 | "display_name": "Python 3", 25 | "language": "python", 26 | "name": "python3" 27 | }, 28 | "language_info": { 29 | "codemirror_mode": { 30 | "name": "ipython", 31 | "version": 3 32 | }, 33 | "file_extension": ".py", 34 | "mimetype": "text/x-python", 35 | "name": "python", 36 | "nbconvert_exporter": "python", 37 | "pygments_lexer": "ipython3", 38 | "version": "3.6.8" 39 | } 40 | }, 41 | "nbformat": 4, 42 | "nbformat_minor": 2 43 | } 44 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | BSD 3-Clause License 2 | 3 | Copyright (c) 2018, Matt Craig and the Astropy Project 4 | All rights reserved. 5 | 6 | Redistribution and use in source and binary forms, with or without 7 | modification, are permitted provided that the following conditions are met: 8 | 9 | * Redistributions of source code must retain the above copyright notice, this 10 | list of conditions and the following disclaimer. 11 | 12 | * Redistributions in binary form must reproduce the above copyright notice, 13 | this list of conditions and the following disclaimer in the documentation 14 | and/or other materials provided with the distribution. 15 | 16 | * Neither the name of the copyright holder nor the names of its 17 | contributors may be used to endorse or promote products derived from 18 | this software without specific prior written permission. 
19 | 20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 21 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 23 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 24 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 26 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 27 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 28 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 29 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 | -------------------------------------------------------------------------------- /notebooks/photometry/01.02-IRAF-like:-photutils.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# IRAF-like: photutils\n", 8 | "\n", 9 | "[photutils](https://photutils.readthedocs.io/en/stable/) provides a couple of options for stellar source detection that will be familiar to users of IRAF. One is DAOFIND and the other is IRAF's starfind. The recommendation is to use DAOFIND because it is more general than starfind (e.g. it allows elliptical sources) and detects more sources. This notebook will focus on DAOFIND, implemented in photutils by the class `DAOStarFinder`.\n", 10 | "\n", 11 | "Both methods find sources above a threshold that is specified as a multiple of the background noise level, and both require that the background be subtracted from the image.\n", 12 | "\n", 13 | "You can use any of the background subtraction methods that you like; often simply subtracting the median will be adequate, which is what we will do in this notebook. " 14 | ] 15 | }, 16 | { 17 | "cell_type": "markdown", 18 | "metadata": {}, 19 | "source": [ 20 | "## DAOPHOT" 21 | ] 22 | } 23 | ], 24 | "metadata": { 25 | "kernelspec": { 26 | "display_name": "Python 3", 27 | "language": "python", 28 | "name": "python3" 29 | }, 30 | "language_info": { 31 | "codemirror_mode": { 32 | "name": "ipython", 33 | "version": 3 34 | }, 35 | "file_extension": ".py", 36 | "mimetype": "text/x-python", 37 | "name": "python", 38 | "nbconvert_exporter": "python", 39 | "pygments_lexer": "ipython3", 40 | "version": "3.6.4" 41 | } 42 | }, 43 | "nbformat": 4, 44 | "nbformat_minor": 2 45 | } 46 | -------------------------------------------------------------------------------- /notebooks/add_matplotlib_style.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | 3 | import nbformat as nbf 4 | 5 | to_fix = Path('.').glob('??-??-?*.ipynb') 6 | 7 | 8 | style_cell = nbf.v4.new_code_cell("# Use custom style for larger fonts and figures\nplt.style.use('guide.mplstyle')") 9 | 10 | 11 | def add_cell_before(nbcells): 12 | """ 13 | Figure out which cell, if any, the style cell 14 | should be inserted before. 15 | 16 | Parameters 17 | ---------- 18 | 19 | nbcells : list of notebook cells 20 | The cells to look at to decide if we need to add 21 | the style. 22 | 23 | Returns 24 | ------- 25 | 26 | None or int 27 | Either the index before which the new cell should be inserted 28 | or None if no insertion is needed. 
29 | """ 30 | insert_before = None 31 | for idx, c in enumerate(nbcells): 32 | if c['cell_type'] != 'code': 33 | continue 34 | if ('import matplotlib' in c['source'] or 35 | 'from matplotlib' in c['source']): 36 | insert_before = idx + 1 37 | if style_cell['source'] in c['source']: 38 | insert_before = None 39 | 40 | return insert_before 41 | 42 | 43 | for nb_file in to_fix: 44 | print(f"Examining {nb_file}") 45 | with open(nb_file) as f: 46 | notebook = nbf.read(f, as_version=4) 47 | insert_at = add_cell_before(notebook['cells']) 48 | if insert_at is not None: 49 | print(f"\tInserting style cell in {nb_file}") 50 | notebook['cells'].insert(insert_at, style_cell) 51 | with open(nb_file, 'w') as f: 52 | nbf.write(notebook, f) 53 | else: 54 | print("\tNo insertion needed") 55 | -------------------------------------------------------------------------------- /notebooks/03-00-Dark-current-and-hot-pixels.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Dark current and hot pixels" 8 | ] 9 | }, 10 | { 11 | "cell_type": "markdown", 12 | "metadata": {}, 13 | "source": [ 14 | "Every image from a CCD contains *dark current*, which are counts in a raw image\n", 15 | "caused by thermal effects in the CCD.\n", 16 | "The dark current in modern CCDs is extremely small if the camera is cooled in\n", 17 | "some way. Cameras cooled with liquid nitrogen have nearly zero dark current\n", 18 | "while thermoelectrically-cooled CCDs have a somewhat larger dark current. The\n", 19 | "dark current in a CCD operating at room temperature will typically be very\n", 20 | "large.\n", 21 | "\n", 22 | "Even a camera in which the dark current is *typically* very small will have a\n", 23 | "small fraction of pixels, called hot pixels, in which the dark current is much\n", 24 | "higher.\n", 25 | "\n", 26 | "The next notebook walks through how to identify those pixels and how to decide\n", 27 | "the right way to remove dark current from your data." 28 | ] 29 | } 30 | ], 31 | "metadata": { 32 | "kernelspec": { 33 | "display_name": "Python 3", 34 | "language": "python", 35 | "name": "python3" 36 | }, 37 | "language_info": { 38 | "codemirror_mode": { 39 | "name": "ipython", 40 | "version": 3 41 | }, 42 | "file_extension": ".py", 43 | "mimetype": "text/x-python", 44 | "name": "python", 45 | "nbconvert_exporter": "python", 46 | "pygments_lexer": "ipython3", 47 | "version": "3.6.8" 48 | } 49 | }, 50 | "nbformat": 4, 51 | "nbformat_minor": 2 52 | } 53 | -------------------------------------------------------------------------------- /notebooks/photometry/01.00-Source-detection.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Source detection\n" 8 | ] 9 | }, 10 | { 11 | "cell_type": "markdown", 12 | "metadata": {}, 13 | "source": [ 14 | "The first step in doing photometry is deciding which objects you want photometry on. 
There are a few approaches to take:\n", 15 | "\n", 16 | "+ Find sources in your image and perform photometry on every source you can detect in the image.\n", 17 | "+ Perform photometry on objects from a catalog (either one you have created or from some other source); this presumes there is WCS information in the images so that catalog RA/Dec can be translated to pixel position on the image.\n", 18 | "+ Stack your images, detect the sources in that much deeper stacked image, treat those sources as your catalog and use that catalog to perform photometry." 19 | ] 20 | }, 21 | { 22 | "cell_type": "markdown", 23 | "metadata": {}, 24 | "source": [ 25 | "The next series of notebooks will describe how to do source detection.\n", 26 | "\n", 27 | "### The notebooks in this part of the guide draw heavily from the excellent [photutils documentation](https://photutils.readthedocs.io/en/stable/)" 28 | ] 29 | } 30 | ], 31 | "metadata": { 32 | "kernelspec": { 33 | "display_name": "Python 3", 34 | "language": "python", 35 | "name": "python3" 36 | }, 37 | "language_info": { 38 | "codemirror_mode": { 39 | "name": "ipython", 40 | "version": 3 41 | }, 42 | "file_extension": ".py", 43 | "mimetype": "text/x-python", 44 | "name": "python", 45 | "nbconvert_exporter": "python", 46 | "pygments_lexer": "ipython3", 47 | "version": "3.6.4" 48 | } 49 | }, 50 | "nbformat": 4, 51 | "nbformat_minor": 2 52 | } 53 | -------------------------------------------------------------------------------- /my-ccd-guide.md: -------------------------------------------------------------------------------- 1 | # Preface 2 | 3 | # Understanding an astronomical CCD image 4 | 5 | ## Counts, photons and electrons 6 | ### Burp 7 | ## Not all counts are light 8 | ## Your detector is not ideal 9 | ## Construction of an artificial (but realistic) image 10 | ## Calibration overview 11 | ## Image combination 12 | ## Calibration choices you need to make 13 | 14 | # Handling overscan, trimming, and bias subtraction 15 | 16 | ## Inspect your images and make a choice about next steps 17 | ## Subtract overscan, if desired 18 | ## Trim, if needed 19 | ## Combine bias images to make master 20 | 21 | # Dark current and hot pixels 22 | 23 | ## The ideal case: your dark frames measure dark current, which scales linearly with time 24 | ## Reality: most of your dark frame is noise and not all of the time dependent artifacts are dark current 25 | ## Identifying hot pixels 26 | ## Make a choice about next steps for darks 27 | ## Subtract bias, if necessary 28 | 29 | # Interlude: Image masking 30 | ## Identifying bad pixels 31 | ## Creating a mask 32 | ## incorporating the mask in reduction 33 | 34 | # Flat corrections 35 | ## There are no perfect flats 36 | ## Make a choice about next steps for flats 37 | ## Calibrating the flats 38 | ### Subtract overscan and trim, if necessary 39 | ### Subtract bias, if necessary 40 | ### Subtract dark current, scaling if necessary (scale down when possible) 41 | ## Combining flats 42 | 43 | # Reducing science images 44 | ## Initial reduction 45 | ### Subtract overscan and trim, if necessary 46 | ### Subtract bias, if necessary 47 | ### Subtract dark current, scaling if necessary (scale down when possible) 48 | ### Flat correct 49 | ## Cosmic ray removal 50 | 51 | # Combining images 52 | ## Combine without aligning to create a sky flat 53 | ## Combination with alignment via WCS 54 | ## Combination with alignment based on star positions in the image 55 | 56 | 57 
-------------------------------------------------------------------------------- /massey-ccd-outline.md: -------------------------------------------------------------------------------- 1 | --- 2 | note: The outline below is drawn from Massey 1997 guide to CCD Reductions 3 | --- 4 | 5 | # Why your data needs work and what to do about it (not likely to redo exact details) 6 | 7 | # Doing reduction 8 | 9 | ## Outline of reduction steps 10 | 11 | I imagine this will be very similar except maybe for some terminology 12 | 13 | ## Examining frames to determine trim and bias 14 | 15 | People should know they need to do this but they should also know to ask about the instrument they are using first. 16 | 17 | ## Setting things up: `setinstrument`, parameters of `ccdproc`, and `ccdlist` 18 | 19 | No clue what this is... 20 | 21 | ## Combining Bias Frames with `zerocombine` 22 | 23 | Yeah, that is part of `ccdproc` and I don't think it needs that much emphasis? 24 | 25 | ## First pass through `ccdproc` 26 | 27 | Need to read this... 28 | 29 | ## Constructing a bad pixel mask 30 | 31 | ## Dealing with The Darks 32 | 33 | In my experience, and with electrically cooled CCDs (let alone ones in a cryo tank), the dark current is small *except* for some hot pixels. See the notebook I wrote looking at darks and how linear pixels are. 34 | 35 | ## Combining Flat-Field Exposures 36 | 37 | Yikes....stay away from technique and hope we actually have some decent flats. 38 | 39 | ## Normalizing spectroscopic flats using `response` 40 | 41 | That is likely beyond me. 42 | 43 | ## Flat-field division: `ccdproc` Pass 2 44 | 45 | ## Getting the Flat-Fielding Really Right 46 | 47 | ### Combining the twilight/blank-sky flats 48 | 49 | ### Creating the Illumination Correction 50 | 51 | ## Finishing the flat-fielding 52 | 53 | ## Fixing bad pixels 54 | 55 | # How many and what kind of calibration frames do you need? 56 | 57 | # Ins and Outs of Combining Frames 58 | 59 | Note there is also a nice paper about image combination that talks about clipping before combining. 60 | 61 | # Summary of reduction steps 62 | 63 | ## Spectroscopic example 64 | ## Direct imaging example 65 | -------------------------------------------------------------------------------- /notebooks/01-09-Calibration-choices-you-need-to-make.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Calibration choices you need to make" 8 | ] 9 | }, 10 | { 11 | "cell_type": "markdown", 12 | "metadata": {}, 13 | "source": [ 14 | "There are a few choices you need to make about how you will calibrate your data.\n", 15 | "Sometimes the decision will be made for you by the data you have." 16 | ] 17 | }, 18 | { 19 | "cell_type": "markdown", 20 | "metadata": {}, 21 | "source": [ 22 | "## Subtract bias and dark as separate steps or in one step?\n", 23 | "\n", 24 | "Every dark image contains bias when it comes off of the camera, so in principle\n", 25 | "you can take care of both bias and dark by constructing a master dark image that\n", 26 | "leaves the bias already present in the dark images in place.\n", 27 | "\n", 28 | "This only works if, for every image from which you need to remove dark current,\n", 29 | "you have a master dark of exactly the same exposure length.\n", 30 | "\n", 31 | "If not, you need to produce master dark images with the bias removed so that\n", 32 | "they can be scaled by exposure time."
33 | ] 34 | }, 35 | { 36 | "cell_type": "markdown", 37 | "metadata": {}, 38 | "source": [ 39 | "## Subtract overscan or not?\n", 40 | "\n", 41 | "This is only applicable if your images have an overscan region. Subtracting\n", 42 | "overscan can be useful for removing small (typically a few counts) variations\n", 43 | "from image-to-image over the course of a night." 44 | ] 45 | } 46 | ], 47 | "metadata": { 48 | "kernelspec": { 49 | "display_name": "Python 3", 50 | "language": "python", 51 | "name": "python3" 52 | }, 53 | "language_info": { 54 | "codemirror_mode": { 55 | "name": "ipython", 56 | "version": 3 57 | }, 58 | "file_extension": ".py", 59 | "mimetype": "text/x-python", 60 | "name": "python", 61 | "nbconvert_exporter": "python", 62 | "pygments_lexer": "ipython3", 63 | "version": "3.6.7" 64 | } 65 | }, 66 | "nbformat": 4, 67 | "nbformat_minor": 2 68 | } 69 | -------------------------------------------------------------------------------- /notebooks/Artificial-image-explorer.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 6, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [ 9 | "%matplotlib inline\n", 10 | "from matplotlib import pyplot as plt\n", 11 | "import numpy as np\n", 12 | "from ipywidgets import interactive, interact\n", 13 | "\n", 14 | "from convenience_functions import show_image\n", 15 | "import image_sim as isim\n", 16 | "\n", 17 | "# Use custom style for larger fonts and figures\n", 18 | "plt.style.use('guide.mplstyle')" 19 | ] 20 | }, 21 | { 22 | "cell_type": "code", 23 | "execution_count": 7, 24 | "metadata": {}, 25 | "outputs": [ 26 | { 27 | "data": { 28 | "application/vnd.jupyter.widget-view+json": { 29 | "model_id": "0d7e4ea8f6e241099afcd01f427e50a0", 30 | "version_major": 2, 31 | "version_minor": 0 32 | }, 33 | "text/plain": [ 34 | "interactive(children=(IntSlider(value=1100, continuous_update=False, description='bias_level', max=1200, min=1…" 35 | ] 36 | }, 37 | "metadata": {}, 38 | "output_type": "display_data" 39 | } 40 | ], 41 | "source": [ 42 | "def complete_image(bias_level=1100, read=10.0, gain=1, dark=0.1, \n", 43 | " exposure=30, hot_pixels=True, sky_counts=200):\n", 44 | " synthetic_image = np.zeros([500, 500])\n", 45 | " show_image(synthetic_image + \n", 46 | " isim.read_noise(synthetic_image, read) +\n", 47 | " isim.bias(synthetic_image, bias_level, realistic=True) + \n", 48 | " isim.dark_current(synthetic_image, dark, exposure, hot_pixels=hot_pixels) +\n", 49 | " isim.sky_background(synthetic_image, sky_counts),\n", 50 | " cmap='gray',\n", 51 | " figsize=(4, 4))\n", 52 | " \n", 53 | "i = interactive(complete_image, bias_level=(1000,1200,10), dark=(0.0,1,0.1), sky_counts=(0, 300, 50),\n", 54 | " gain=(0.5, 3.0, 0.25), read=(0, 50, 5.0),\n", 55 | " exposure=(0, 300, 30))\n", 56 | "\n", 57 | "for kid in i.children:\n", 58 | " try:\n", 59 | " kid.continuous_update = False\n", 60 | " except KeyError:\n", 61 | " pass\n", 62 | "i" 63 | ] 64 | }, 65 | { 66 | "cell_type": "code", 67 | "execution_count": null, 68 | "metadata": {}, 69 | "outputs": [], 70 | "source": [] 71 | } 72 | ], 73 | "metadata": { 74 | "kernelspec": { 75 | "display_name": "Python 3", 76 | "language": "python", 77 | "name": "python3" 78 | }, 79 | "language_info": { 80 | "codemirror_mode": { 81 | "name": "ipython", 82 | "version": 3 83 | }, 84 | "file_extension": ".py", 85 | "mimetype": "text/x-python", 86 | "name": "python", 87 | "nbconvert_exporter": "python", 88 | 
"pygments_lexer": "ipython3", 89 | "version": "3.7.3" 90 | } 91 | }, 92 | "nbformat": 4, 93 | "nbformat_minor": 4 94 | } 95 | -------------------------------------------------------------------------------- /notebooks/03-04-Handling-overscan-and-bias-for-dark-frames.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Handling overscan and bias for dark frames" 8 | ] 9 | }, 10 | { 11 | "cell_type": "markdown", 12 | "metadata": {}, 13 | "source": [ 14 | "## The options for reducing dark frames\n", 15 | "\n", 16 | "The next steps to take depend on two things:\n", 17 | "\n", 18 | "1. Are you subtracting overscan? If so, you should subtract overscan for the\n", 19 | "dark frames.\n", 20 | "1. Will you need to scale these darks to a different exposure time? If so, you\n", 21 | "need to subtract bias from the darks. If not, leave the bias in." 22 | ] 23 | }, 24 | { 25 | "cell_type": "markdown", 26 | "metadata": {}, 27 | "source": [ 28 | "### 1. Do you need to subtract overscan?\n", 29 | "\n", 30 | "If you decide to subtract the overscan from *any* of the images used in your\n", 31 | "data reduction then you must subtract overscan from *all* of the images. This\n", 32 | "includes the darks, and is independent of whether or not you intend to scale the dark\n", 33 | "frames to other exposure times.\n", 34 | "\n", 35 | "Use [`ccdproc.subtract_overscan`](https://ccdproc.readthedocs.io/en/latest/ccdproc/reduction_toolbox.html#overscan-subtraction) to remove the overscan. See the notebook XX\n", 36 | "for a discussion of overscan, and see YY for a worked example in which overscan\n", 37 | "is subtracted." 38 | ] 39 | }, 40 | { 41 | "cell_type": "markdown", 42 | "metadata": {}, 43 | "source": [ 44 | "### 2. Do you need to scale the darks?\n", 45 | "\n", 46 | "It depends on the exposure times of the other images you need to reduce. If the\n", 47 | "other images have exposure times that match your dark frames then you do not\n", 48 | "need to scale the darks. If any other images (except bias frames, which are\n", 49 | "always zero exposure) have exposure time that do not match the exposure times of\n", 50 | "the darks, then you will need to scale the darks by exposure time.\n", 51 | "\n", 52 | "If you do need to scale the dark frames, then you should subtract the bias from\n", 53 | "them using [`ccdproc.subtract_bias`](https://ccdproc.readthedocs.io/en/latest/ccdproc/reduction_toolbox.html#subtract-bias-and-dark). Examples of using\n", 54 | "[`ccdproc.subtract_bias`](https://ccdproc.readthedocs.io/en/latest/ccdproc/reduction_toolbox.html#subtract-bias-and-dark) are in the next notebook.\n", 55 | "\n", 56 | "If you do not need to scale the dark frames to a different exposure time then do\n", 57 | "not subtract the bias from the darks. 
The dark frames will serve to remove both\n", 58 | "the bias and the dark current your images.\n", 59 | "\n", 60 | "\n", 61 | "As a reminder, you should try very hard to avoid scaling dark frames up to a\n", 62 | "longer exposure time because you will primarily be scaling up noise rather than\n", 63 | "dark current.\n", 64 | "\n" 65 | ] 66 | } 67 | ], 68 | "metadata": { 69 | "kernelspec": { 70 | "display_name": "Python 3", 71 | "language": "python", 72 | "name": "python3" 73 | }, 74 | "language_info": { 75 | "codemirror_mode": { 76 | "name": "ipython", 77 | "version": 3 78 | }, 79 | "file_extension": ".py", 80 | "mimetype": "text/x-python", 81 | "name": "python", 82 | "nbconvert_exporter": "python", 83 | "pygments_lexer": "ipython3", 84 | "version": "3.6.8" 85 | } 86 | }, 87 | "nbformat": 4, 88 | "nbformat_minor": 2 89 | } 90 | -------------------------------------------------------------------------------- /notebooks/old-and-new-names.csv: -------------------------------------------------------------------------------- 1 | old_name,new_name 2 | 00.00-Preface.ipynb,00-00-Preface.ipynb 3 | 01.00-Understanding-an-astronomical-CCD-image.ipynb,01-00-Understanding-an-astronomical-CCD-image.ipynb 4 | 01.03-Construction-of-an-artificial-(but-realistic)-image.ipynb,01-03-Construction-of-an-artificial-but-realistic-image.ipynb 5 | 01.04-Nonuniform-sensitivity.ipynb,01-04-Nonuniform-sensitivity.ipynb 6 | 01.05-Calibration-overview.ipynb,01-05-Calibration-overview.ipynb 7 | 01.06-Image-combination.ipynb,01-06-Image-combination.ipynb 8 | 01.08-Overscan.ipynb,01-08-Overscan.ipynb 9 | 01.09-Calibration-choices-you-need-to-make.ipynb,01-09-Calibration-choices-you-need-to-make.ipynb 10 | 01.11-reading-images.ipynb,01-11-reading-images.ipynb 11 | "02.00-Handling-overscan,-trimming,-and-bias-subtraction.ipynb",02-00-Handling-overscan-trimming-and-bias-subtraction.ipynb 12 | 02.01-Calibrating-bias-images.ipynb,02-01-Calibrating-bias-images.ipynb 13 | 02.04-Combine-bias-images-to-make-master.ipynb,02-04-Combine-bias-images-to-make-master.ipynb 14 | 03.00-Dark-current-and-hot-pixels.ipynb,03-00-Dark-current-and-hot-pixels.ipynb 15 | 03.01-Dark-current-The-ideal-case.ipynb,03-01-Dark-current-The-ideal-case.ipynb 16 | 03.02-Real-dark-current-noise-and-other-artifacts.ipynb,03-02-Real-dark-current-noise-and-other-artifacts.ipynb 17 | 03.03-Identifying-hot-pixels.ipynb,03-03-Identifying-hot-pixels.ipynb 18 | 03.04-Handling-overscan-and-bias-for-dark-frames.ipynb,03-04-Handling-overscan-and-bias-for-dark-frames.ipynb 19 | 03.05-Calibrate-dark-images.ipynb,03-05-Calibrate-dark-images.ipynb 20 | 03.06-Combine-darks-for-use-in-later-calibration-steps.ipynb,03-06-Combine-darks-for-use-in-later-calibration-steps.ipynb 21 | 04.00-Interlude:-Image-masking.ipynb,04-00-Interlude:-Image-masking.ipynb 22 | 04.01-Identifying-bad-pixels.ipynb,04-01-Identifying-bad-pixels.ipynb 23 | 04.02-Creating-a-mask.ipynb,04-02-Creating-a-mask.ipynb 24 | 04.03-incorporating-the-mask-in-reduction.ipynb,04-03-incorporating-the-mask-in-reduction.ipynb 25 | 05.00-Flat-corrections.ipynb,05-00-Flat-corrections.ipynb 26 | 05.01-There-are-no-perfect-flats.ipynb,05-01-There-are-no-perfect-flats.ipynb 27 | 05.02-Make-a-choice-about-next-steps-for-flats.ipynb,05-02-Make-a-choice-about-next-steps-for-flats.ipynb 28 | 05.03-Calibrating-the-flats.ipynb,05-03-Calibrating-the-flats.ipynb 29 | 05.04-Combining-flats.ipynb,05-04-Combining-flats.ipynb 30 | 06.00-Reducing-science-images.ipynb,06-00-Reducing-science-images.ipynb 31 | 
06.01-Initial-reduction.ipynb,06-01-Initial-reduction.ipynb 32 | 06.02-Cosmic-ray-removal.ipynb,06-02-Cosmic-ray-removal.ipynb 33 | 06.03-incorporating-masks-into-calibrated-science-images.ipynb,06-03-incorporating-masks-into-calibrated-science-images.ipynb 34 | 07.00-Combining-images.ipynb,07-00-Combining-images.ipynb 35 | 07.01-Creating-a-sky-flat.ipynb,07-01-Creating-a-sky-flat.ipynb 36 | 07.02-Combination-with-alignment-via-WCS.ipynb,07-02-Combination-with-alignment-via-WCS.ipynb 37 | 07.03-Combination-with-alignment-based-on-star-positions-in-the-image.ipynb,07-03-Combination-with-alignment-based-on-star-positions-in-the-image.ipynb 38 | 03-03-Identifying-hot-pixels.ipynb,08-01-Identifying-hot-pixels.ipynb 39 | 04-02-Creating-a-mask.ipynb,08-02-Creating-a-mask.ipynb 40 | 06-03-incorporating-masks-into-calibrated-science-images.ipynb,08-03-incorporating-masks-into-calibrated-science-images.ipynb 41 | 08-03-incorporating-masks-into-calibrated-science-images.ipynb,08-05-incorporating-masks-into-calibrated-science-images.ipynb 42 | 08-00-Interlude-Image-masking.ipynb,08-00-Image-masking.ipynb 43 | 06-02-Cosmic-ray-removal.ipynb,08-03-Cosmic-ray-removal.ipynb -------------------------------------------------------------------------------- /notebooks/README.md: -------------------------------------------------------------------------------- 1 | # CCD guide: preparation for publishing 2 | 3 | Want to try to get all of the processing steps in going from notebooks to book in one place. 4 | 5 | ### Remove old outputs (in python) 6 | 7 | ```python 8 | from process_for_book import clean 9 | clean() 10 | ``` 11 | 12 | ### Generate list of notebooks to process (fish version): 13 | 14 | ```shell 15 | # The sort below is important because later notebooks depend on 16 | # output of earlier ones. 17 | set to_conv (find . -depth 1 -name 0[01234568]-\?\?-\*.ipynb | sort -) 18 | ``` 19 | 20 | ### Run the notebooks to generate output (fish version) 21 | 22 | ```shell 23 | for conv in $to_conv 24 | jupyter nbconvert --to notebook --execute --ExecutePreprocessor.timeout=-1 $conv 25 | end 26 | ``` 27 | 28 | ### Move the generated notebooks to separate folder (fish shown) 29 | 30 | ```shell 31 | # This can be refactored easily. 32 | python process_for_book.py 33 | ``` 34 | 35 | ### Replace links to notebooks with links to html 36 | 37 | **Modifies notebooks in the directory with the *converted* notebooks** 38 | 39 | ```python 40 | from pathlib import Path 41 | import os 42 | 43 | from process_for_book import replace_links_in_notebook 44 | 45 | os.chdir('converted') 46 | 47 | p = Path('.') 48 | 49 | notebooks = p.glob('*.ipynb') 50 | for notebook in notebooks: 51 | replace_links_in_notebook(str(notebook)) 52 | 53 | os.chdir('..') 54 | ``` 55 | 56 | ### Set GitHub token 57 | 58 | 59 | ```shell 60 | set -x GITHUB_TOKEN your_token_here 61 | ``` 62 | 63 | ### Clean up old review rounds on GitHub, if any 64 | 65 | **Set `GITHUB_TOKEN` first** 66 | 67 | ```python 68 | from add_github_links import delete_branches_prs, get_github_repo 69 | repo = get_github_repo('mwcraig', 'ccd-reduction-and-photometry-guide') 70 | # Replace the name review-8e187b6 with the actual name you want 71 | # to eliminate, of course. 72 | delete_branches_prs('review-8e187b6', repo) 73 | ``` 74 | 75 | 76 | ### Copy content from the working directory to content 77 | 78 | This is silly, but right now it needs a copy/paste. DO NOT MOVE because 79 | the logic in the link-adding code below is a little janky. 
80 | 81 | ```shell 82 | cp converted/* /Users/mcraig/Documents/Research/ccd-as-book/content 83 | ``` 84 | 85 | ### Add links for commenting on each section 86 | 87 | **Set `GITHUB_TOKEN` first** 88 | 89 | ```python 90 | from add_github_links import commentify_all_notebooks 91 | converted_for_book = '/Users/mcraig/Documents/Research/ccd-as-book/content' 92 | path_to_original = '.' 93 | commentify_all_notebooks(converted_for_book, 94 | path_to_original, 95 | comment_group='review-ee0cbc6') 96 | ``` 97 | 98 | 99 | ### Build the book markdown locally 100 | 101 | ```shell 102 | # Change to root directory of book 103 | jupyter-book build . 104 | ``` 105 | 106 | ### Build/serve to check locally 107 | 108 | ```shell 109 | # Change to root directory of book 110 | make serve 111 | ``` 112 | 113 | # CCD guide message for reviewers 114 | 115 | Dear X, 116 | 117 | Thanks for agreeing to take a look at the draft guide to reducing CCD data using astropy. 118 | 119 | The most straightforward way to provide feedback does not require you to run any of the code on your computer (though that is an option if you prefer it). 120 | 121 | You will need a free account on GitHub.com to make comments, and you will need to log into GitHub. 122 | 123 | Please go to the book at https://mwcraig.github.io/ccd-as-book/00-00-Preface.html 124 | 125 | Below each section heading is a link that says "Click here to comment on this section in GitHub". 126 | 127 | Clicking on any of those links as you read through the guide will take you to the location of that section on GitHub so that you can make comments. 128 | 129 | To make a comment: 130 | 131 | + When you move your mouse over a line, a blue "plus" sign will be visible at the beginning of the line (if you are logged in to GitHub). 132 | + Click that blue plus and a box for making a comment will appear. 133 | + When you are done writing your comment, click either "Add single comment" or "Start a review". 134 | + If you click "Start a review" then you will need to complete the review by clicking on "Finish your review" in the upper right hand corner of the screen. 135 | 136 | Thanks, 137 | Matt Craig 138 | -------------------------------------------------------------------------------- /notebooks/wrap_notebook_lines.py: -------------------------------------------------------------------------------- 1 | import uuid 2 | import re 3 | from textwrap import TextWrapper 4 | 5 | import nbformat as nbf 6 | 7 | from link_fix import markdown_cells 8 | 9 | 10 | def find_links(text): 11 | """ 12 | Find Markdown links in text and return a match object. 13 | 14 | Markdown links are expected to have the form [some txt](A-url.ext) 15 | or ![Alt text](cool-image.png). 16 | 17 | Parameters 18 | ---------- 19 | 20 | text : str 21 | Text in which to search for links. 22 | 23 | Returns 24 | ------- 25 | 26 | list 27 | List of ``re.Match`` objects, one for each link found. Each object 28 | has two named groups, 'link_text', which contains the the part between 29 | the square brackets, and 'link',which is the URL (or file name for an 30 | image). 31 | """ 32 | markdown_link = \ 33 | re.compile(r"!?\[(?P.+?\n*?.*?)\]\((?P.+?)\)", 34 | flags=re.MULTILINE) 35 | groups = [m for m in markdown_link.finditer(text)] 36 | return groups 37 | 38 | 39 | def find_latex(text): 40 | """ 41 | Find Latex equation blocks in text and return a match object. 42 | 43 | Latex blocks are expected to begin and end with double dollar signs, $$. 
44 | 45 | Parameters 46 | ---------- 47 | 48 | text : str 49 | Text in which to search for latex. 50 | 51 | Returns 52 | ------- 53 | 54 | list 55 | List of ``re.Match`` objects, one for each latex block found. 56 | """ 57 | markdown_link = re.compile(r"\$\$.*?\$\$", flags=re.MULTILINE + re.DOTALL) 58 | groups = [m for m in markdown_link.finditer(text)] 59 | return groups 60 | 61 | 62 | def protect_from_wrap(text, groups, restore_info=None): 63 | """ 64 | Protect each match in groups that appears in text from wrapping by 65 | replacing it with a UUID in hex format (which won't be wrapped). 66 | 67 | Parameters 68 | ---------- 69 | 70 | text : str 71 | The text in which groups (like markdown links or latex) are to be 72 | protected from wrapping. 73 | 74 | groups : list of ``re.Match`` objects 75 | Matches from a regex search for whatever it is that needs to be 76 | protected. 77 | 78 | restore_info : dict, optional 79 | Dictionary of protected items. UUIDs are the keys and the string they 80 | represent are the values. Allows ``protect_from_wrap`` to be called 81 | multiple times with different `groups`, building up the dictionary 82 | needed to undo the protection. 83 | """ 84 | wrapped_text = text 85 | if restore_info is None: 86 | restore_info = {} 87 | 88 | # Reverse the groups so that the start positions in the string stays the 89 | # same as the text is processed. 90 | for group in groups[::-1]: 91 | link_id = uuid.uuid4().hex 92 | restore_info[link_id] = text[group.start(): group.end()] 93 | wrapped_text = (wrapped_text[:group.start()] + str(link_id) + 94 | wrapped_text[group.end():]) 95 | 96 | return wrapped_text, restore_info 97 | 98 | 99 | def restore_protected_content(text, restore_dict): 100 | for k, v in restore_dict.items(): 101 | text = text.replace(k, v) 102 | return text 103 | 104 | 105 | def wrap_notebook_markdown(nb_name, wrap_at=80): 106 | with open(nb_name) as f: 107 | nb = nbf.read(f, as_version=4) 108 | 109 | wrapper = TextWrapper(width=wrap_at, break_long_words=False, 110 | break_on_hyphens=False, 111 | replace_whitespace=False, drop_whitespace=True) 112 | 113 | for cell in markdown_cells(nb): 114 | link_groups = find_links(cell['source']) 115 | protected, restore = protect_from_wrap(cell['source'], link_groups) 116 | latex_groups = find_latex(protected) 117 | protected, restore = protect_from_wrap(protected, latex_groups, 118 | restore_info=restore) 119 | lines = protected.split('\n') 120 | 121 | new_lines = [] 122 | for line in lines: 123 | if line: 124 | new_lines.extend(wrapper.wrap(line)) 125 | else: 126 | new_lines.append('') 127 | 128 | new_source = '\n'.join(new_lines) 129 | cell['source'] = restore_protected_content(new_source, restore) 130 | 131 | return nb 132 | -------------------------------------------------------------------------------- /notebooks/add_style_cell.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 2, 6 | "metadata": {}, 7 | "outputs": [ 8 | { 9 | "name": "stdout", 10 | "output_type": "stream", 11 | "text": [ 12 | "Examining 03-01-Dark-current-The-ideal-case.ipynb\n", 13 | "\tInserting style cell in 03-01-Dark-current-The-ideal-case.ipynb\n", 14 | "Examining 01-00-Understanding-an-astronomical-CCD-image.ipynb\n", 15 | "\tNo insertion needed\n", 16 | "Examining 02-00-Handling-overscan-trimming-and-bias-subtraction.ipynb\n", 17 | "\tInserting style cell in 02-00-Handling-overscan-trimming-and-bias-subtraction.ipynb\n", 18 | 
"Examining 02-01-Calibrating-bias-images.ipynb\n", 19 | "\tInserting style cell in 02-01-Calibrating-bias-images.ipynb\n", 20 | "Examining 08-05-incorporating-masks-into-calibrated-science-images.ipynb\n", 21 | "\tNo insertion needed\n", 22 | "Examining 00-00-Preface.ipynb\n", 23 | "\tNo insertion needed\n", 24 | "Examining 07-01-Creating-a-sky-flat.ipynb\n", 25 | "\tNo insertion needed\n", 26 | "Examining 01-03-Construction-of-an-artificial-but-realistic-image.ipynb\n", 27 | "\tInserting style cell in 01-03-Construction-of-an-artificial-but-realistic-image.ipynb\n", 28 | "Examining 03-06-Combine-darks-for-use-in-later-calibration-steps.ipynb\n", 29 | "\tInserting style cell in 03-06-Combine-darks-for-use-in-later-calibration-steps.ipynb\n", 30 | "Examining 03-04-Handling-overscan-and-bias-for-dark-frames.ipynb\n", 31 | "\tNo insertion needed\n", 32 | "Examining 08-02-Creating-a-mask.ipynb\n", 33 | "\tInserting style cell in 08-02-Creating-a-mask.ipynb\n", 34 | "Examining 05-04-Combining-flats.ipynb\n", 35 | "\tInserting style cell in 05-04-Combining-flats.ipynb\n", 36 | "Examining 08-01-Identifying-hot-pixels.ipynb\n", 37 | "\tInserting style cell in 08-01-Identifying-hot-pixels.ipynb\n", 38 | "Examining 03-00-Dark-current-and-hot-pixels.ipynb\n", 39 | "\tNo insertion needed\n", 40 | "Examining 01-11-reading-images.ipynb\n", 41 | "\tNo insertion needed\n", 42 | "Examining 03-02-Real-dark-current-noise-and-other-artifacts.ipynb\n", 43 | "\tInserting style cell in 03-02-Real-dark-current-noise-and-other-artifacts.ipynb\n", 44 | "Examining 08-03-Cosmic-ray-removal.ipynb\n", 45 | "\tInserting style cell in 08-03-Cosmic-ray-removal.ipynb\n", 46 | "Examining 01-04-Nonuniform-sensitivity.ipynb\n", 47 | "\tNo insertion needed\n", 48 | "Examining 01-09-Calibration-choices-you-need-to-make.ipynb\n", 49 | "\tNo insertion needed\n", 50 | "Examining 05-03-Calibrating-the-flats.ipynb\n", 51 | "\tInserting style cell in 05-03-Calibrating-the-flats.ipynb\n", 52 | "Examining 08-00-Image-masking.ipynb\n", 53 | "\tNo insertion needed\n", 54 | "Examining 01-08-Overscan.ipynb\n", 55 | "\tInserting style cell in 01-08-Overscan.ipynb\n", 56 | "Examining 06-00-Reducing-science-images.ipynb\n", 57 | "\tInserting style cell in 06-00-Reducing-science-images.ipynb\n", 58 | "Examining 01-06-Image-combination.ipynb\n", 59 | "\tInserting style cell in 01-06-Image-combination.ipynb\n", 60 | "Examining 02-04-Combine-bias-images-to-make-master.ipynb\n", 61 | "\tInserting style cell in 02-04-Combine-bias-images-to-make-master.ipynb\n", 62 | "Examining 07-00-Combining-images.ipynb\n", 63 | "\tNo insertion needed\n", 64 | "Examining 03-05-Calibrate-dark-images.ipynb\n", 65 | "\tNo insertion needed\n", 66 | "Examining 01-05-Calibration-overview.ipynb\n", 67 | "\tInserting style cell in 01-05-Calibration-overview.ipynb\n", 68 | "Examining 07-03-Combination-with-alignment-based-on-star-positions-in-the-image.ipynb\n", 69 | "\tNo insertion needed\n", 70 | "Examining 07-02-Combination-with-alignment-via-WCS.ipynb\n", 71 | "\tNo insertion needed\n", 72 | "Examining 05-00-Flat-corrections.ipynb\n", 73 | "\tNo insertion needed\n" 74 | ] 75 | } 76 | ], 77 | "source": [ 78 | "%run add_matplotlib_style.py" 79 | ] 80 | }, 81 | { 82 | "cell_type": "code", 83 | "execution_count": null, 84 | "metadata": {}, 85 | "outputs": [], 86 | "source": [] 87 | } 88 | ], 89 | "metadata": { 90 | "kernelspec": { 91 | "display_name": "Python 3", 92 | "language": "python", 93 | "name": "python3" 94 | }, 95 | "language_info": { 96 | "codemirror_mode": { 97 
| "name": "ipython", 98 | "version": 3 99 | }, 100 | "file_extension": ".py", 101 | "mimetype": "text/x-python", 102 | "name": "python", 103 | "nbconvert_exporter": "python", 104 | "pygments_lexer": "ipython3", 105 | "version": "3.6.8" 106 | } 107 | }, 108 | "nbformat": 4, 109 | "nbformat_minor": 4 110 | } 111 | -------------------------------------------------------------------------------- /notebooks/process_for_book.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | import shutil 3 | import re 4 | 5 | import nbformat as nbf 6 | 7 | from wrap_notebook_lines import find_links 8 | from link_fix import markdown_cells 9 | 10 | input_nb_pattern = r'0[0123].*.ipynb' 11 | 12 | p = Path('.') 13 | (p / 'converted').mkdir(exist_ok=True) 14 | input_notebooks = p.glob(input_nb_pattern) 15 | 16 | 17 | def nuke_dir_tree(top): 18 | shutil.rmtree(top) 19 | 20 | 21 | def clean(): 22 | base = Path('.') 23 | nukes = ['example1-reduced', 'example2-reduced', 'example3-reduced'] 24 | for nuke in nukes: 25 | try: 26 | for p in (base / nuke).iterdir(): 27 | p.unlink() 28 | (base / nuke).rmdir() 29 | except FileNotFoundError: 30 | pass 31 | nuke_too = (base / 'path').glob('**/*.fits') 32 | try: 33 | for nuke in nuke_too: 34 | nuke.unlink() 35 | except FileNotFoundError: 36 | pass 37 | 38 | try: 39 | (base / 'example-with-cosmic-rays.fits').unlink() 40 | except FileNotFoundError: 41 | pass 42 | 43 | try: 44 | nuke_dir_tree(base / 'path') 45 | except FileNotFoundError: 46 | pass 47 | 48 | 49 | # CONVERT TO NOTEBOOK AND EXECUTE 50 | 51 | #later... 52 | # set to_conv (find . -depth 1 -name 0[01234568]-\?\?-\*.ipynb | sort -) 53 | 54 | # for conv in $to_conv 55 | # jupyter nbconvert --to notebook --execute --ExecutePreprocessor.timeout=-1 $conv 56 | # end 57 | 58 | # Fix names 59 | 60 | 61 | def transform_names(old_names): 62 | new_names = [] 63 | for path in old_names: 64 | name = str(path) 65 | name = name.replace('(', '') 66 | name = name.replace(')', '') 67 | name = name.replace('.nbconvert', '') 68 | 69 | n_dots = name.count('.') - 1 70 | if n_dots: 71 | name = name.replace('.', '-', n_dots) 72 | 73 | print(name) 74 | new_names.append(name) 75 | 76 | return new_names 77 | 78 | 79 | def replace_link_urls(text, old_ext='.ipynb', new_ext='.html', path='.', 80 | verbose=True): 81 | """ 82 | Replace markdown links whose name exactly matches a local file with 83 | extension ``old_ext`` with a new link that ends ``new_ext``. 84 | 85 | Useful for turning links between notebooks into links between rendered HTML 86 | pages. 87 | 88 | Parameters 89 | ---------- 90 | 91 | text : str 92 | The text to be 1) searched for markdown links that 2) will then be 93 | transformed if appropriate. 94 | 95 | old_ext : str, optional 96 | The old (i.e. original) file name extension. 97 | 98 | new_ext : str, optional 99 | The new file name extension that will replace ``old_ext``. 100 | 101 | path : str, optional 102 | The path on which to look for an existing file. 103 | 104 | verbose: bool, optional 105 | If ``True``, print a message whenever a link is replaced. 106 | """ 107 | p = Path(path) 108 | 109 | # Identify the markdown links first. This vastly simplifies the regex 110 | # needed later for identifying links we may need to transform. 111 | links = find_links(text) 112 | 113 | new_text = text 114 | 115 | # This regex will be used to search the *url* part of a markdown link only. 
116 | # It matches either a url that ends with old_ext or a url that has old_ext# 117 | # in it. That way links that include anchors will be transformed. 118 | match_ext = re.compile(r'.+' + old_ext + '$|.+' + old_ext + '#.*') 119 | 120 | # Work from the end towards the beginning of the string so 121 | # that indexes don't get messed up as we work. 122 | for link in links[::-1]: 123 | url = Path(link['link_url']) 124 | if str(url).count('#') > 1: 125 | raise ValueError(f'Do not know how to handle ' 126 | 'link {url} with so many #') 127 | try: 128 | uri, anchor = str(url).split('#') 129 | except ValueError: 130 | uri = str(url) 131 | anchor = '' 132 | 133 | if match_ext.findall(str(url)) and (p / uri).exists(): 134 | # Do not do a straight-up replace of old_ext with new_ext in case 135 | # someone tries something "clever" like foo.ipynb.ipynb. 136 | if anchor: 137 | new_url = '#'.join([str(url.with_suffix(new_ext)), anchor]) 138 | else: 139 | new_url = str(url.with_suffix(new_ext)) 140 | if verbose: 141 | print(f'Replacing {url} ------> {new_url}') 142 | new_text = (new_text[:link.start('link_url')] + str(new_url) + 143 | new_text[link.end('link_url'):]) 144 | 145 | return new_text 146 | 147 | 148 | def replace_links_in_notebook(nb_file): 149 | notebook = nbf.read(nb_file, as_version=4) 150 | for cell in markdown_cells(notebook): 151 | cell['source'] = replace_link_urls(cell['source']) 152 | with open(nb_file, 'w') as f: 153 | nbf.write(notebook, f) 154 | 155 | 156 | if __name__ == "__main__": 157 | converted_nb_pattern = '*.nbconvert.ipynb' 158 | 159 | old_names = [n for n in p.glob(converted_nb_pattern)] 160 | 161 | news = transform_names(old_names) 162 | 163 | for path, name in zip(old_names, news): 164 | path.rename(p / 'converted' / name) 165 | -------------------------------------------------------------------------------- /generate_notebooks_from_toc.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | from collections import OrderedDict, defaultdict 3 | import re 4 | from pathlib import Path 5 | 6 | import nbformat as nbf 7 | 8 | 9 | def parse_toc(toc_markdown): 10 | """ 11 | Generate set of essentially empty Jupyter notebooks from a table of 12 | contents in markdown. 13 | 14 | Parameters 15 | ---------- 16 | 17 | toc_markdown : str 18 | Path to markdown file that contains only the table of contents. See 19 | Notes for a description of the format. 20 | 21 | Returns 22 | ------- 23 | 24 | list 25 | A list of lists containing the table of contents entries. 26 | 27 | Notes 28 | ----- 29 | 30 | The markdown file should contain a table of contents with entries 31 | indicated as headers in the "hashtag" format. For example, 32 | 33 | # First TOC entry, will be numbered 00 34 | # Second entry, will be numbered 01 35 | ## Section of the second entry, will be numbered 01.00 36 | ## Another section of the second entry, numbered 01.01 37 | # Third entry, numbered 02 38 | ## Section of third entry, 02.00 39 | ## Second section, 02.01 40 | ### Subsection of the second section, numbered 02.01.00 41 | 42 | Though further nesting could in principle be allowed, it isn't. 
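    For the sample TOC above, the returned mapping would be expected to look
    roughly like ``toc['Second entry, will be numbered 01']`` holding an
    ``OrderedDict`` whose keys are that entry's two '##' section titles
    (illustrative only; the actual keys are whatever titles appear in the
    markdown file).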
43 | """ 44 | with open(toc_markdown) as f: 45 | lines = f.readlines() 46 | 47 | toc = defaultdict(OrderedDict) 48 | #toc = defaultdict(dd) 49 | current_level = 1 50 | current_dict = toc 51 | parents = [] 52 | # Nuke blank lines and removing whitespace 53 | lines = [line.strip() for line in lines if line.strip()] 54 | for line in lines: 55 | matches = re.match(r'^(#+) +(.*)', line) 56 | level, title = matches.group(1, 2) 57 | level_n = len(level) 58 | if not current_level and level_n != 1: 59 | raise ValueError("Improperly formatted TOC") 60 | if level_n == current_level: 61 | # Just make a new entry... 62 | current_dict[title] = OrderedDict() 63 | latest_title = title 64 | if level_n > current_level: 65 | # Time for a new dictionary 66 | parents.append(current_dict) 67 | current_dict = current_dict[latest_title] 68 | current_dict[title] = OrderedDict() 69 | latest_title = title 70 | if level_n < current_level: 71 | level = current_level 72 | while level_n < level: 73 | current_dict = parents.pop() 74 | level -= 1 75 | current_dict[title] = OrderedDict() 76 | latest_title = title 77 | current_level = level_n 78 | print(level_n, title) 79 | return toc 80 | 81 | 82 | def generate_notebooks(toc, directory, parent_string='', start=0, depth=0): 83 | """ 84 | Generate notebooks/cells/anchors from a table of contents. 85 | 86 | The top two levels each get their own notebooks; third-level 87 | TOC entries are created as h2 cells in the notebook and a links 88 | to those sections are added to the second-level notebook below 89 | that notebook's title. 90 | """ 91 | results = [] 92 | for num, entry in enumerate(toc.keys()): 93 | num_str = f'{num + start:02d}' 94 | if parent_string: 95 | num_str = '.'.join([parent_string, num_str]) 96 | if len(toc[entry].keys()) > 0: 97 | kids = generate_notebooks(toc[entry], directory, 98 | parent_string=num_str, start=1, 99 | depth=depth + 1) 100 | if kids is None: 101 | kids = [] 102 | else: 103 | kids = [] 104 | if depth == 2: 105 | entry_cell = nbf.v4.new_markdown_cell(entry) 106 | results.append(entry_cell) 107 | else: 108 | notebook = nbf.v4.new_notebook() 109 | toc_kids = '' 110 | if depth == 1: 111 | # Kids are cells... 112 | toc_entries = [] 113 | for cell in kids: 114 | cell_text = cell['source'] 115 | toc_link = cell_text.replace(' ', '-') 116 | toc_entries.append(f'+ [{cell_text}](#{toc_link})') 117 | cell['source'] = '## ' + cell_text 118 | toc_kids = '\n'.join(toc_entries) 119 | 120 | else: 121 | # Top level, add a '00' to title 122 | num_str += '.00' 123 | 124 | title_cell = '\n'.join([f'# {entry}', toc_kids]) 125 | title_cell = nbf.v4.new_markdown_cell(title_cell) 126 | notebook.cells = [cell for cell in [title_cell] + kids] 127 | notebook_title = num_str + '-' + entry.replace(' ', '-') + '.ipynb' 128 | path = Path(directory) 129 | path.mkdir(exist_ok=True) 130 | nbf.write(notebook, str(path / notebook_title)) 131 | 132 | print(' ' * depth, num_str) 133 | return results 134 | 135 | 136 | if __name__ == "__main__": 137 | parser = argparse.ArgumentParser(description='Generate notebooks from ' 138 | 'markdown table of contents.') 139 | parser.add_argument('toc', 140 | help='Table of contents from which to generate ' 141 | 'the notebooks. 
Heading level (with #s) used ' 142 | 'to determine chapters/subsections.') 143 | parser.add_argument('--destination-dir', default='.', 144 | help='Directory to which results notebooks ' 145 | 'should be written.') 146 | 147 | args = parser.parse_args() 148 | toc = parse_toc(args.toc) 149 | generate_notebooks(toc, args.destination_dir) 150 | -------------------------------------------------------------------------------- /notebooks/01-00-Understanding-an-astronomical-CCD-image.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Understanding an astronomical CCD image\n" 8 | ] 9 | }, 10 | { 11 | "cell_type": "markdown", 12 | "metadata": {}, 13 | "source": [ 14 | "An astronomical image like the one shown below is essentially a two-dimensional\n", 15 | "array of values. In an ideal world, the value of each pixel (a pixel being one\n", 16 | "element of the array) would be directly proportional to the amount of light that\n", 17 | "fell on the pixel during the time the camera's shutter was open.\n", 18 | "\n", 19 | "But the ideal scenario does not in fact hold true. A solid understanding of\n", 20 | "*why* pixel values are not directly proportional to light is useful before\n", 21 | "diving into the details of image reduction." 22 | ] 23 | }, 24 | { 25 | "cell_type": "markdown", 26 | "metadata": {}, 27 | "source": [ 28 | "## Counts, photons, and electrons\n", 29 | "\n", 30 | "The number stored in a raw astronomical image straight off a telescope is called\n", 31 | "an Analog Digital Unit (ADU) or count, because internally the camera converts\n", 32 | "the analog voltage in each pixel to a numerical count. The counts of interest to\n", 33 | "an astronomer are the ones generated via the photoelectric effect when a photon\n", 34 | "hits the detector. The number of photons (or equivalently, electrons) that reach\n", 35 | "the pixel is related to the counts in the pixel by the gain.\n", 36 | "\n", 37 | "The gain is typically provided by the manufacturer of the camera and can be\n", 38 | "measured from a combination of bias and flat images (Howell 2002; p. 71).\n", 39 | "\n", 40 | "**Take note** that trying to convert a raw image count to photons/electrons by\n", 41 | "multiplying by the gain will not be meaningful because the raw counts include\n", 42 | "contributions from sources other than light." 43 | ] 44 | }, 45 | { 46 | "cell_type": "markdown", 47 | "metadata": {}, 48 | "source": [ 49 | "## Not all counts are (interesting) light\n", 50 | "\n", 51 | "There are several contributions to the counts in a pixel. Image reduction is\n", 52 | "essentially the process of removing all of these except those due to light from\n", 53 | "an astronomical object:\n", 54 | "\n", 55 | "+ An offset voltage called **bias** is applied to the CCD chip to ensure there\n", 56 | "are no negative counts during readout. There are small variations in the value\n", 57 | "of the bias across the chip, and there can be small variations in the bias level\n", 58 | "over time.\n", 59 | "+ Counts can be generated in a pixel due to thermal motion of electrons in CCD;\n", 60 | "cooling a CCD reduces, but may not fully eliminate, this **dark current**. In\n", 61 | "modern CCDs the dark current is often ignorable exept for a small fraction of\n", 62 | "pixels. 
Dark current is typically reported in electrons/second/pixel, and\n", 63 | "depends strongly on temperature.\n", 64 | "+ There is **read noise** intrinsic to the electronics of the CCD. It is\n", 65 | "impossible to eliminate this noise (it's present in every image taken by the\n", 66 | "camera) but there are approaches to minimizing it. Read noise is typically\n", 67 | "reported in electrons as it can depend on temperature.\n", 68 | "+ Some light received by the telescope is scattered light coming from the night\n", 69 | "sky. The amount of **sky background** depends on the filter passband, the\n", 70 | "atmospheric conditions, and the local light sources.\n", 71 | "+ Though a CCD chip is fairly small, it's not unsual for **cosmic rays** to hit\n", 72 | "the chip, releasing charge that is then converted to counts.\n", 73 | "\n", 74 | "Whatever remains after taking all of those things away is, in principle, light\n", 75 | "from astronomical sources.\n", 76 | "\n", 77 | "In practice, there are additional complications." 78 | ] 79 | }, 80 | { 81 | "cell_type": "markdown", 82 | "metadata": {}, 83 | "source": [ 84 | "## CCDs are not perfect\n", 85 | "\n", 86 | "There are a number of issues that affect the sensitivity of the CCD to light,\n", 87 | "some of which can be corrected for and some of which cannot.\n", 88 | "\n", 89 | "+ Vignetting, a darkening of the images in the corners, is common and\n", 90 | "correctable.\n", 91 | "+ Dust in the optical path, which causes \"donuts\" or \"worms\" on the image, is\n", 92 | "also common and correctable.\n", 93 | "+ Variations in the sensitivity of individual pixels are also common and\n", 94 | "correctable.\n", 95 | "+ Dead pixels, which are pixels that don't respond to light, cannot be corrected\n", 96 | "for.\n", 97 | "\n", 98 | "**Flat** corrections attempt to remove many of these effects. The idea is to\n", 99 | "image something which is uniformly illuminated as a way to measure variations in\n", 100 | "sensitivity (regardless of cause) and compensate for them." 101 | ] 102 | }, 103 | { 104 | "cell_type": "markdown", 105 | "metadata": {}, 106 | "source": [ 107 | "## References\n", 108 | "\n", 109 | "Howell, S., *Handbook of CCD Astronomy*, Second Ed, Cambridge University Press\n", 110 | "2006" 111 | ] 112 | } 113 | ], 114 | "metadata": { 115 | "kernelspec": { 116 | "display_name": "Python 3", 117 | "language": "python", 118 | "name": "python3" 119 | }, 120 | "language_info": { 121 | "codemirror_mode": { 122 | "name": "ipython", 123 | "version": 3 124 | }, 125 | "file_extension": ".py", 126 | "mimetype": "text/x-python", 127 | "name": "python", 128 | "nbconvert_exporter": "python", 129 | "pygments_lexer": "ipython3", 130 | "version": "3.6.8" 131 | } 132 | }, 133 | "nbformat": 4, 134 | "nbformat_minor": 4 135 | } 136 | -------------------------------------------------------------------------------- /notebooks/05-00-Flat-corrections.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": { 6 | "toc": true 7 | }, 8 | "source": [ 9 | "

Table of Contents\n", 10 | "\n", 14 | "1  What are flat corrections?\n", 18 | "2  Overview of taking images for flat corrections\n", 22 | "3  Calibrating and combining flat images
" 23 | ] 24 | }, 25 | { 26 | "cell_type": "markdown", 27 | "metadata": {}, 28 | "source": [ 29 | "# Flat corrections\n" 30 | ] 31 | }, 32 | { 33 | "cell_type": "markdown", 34 | "metadata": {}, 35 | "source": [ 36 | "## What are flat corrections?\n", 37 | "\n", 38 | "The purpose of flat corrections is to compensate for any nonuniformity in the\n", 39 | "response of the CCD to light. There can be several reasons that the response is\n", 40 | "not uniform across the detector:\n", 41 | "\n", 42 | "+ Variations in the sensitivity of pixels in the detector, though this source is\n", 43 | "usually small.\n", 44 | "+ Dust on either the filter or the glass window covering the detector.\n", 45 | "+ Vignetting, a dimming in the corners of the image.\n", 46 | "+ Anything else in the optical path that affects how much light reaches the\n", 47 | "sensor.\n", 48 | "\n", 49 | "The fix for nonuniformity is the same in all cases: take an image in which\n", 50 | "the illumination is uniform and use that to measure the response of the CCD.\n", 51 | "\n", 52 | "Unfortunately, achieving uniform illumination is difficult, and uniform\n", 53 | "illumination with the same spectrum as the astronomical objects of interest is impossible." 54 | ] 55 | }, 56 | { 57 | "cell_type": "markdown", 58 | "metadata": {}, 59 | "source": [ 60 | "## Overview of taking images for flat corrections\n", 61 | "\n", 62 | "There are a few ways of taking \"flat\" images:\n", 63 | "\n", 64 | "+ Twilight flats are images of the sky near zenith taken around sunrise or\n", 65 | "sunset.\n", 66 | "+ Dome flats are images of the inside of the dome (typically of a smooth\n", 67 | "surface, not of the dome itself), illuminated by some light source in the dome.\n", 68 | "For smaller telescopes an electroluminescent or LED illuminated panel can be\n", 69 | "used as the light source.\n", 70 | "+ Sky flats are composed of several science images.\n", 71 | "\n", 72 | "Ideally the flat images have fairly high counts (roughly half the maximum counts\n", 73 | "of the detector) so that the *only* important source of error is Poisson error\n", 74 | "due to the light in the flat images, and so that the signal-to-noise ratio in\n", 75 | "those images is essentially zero." 76 | ] 77 | }, 78 | { 79 | "cell_type": "markdown", 80 | "metadata": {}, 81 | "source": [ 82 | "## Calibrating and combining flat images\n", 83 | "\n", 84 | "The process of calibrating and combining flat frames is largely the same\n", 85 | "regardless of the light source being used.\n", 86 | "\n", 87 | "It is useful to think of flat frames as just like science images of stars or\n", 88 | "galaxies. The telescope is taking a picture of a light source, so bias and dark\n", 89 | "need to be removed from the individual images.\n", 90 | "\n", 91 | "When combining the images there is a new step we have not discussed yet:\n", 92 | "normalizing (also called rescaling) the calibrated flat frames to a common mean\n", 93 | "or median before combining them. In both sky and twilight flats the illumination\n", 94 | "varies naturally from frame-to-frame. If the images are not scaled to a common\n", 95 | "value before combining, then the ones taken while the sky is brighter will\n", 96 | "inappropriately dominate the result. Dome flats ought to be, in principle,\n", 97 | "perfectly stable with no time variation in their illumination. 
In practice,\n", 98 | "every light source varies at some level; if you are trying to correct 1%\n", 99 | "differences in illumination, then 1% fluctuations in the light source matter.\n", 100 | "\n", 101 | "Typically the mean or median is scaled to 1.0 before combining so that when the\n", 102 | "science images are divided by the calibrated, combined flats, the science image\n", 103 | "values do not change too much." 104 | ] 105 | } 106 | ], 107 | "metadata": { 108 | "kernelspec": { 109 | "display_name": "Python 3", 110 | "language": "python", 111 | "name": "python3" 112 | }, 113 | "language_info": { 114 | "codemirror_mode": { 115 | "name": "ipython", 116 | "version": 3 117 | }, 118 | "file_extension": ".py", 119 | "mimetype": "text/x-python", 120 | "name": "python", 121 | "nbconvert_exporter": "python", 122 | "pygments_lexer": "ipython3", 123 | "version": "3.6.8" 124 | }, 125 | "toc": { 126 | "base_numbering": 1, 127 | "nav_menu": {}, 128 | "number_sections": true, 129 | "sideBar": true, 130 | "skip_h1_title": true, 131 | "title_cell": "Table of Contents", 132 | "title_sidebar": "Contents", 133 | "toc_cell": true, 134 | "toc_position": {}, 135 | "toc_section_display": true, 136 | "toc_window_display": false 137 | } 138 | }, 139 | "nbformat": 4, 140 | "nbformat_minor": 2 141 | } 142 | -------------------------------------------------------------------------------- /notebooks/08-05-incorporating-masks-into-calibrated-science-images.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Incorporating masks into calibrated science images" 8 | ] 9 | }, 10 | { 11 | "cell_type": "markdown", 12 | "metadata": {}, 13 | "source": [ 14 | "There are three ways of determining which pixels in a CCD image may need to be\n", 15 | "masked (this is in addition to whatever mask or bit fields the observatory at\n", 16 | "which you are taking images may provide).\n", 17 | "\n", 18 | "Two of them are the same for all of the science images:\n", 19 | "\n", 20 | "+ Hot pixels unlikely to be properly calibrated by subtracting dark current,\n", 21 | "discussed in [Identifying hot pixels](08-01-Identifying-hot-pixels.ipynb).\n", 22 | "+ Bad pixels identified by `ccdproc.ccdmask` from flat field images, discussed\n", 23 | "in [Creating a mask with `ccdmask`](08-02-Creating-a-mask.ipynb).\n", 24 | "\n", 25 | "The third, identifying cosmic rays, discussed in\n", 26 | "[Cosmic ray removal](08-03-Cosmic-ray-removal.ipynb), will by its nature be different for each\n", 27 | "science image.\n", 28 | "\n", 29 | "The first two masks could be added to science images at the time the science\n", 30 | "images are calibrated, if desired. They are added to the science images here, as\n", 31 | "a separate step, because in many situations it is fine to omit masking entirely\n", 32 | "and there is no particular advantage to introducing it earlier." 33 | ] 34 | }, 35 | { 36 | "cell_type": "markdown", 37 | "metadata": {}, 38 | "source": [ 39 | "We begin, as usual, with a couple of imports." 
40 | ] 41 | }, 42 | { 43 | "cell_type": "code", 44 | "execution_count": null, 45 | "metadata": {}, 46 | "outputs": [], 47 | "source": [ 48 | "from pathlib import Path\n", 49 | "\n", 50 | "from astropy import units as u\n", 51 | "from astropy.nddata import CCDData\n", 52 | "\n", 53 | "import ccdproc as ccdp" 54 | ] 55 | }, 56 | { 57 | "cell_type": "markdown", 58 | "metadata": {}, 59 | "source": [ 60 | "## Read masks that are the same for all of the science images\n", 61 | "\n", 62 | "In previous notebooks we constructed a mask based on the dark current and a mask\n", 63 | "created by `ccdmask` from a flat image. Displaying the summary of the the\n", 64 | "information about the reduced images is a handy way to determine which files are\n", 65 | "the masks." 66 | ] 67 | }, 68 | { 69 | "cell_type": "code", 70 | "execution_count": null, 71 | "metadata": {}, 72 | "outputs": [], 73 | "source": [ 74 | "ex2_path = Path('example2-reduced')\n", 75 | "\n", 76 | "ifc = ccdp.ImageFileCollection(ex2_path)\n", 77 | "ifc.summary['file', 'imagetyp']" 78 | ] 79 | }, 80 | { 81 | "cell_type": "markdown", 82 | "metadata": {}, 83 | "source": [ 84 | "We read each of those in below, converting the mask to boolean after we read it." 85 | ] 86 | }, 87 | { 88 | "cell_type": "code", 89 | "execution_count": null, 90 | "metadata": {}, 91 | "outputs": [], 92 | "source": [ 93 | "mask_ccdmask = CCDData.read(ex2_path / 'mask_from_ccdmask.fits', unit=u.dimensionless_unscaled)\n", 94 | "mask_ccdmask.data = mask_ccdmask.data.astype('bool')\n", 95 | "\n", 96 | "mask_hot_pix = CCDData.read(ex2_path / 'mask_from_dark_current.fits', unit=u.dimensionless_unscaled)\n", 97 | "mask_hot_pix.data = mask_hot_pix.data.astype('bool')" 98 | ] 99 | }, 100 | { 101 | "cell_type": "markdown", 102 | "metadata": {}, 103 | "source": [ 104 | "### Combining the masks\n", 105 | "\n", 106 | "We combine the masks using a logical \"OR\" since we want to mask out pixels that are\n", 107 | "bad for any reason." 108 | ] 109 | }, 110 | { 111 | "cell_type": "code", 112 | "execution_count": null, 113 | "metadata": {}, 114 | "outputs": [], 115 | "source": [ 116 | "combined_mask = mask_ccdmask.data | mask_hot_pix.data" 117 | ] 118 | }, 119 | { 120 | "cell_type": "markdown", 121 | "metadata": {}, 122 | "source": [ 123 | "It turns out we are masking roughly 0.056% of the pixels so far." 124 | ] 125 | }, 126 | { 127 | "cell_type": "code", 128 | "execution_count": null, 129 | "metadata": {}, 130 | "outputs": [], 131 | "source": [ 132 | "combined_mask.sum()" 133 | ] 134 | }, 135 | { 136 | "cell_type": "markdown", 137 | "metadata": {}, 138 | "source": [ 139 | "## Detect cosmic rays\n", 140 | "\n", 141 | "Cosmic ray detection was discussed in detail in an\n", 142 | "[earlier section](08-03-Cosmic-ray-removal.ipynb). Here we loop over all of the calibrated\n", 143 | "science images and:\n", 144 | "\n", 145 | "+ detect cosmic rays in them,\n", 146 | "+ combine the cosmic ray mask with the mask that applies to all images,\n", 147 | "+ set the mask of the image to the overall mask, and\n", 148 | "+ save the image, overwriting the calibrated science image without the mask.\n", 149 | "\n", 150 | "Since the cosmic ray detection takes a while, a status message is displayed\n", 151 | "before each image is processed." 
152 | ] 153 | }, 154 | { 155 | "cell_type": "code", 156 | "execution_count": null, 157 | "metadata": {}, 158 | "outputs": [], 159 | "source": [ 160 | "ifc.files_filtered()\n", 161 | "for ccd, file_name in ifc.ccds(imagetyp='light', return_fname=True):\n", 162 | " print('Working on file {}'.format(file_name))\n", 163 | " new_ccd = ccdp.cosmicray_lacosmic(ccd, readnoise=10, sigclip=8, verbose=True)\n", 164 | " overall_mask = new_ccd.mask | combined_mask\n", 165 | " # If there was already a mask, keep it.\n", 166 | " if ccd.mask is not None:\n", 167 | " ccd.mask = ccd.mask | overall_mask\n", 168 | " else:\n", 169 | " ccd.mask = overall_mask\n", 170 | " # Files can be overwritten only with an explicit option\n", 171 | " ccd.write(ifc.location / file_name, overwrite=True)" 172 | ] 173 | } 174 | ], 175 | "metadata": { 176 | "kernelspec": { 177 | "display_name": "Python 3", 178 | "language": "python", 179 | "name": "python3" 180 | }, 181 | "language_info": { 182 | "codemirror_mode": { 183 | "name": "ipython", 184 | "version": 3 185 | }, 186 | "file_extension": ".py", 187 | "mimetype": "text/x-python", 188 | "name": "python", 189 | "nbconvert_exporter": "python", 190 | "pygments_lexer": "ipython3", 191 | "version": "3.6.8" 192 | } 193 | }, 194 | "nbformat": 4, 195 | "nbformat_minor": 2 196 | } 197 | -------------------------------------------------------------------------------- /notebooks/01-04-Nonuniform-sensitivity.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Nonuniform sensitivity" 8 | ] 9 | }, 10 | { 11 | "cell_type": "markdown", 12 | "metadata": {}, 13 | "source": [ 14 | "## Background\n", 15 | "\n", 16 | "Not all pixels in a camera have the same sensitivity to light: there are\n", 17 | "intrinsic differences from pixel-to-pixel. Vignetting, a dimming near the\n", 18 | "corners of an image caused by the optical system to which the camera is\n", 19 | "attached, and dust on optical elements such as filters, the glass window\n", 20 | "covering the CCD, and the CCD chip itself can also block some light.\n", 21 | "\n", 22 | "Vignetting and dust can reduce the amount of light reaching the CCD chip while\n", 23 | "pixel-to-pixel sensitivity variations affects the counts read from the chip.\n", 24 | "\n", 25 | "The code to produce the simulated sensitivity map (aka flat image) is long\n", 26 | "enough that is not included in this notebook. We load it instead from\n", 27 | "[image_sim.py](image_sim.py)." 28 | ] 29 | }, 30 | { 31 | "cell_type": "code", 32 | "execution_count": null, 33 | "metadata": {}, 34 | "outputs": [], 35 | "source": [ 36 | "import numpy as np\n", 37 | "\n", 38 | "from convenience_functions import show_image\n", 39 | "import image_sim as isim" 40 | ] 41 | }, 42 | { 43 | "cell_type": "markdown", 44 | "metadata": {}, 45 | "source": [ 46 | "## A sample flat image\n", 47 | "\n", 48 | "The sample flat image below has the same size as the simulated image in the\n", 49 | "previous notebook." 
50 | ] 51 | }, 52 | { 53 | "cell_type": "code", 54 | "execution_count": null, 55 | "metadata": {}, 56 | "outputs": [], 57 | "source": [ 58 | "image = np.zeros([2000, 2000])\n", 59 | "flat = isim.sensitivity_variations(image)" 60 | ] 61 | }, 62 | { 63 | "cell_type": "code", 64 | "execution_count": null, 65 | "metadata": {}, 66 | "outputs": [], 67 | "source": [ 68 | "show_image(flat, cmap='gray')" 69 | ] 70 | }, 71 | { 72 | "cell_type": "markdown", 73 | "metadata": {}, 74 | "source": [ 75 | "The \"donuts\" in the image are dust on elements like filters in the optical path.\n", 76 | "Note that the size of the variations is small, a few percent at most." 77 | ] 78 | }, 79 | { 80 | "cell_type": "markdown", 81 | "metadata": {}, 82 | "source": [ 83 | "## Effect of nonuniform sensitivity on images\n", 84 | "\n", 85 | "Recall that an image read off a CCD, ignoring variations in sensitivity, can be\n", 86 | "thought of as a combination of several pieces:\n", 87 | "\n", 88 | "$$\n", 89 | "\\text{image} = \\text{bias} + \\text{noise} + \\text{dark current} + \\text{sky} + \\text{stars}\n", 90 | "$$\n", 91 | "\n", 92 | "The effect of sensitivity variations is to reduce the amount of *light* reaching\n", 93 | "the sensor. In the equation above, that means that the flat multiplies just the\n", 94 | "sky and stars portion of the input:\n", 95 | "\n", 96 | "$$\n", 97 | "\\text{image} = \\text{bias} + \\text{noise} + \\text{dark current} + \\text{flat} \\times (\\text{sky} + \\text{stars})\n", 98 | "$$\n" 99 | ] 100 | }, 101 | { 102 | "cell_type": "markdown", 103 | "metadata": {}, 104 | "source": [ 105 | "## A realistic image" 106 | ] 107 | }, 108 | { 109 | "cell_type": "markdown", 110 | "metadata": {}, 111 | "source": [ 112 | "In the cell below we construct the last image from the previous notebook. Recall\n", 113 | "that there we used a read noise of 5 electrons/pixel, dark current of 0.1\n", 114 | "electron/pix/sec, bias level of 1100, and sky background of 20 counts." 115 | ] 116 | }, 117 | { 118 | "cell_type": "code", 119 | "execution_count": null, 120 | "metadata": {}, 121 | "outputs": [], 122 | "source": [ 123 | "gain = 1.0\n", 124 | "exposure = 30.0\n", 125 | "dark = 0.1\n", 126 | "sky_counts = 20\n", 127 | "bias_level = 1100\n", 128 | "read_noise_electrons = 5\n", 129 | "max_star_counts = 2000\n", 130 | "bias_only = isim.bias(image, bias_level, realistic=True)\n", 131 | "noise_only = isim.read_noise(image, read_noise_electrons, gain=gain)\n", 132 | "dark_only = isim.dark_current(image, dark, exposure, gain=gain, hot_pixels=True)\n", 133 | "sky_only = isim.sky_background(image, sky_counts, gain=gain)\n", 134 | "stars_only = isim.stars(image, 50, max_counts=max_star_counts)" 135 | ] 136 | }, 137 | { 138 | "cell_type": "markdown", 139 | "metadata": {}, 140 | "source": [ 141 | "The individual pieces of the image are assembled below; it is the inclusion of\n", 142 | "the flat that makes this the closest of the simulated images to a realistic\n", 143 | "images." 
144 | ] 145 | }, 146 | { 147 | "cell_type": "code", 148 | "execution_count": null, 149 | "metadata": {}, 150 | "outputs": [], 151 | "source": [ 152 | "final_image = bias_only + noise_only + dark_only + flat * (sky_only + stars_only)" 153 | ] 154 | }, 155 | { 156 | "cell_type": "code", 157 | "execution_count": null, 158 | "metadata": {}, 159 | "outputs": [], 160 | "source": [ 161 | "show_image(final_image, cmap='gray', percu=99.9)" 162 | ] 163 | }, 164 | { 165 | "cell_type": "markdown", 166 | "metadata": {}, 167 | "source": [ 168 | "Visually, this does not look any different than the final image in the previous\n", 169 | "notebook; the effects of sensitivity variations are typically not evident in raw\n", 170 | "images unless the sky background is large.\n", 171 | "\n", 172 | "You can see the effect by artificially increasing the sky background." 173 | ] 174 | }, 175 | { 176 | "cell_type": "code", 177 | "execution_count": null, 178 | "metadata": {}, 179 | "outputs": [], 180 | "source": [ 181 | "final_image2 = bias_only + noise_only + dark_only + flat * (isim.sky_background(image, 100 * sky_counts, gain=gain) + stars_only)\n", 182 | "show_image(final_image2, cmap='gray')" 183 | ] 184 | } 185 | ], 186 | "metadata": { 187 | "kernelspec": { 188 | "display_name": "Python 3", 189 | "language": "python", 190 | "name": "python3" 191 | }, 192 | "language_info": { 193 | "codemirror_mode": { 194 | "name": "ipython", 195 | "version": 3 196 | }, 197 | "file_extension": ".py", 198 | "mimetype": "text/x-python", 199 | "name": "python", 200 | "nbconvert_exporter": "python", 201 | "pygments_lexer": "ipython3", 202 | "version": "3.6.8" 203 | } 204 | }, 205 | "nbformat": 4, 206 | "nbformat_minor": 2 207 | } 208 | -------------------------------------------------------------------------------- /notebooks/00-00-Preface.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Preface\n" 8 | ] 9 | }, 10 | { 11 | "cell_type": "markdown", 12 | "metadata": {}, 13 | "source": [ 14 | "The purpose of this text is to walk through image reduction and photometry using\n", 15 | "Python, especially Astropy and its affiliated packages. It assumes some basic\n", 16 | "familiarity with astronomical images and with Python. The inspiration for this\n", 17 | "work is a pair of guides written for IRAF, [\"A User's Guide to CCD Reductions with IRAF\" (Massey 1997)](http://www.ifa.hawaii.edu/~meech/a399/handouts/ccduser3.pdf) and\n", 18 | "[\"A User's Guide to Stellar CCD Photometry with IRAF\" (Massey and Davis 1992)](https://www.mn.uio.no/astro/english/services/it/help/visualization/iraf/daophot2.pdf).\n", 19 | "\n", 20 | "The focus is on optical/IR images, not spectra." 21 | ] 22 | }, 23 | { 24 | "cell_type": "markdown", 25 | "metadata": {}, 26 | "source": [ 27 | "## Credits" 28 | ] 29 | }, 30 | { 31 | "cell_type": "markdown", 32 | "metadata": {}, 33 | "source": [ 34 | "### Authors" 35 | ] 36 | }, 37 | { 38 | "cell_type": "markdown", 39 | "metadata": {}, 40 | "source": [ 41 | "This guide was written by Matt Craig and Lauren Chambers. Editing was done by\n", 42 | "Lauren Glattly.\n", 43 | "\n", 44 | "New contributors will be moved from the acknowledgments to the author list when\n", 45 | "they have either written roughly the equivalent of one section or provided\n", 46 | "detailed review of several sections. 
This is intended as a rough guideline, and\n", 47 | "when in doubt we will lean towards including people as authors rather than\n", 48 | "excluding them." 49 | ] 50 | }, 51 | { 52 | "cell_type": "markdown", 53 | "metadata": {}, 54 | "source": [ 55 | "### Funding\n", 56 | "\n", 57 | "Made possible by the Astropy Project and ScienceBetter Consulting through\n", 58 | "financial support from the Community Software Initiative at the Space Telescope\n", 59 | "Science Institute." 60 | ] 61 | }, 62 | { 63 | "cell_type": "markdown", 64 | "metadata": {}, 65 | "source": [ 66 | "### Acknowledgments\n", 67 | "\n", 68 | "The following people contributed to this work by making suggestions, testing\n", 69 | "code, or providing feedback on drafts. We are greatful for their assistance!\n", 70 | "\n", 71 | "+ Simon Conseil\n", 72 | "+ Lia Corrales\n", 73 | "+ Kelle Cruz\n", 74 | "+ Adam Ginsburg\n", 75 | "+ Richard Hendricks\n", 76 | "+ Stuart Littlefair\n", 77 | "+ Isobel Snellenberger\n", 78 | "+ Kris Stern\n", 79 | "+ Thomas Stibor\n", 80 | "\n", 81 | "If you have provided feedback and are not listed above, we apologize -- please\n", 82 | "[open an issue here](https://github.com/astropy/ccd-reduction-and-photometry-guide/issues/new) so we can fix it." 83 | ] 84 | }, 85 | { 86 | "cell_type": "markdown", 87 | "metadata": {}, 88 | "source": [ 89 | "## Resources\n", 90 | "\n", 91 | "This astronomical content work was inspired by, and guided by, the excellent\n", 92 | "resources below:\n", 93 | "\n", 94 | "+ [\"A User's Guide to CCD Reductions with IRAF\" (Massey 1997)](http://www.ifa.hawaii.edu/~meech/a399/handouts/ccduser3.pdf) is very thorough, but IRAF has become more\n", 95 | "difficult to install over time and is no longer supported.\n", 96 | "+ [\"A User's Guide to Stellar CCD Photometry with IRAF\" (Massey and Davis 1992)](https://www.mn.uio.no/astro/english/services/it/help/visualization/iraf/daophot2.pdf).\n", 97 | "+ [The Handbook of Astronomical Image Processing](https://www.amazon.com/Handbook-Astronomical-Image-Processing/dp/0943396824) by Richard Berry and James Burnell. This\n", 98 | "provides a very detailed overview of data reduction and photometry. One virtue\n", 99 | "is its inclusion of *real* images with defects.\n", 100 | "+ The [AAVSO CCD Obseving Manual](https://www.aavso.org/sites/default/files/publications_files/ccd_photometry_guide/CCDPhotometryGuide.pdf) provides a complete introduction to CCD data reduction and photometry. \n", 101 | "+ [A Beginner's Guide to Working with Astronomical Data](https://arxiv.org/abs/1905.13189) is much broader than this guide. It\n", 102 | "includes an introduction to Python." 103 | ] 104 | }, 105 | { 106 | "cell_type": "markdown", 107 | "metadata": {}, 108 | "source": [ 109 | "## Software setup\n", 110 | "\n", 111 | "The recommended way to get set up to use this guide is to use the\n", 112 | "[Anaconda Python distribution](https://www.anaconda.com/download/) (or the much smaller\n", 113 | "[miniconda installer](https://conda.io/miniconda.html)). Once you have that, you can install\n", 114 | "everything you need with:\n", 115 | "\n", 116 | "```\n", 117 | "conda install -c astropy ccdproc photutils ipywidgets matplotlib\n", 118 | "```" 119 | ] 120 | }, 121 | { 122 | "cell_type": "markdown", 123 | "metadata": {}, 124 | "source": [ 125 | "## Data files\n", 126 | "\n", 127 | "The list of the data files, and their approximate sizes, is below. 
You can\n", 128 | "either download them one by one, or use the download helper included with these\n", 129 | "notebooks.\n", 130 | "\n", 131 | "### Use this in a terminal to download the data\n", 132 | "\n", 133 | "```console\n", 134 | "$ python download_data.py\n", 135 | "```\n", 136 | "\n", 137 | "### Use this in a notebook cell to download the data\n", 138 | "\n", 139 | "```python\n", 140 | "%run download_data.py\n", 141 | "```\n", 142 | "\n", 143 | "### List of data files\n", 144 | "\n", 145 | "+ [Combination of 100 bias images (26MB)](https://zenodo.org/record/3320113/files/combined_bias_100_images.fit.bz2?download=1) (DOI: https://doi.org/10.5281/zenodo.3320113)\n", 146 | "+ [Single dark frame, exposure time 1,000 seconds (11MB)](https://zenodo.org/record/3312535/files/dark-test-0002d1000.fit.bz2?download=1) (DOI: https://doi.org/10.5281/zenodo.3312535)\n", 147 | "+ [Combination of several dark frames, each 1,000 exposure time (52MB)](https://zenodo.org/record/2634177/files/master_dark_exposure_1000.0.fit.bz2?download=1) (DOI: https://doi.org/10.5281/zenodo.2634177)\n", 148 | "+ [Combination of several dark frames, each 300 sec (7MB)](https://zenodo.org/record/3332818/files/combined_dark_300.000.fits.bz2?download=1) (DOI: https://doi.org/10.5281/zenodo.3332818)\n", 149 | "+ **\"Example 1\" in the reduction notebooks:** [Several images from the Palomar Large Format Camera, Chip 0 **(162MB)**](https://zenodo.org/record/3254683/files/example-cryo-LFC.tar.bz2?download=1)\n", 150 | "(DOI: https://doi.org/10.5281/zenodo.3254683)\n", 151 | "+ **\"Example 2\" in the reduction notebooks:** [Several images from an Andor Aspen CG16M **(483MB)**](https://zenodo.org/record/3245296/files/example-thermo-electric.tar.bz2?download=1)\n", 152 | "(DOI: https://doi.org/10.5281/zenodo.3245296)" 153 | ] 154 | } 155 | ], 156 | "metadata": { 157 | "kernelspec": { 158 | "display_name": "Python 3", 159 | "language": "python", 160 | "name": "python3" 161 | }, 162 | "language_info": { 163 | "codemirror_mode": { 164 | "name": "ipython", 165 | "version": 3 166 | }, 167 | "file_extension": ".py", 168 | "mimetype": "text/x-python", 169 | "name": "python", 170 | "nbconvert_exporter": "python", 171 | "pygments_lexer": "ipython3", 172 | "version": "3.7.3" 173 | } 174 | }, 175 | "nbformat": 4, 176 | "nbformat_minor": 4 177 | } 178 | -------------------------------------------------------------------------------- /notebooks/02-00-Handling-overscan-trimming-and-bias-subtraction.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Handling overscan, trimming, and bias subtraction\n" 8 | ] 9 | }, 10 | { 11 | "cell_type": "markdown", 12 | "metadata": {}, 13 | "source": [ 14 | "## Introduction" 15 | ] 16 | }, 17 | { 18 | "cell_type": "markdown", 19 | "metadata": {}, 20 | "source": [ 21 | "The bias in a CCD camera is a DC offset applied to all pixels so that when the\n", 22 | "voltage in each pixel is converted to a number, the number will always be\n", 23 | "positive. In an ideal CCD the bias would be the same for every pixel and not\n", 24 | "change over time. 
In practice, the bias is slightly different for each pixel,\n", 25 | "and can vary by a count or two from night to night or during a night.\n", 26 | "\n", 27 | "A bias *image* is a picture taken with the shutter closed and zero exposure\n", 28 | "time; think about it as a command to the camera to do whatever it usually does\n", 29 | "to prepare the camera's electronics to take an image and then immediately read\n", 30 | "out the CCD as though you had taken a picture." 31 | ] 32 | }, 33 | { 34 | "cell_type": "markdown", 35 | "metadata": {}, 36 | "source": [ 37 | "## Sample bias images\n", 38 | "\n", 39 | "The images below are a single bias frame and an average 100 bias frames from an\n", 40 | "[Andor Apogee Aspen CG16M](http://www.andor.com/pdfs/specifications/Apogee_Aspen_CG16M_Specifications.pdf), a low-end 4k × 4k CCD with a\n", 41 | "[Kodak KAF-16803 sensor chip](http://www.onsemi.com/pub/Collateral/KAF-16803-D.PDF). That model camera has a typical bias level\n", 42 | "around 1000 and read noise around 10 $e^-$, though the precise value varies from\n", 43 | "camera to camera and with temperature." 44 | ] 45 | }, 46 | { 47 | "cell_type": "code", 48 | "execution_count": null, 49 | "metadata": {}, 50 | "outputs": [], 51 | "source": [ 52 | "%load_ext autoreload\n", 53 | "%autoreload 2" 54 | ] 55 | }, 56 | { 57 | "cell_type": "code", 58 | "execution_count": null, 59 | "metadata": {}, 60 | "outputs": [], 61 | "source": [ 62 | "%matplotlib inline\n", 63 | "import matplotlib.pyplot as plt" 64 | ] 65 | }, 66 | { 67 | "cell_type": "code", 68 | "execution_count": null, 69 | "metadata": {}, 70 | "outputs": [], 71 | "source": [ 72 | "# Use custom style for larger fonts and figures\n", 73 | "plt.style.use('guide.mplstyle')" 74 | ] 75 | }, 76 | { 77 | "cell_type": "code", 78 | "execution_count": null, 79 | "metadata": {}, 80 | "outputs": [], 81 | "source": [ 82 | "from astropy.nddata import CCDData\n", 83 | "from astropy.visualization import hist\n", 84 | "\n", 85 | "import numpy as np\n", 86 | "\n", 87 | "from convenience_functions import show_image\n", 88 | "\n", 89 | "download_base_url = 'http://physics.mnstate.edu/craig/ccd-guide/'" 90 | ] 91 | }, 92 | { 93 | "cell_type": "code", 94 | "execution_count": null, 95 | "metadata": {}, 96 | "outputs": [], 97 | "source": [ 98 | "one_bias = CCDData.read(download_base_url + 'dark-test-0100bias.fit.gz', unit='adu')\n", 99 | "one_hundred_bias = CCDData.read(download_base_url + 'master_bias.fit.gz', unit='adu')" 100 | ] 101 | }, 102 | { 103 | "cell_type": "code", 104 | "execution_count": null, 105 | "metadata": {}, 106 | "outputs": [], 107 | "source": [ 108 | "fig, (ax_1_bias, ax_avg_bias) = plt.subplots(1, 2, figsize=(30, 15))\n", 109 | "\n", 110 | "show_image(one_bias.data, cmap='gray', ax=ax_1_bias, fig=fig, input_ratio=8)\n", 111 | "ax_1_bias.set_title('Single bias image')\n", 112 | "show_image(one_hundred_bias.data, cmap='gray', ax=ax_avg_bias, fig=fig, input_ratio=8)\n", 113 | "ax_avg_bias.set_title('100 bias images combined');" 114 | ] 115 | }, 116 | { 117 | "cell_type": "markdown", 118 | "metadata": {}, 119 | "source": [ 120 | "### Note a few things\n", 121 | "\n", 122 | "+ The bias level in this specific camera is about 1023 (the mid-range of the\n", 123 | "colorbar).\n", 124 | "+ The image is brighter on the left and right edges. 
This \"amplifier glow\" is\n", 125 | "frequently present and caused by the CCD electronics (photosensors with an\n", 126 | "applied voltage are LEDs).\n", 127 | "+ There are several vertical lines; these are columns for which the bias level\n", 128 | "is consistently higher.\n", 129 | "+ There is noticeable \"static\" in the images; that is read noise.\n", 130 | "+ None of the variations are particularly large.\n", 131 | "+ Combining several bias images vastly reduces the read noise. This example is a\n", 132 | "little unrealistic in that 100 bias images were combined, but it still illustrates the\n", 133 | "idea that combining images reduces noise." 134 | ] 135 | }, 136 | { 137 | "cell_type": "markdown", 138 | "metadata": {}, 139 | "source": [ 140 | "## Impact of combining images on noise\n", 141 | "\n", 142 | "As discussed at length in the [notebook on combination](01-06-Image-combination.ipynb), the reason for\n", 143 | "taking and combining several calibration images is to reduce the noise if the\n", 144 | "images are used for calibration. The difference between a single image and a\n", 145 | "combination of images is apparent in the images above. Another way to see the\n", 146 | "impact of combining images is in the histogram of pixel values. Notice that the\n", 147 | "distribution of values is much narrower for the combined image than for a single\n", 148 | "bias. Pixels near the edges, where the amplifier glow is large, are binned\n", 149 | "separately from the rest of the pixels to emphasize the uniformity of the chip\n", 150 | "away from the glow." 151 | ] 152 | }, 153 | { 154 | "cell_type": "code", 155 | "execution_count": null, 156 | "metadata": {}, 157 | "outputs": [], 158 | "source": [ 159 | "plt.figure(figsize=(20, 10))\n", 160 | "hist(one_bias.data[:, 15:-30].flatten(), bins=800, alpha=0.4, label='One bias', color='deepskyblue')\n", 161 | "hist(np.concatenate((one_bias.data[:, :15].flatten(), one_bias.data[:, -30:].flatten())), bins=400, alpha=0.2, label='One bias (edges only)', color='lightskyblue')\n", 162 | "#hist(, bins=800, alpha=0.2, label='One bias (edges only)', color='darkblue')\n", 163 | "hist(one_hundred_bias.data[:, 15:-30].flatten(), bins=800, alpha=0.4, label='One hundred bias images', color='darkgreen')\n", 164 | "hist(np.concatenate((one_hundred_bias.data[:, :15].flatten(), one_hundred_bias.data[:, -30:].flatten())), bins=800, alpha=0.4, label='One hundred bias images (edges only)', color='lightgreen')\n", 165 | "\n", 166 | "#hist(one_hundred_bias.data[:, :15].flatten(), bins=800, alpha=0.4, label='One hundred bias images', color='darkgreen')\n", 167 | "\n", 168 | "plt.grid()\n", 169 | "plt.xlim(975, 1400)\n", 170 | "plt.legend()\n", 171 | "plt.xlabel('Pixel value')\n", 172 | "plt.ylabel('Number of pixels')\n", 173 | "plt.semilogy();" 174 | ] 175 | }, 176 | { 177 | "cell_type": "markdown", 178 | "metadata": {}, 179 | "source": [ 180 | "## Bias calibration overview" 181 | ] 182 | }, 183 | { 184 | "cell_type": "markdown", 185 | "metadata": {}, 186 | "source": [ 187 | "The progression here is to \"calibrate\" the bias images by subtracting overscan,\n", 188 | "if desired, trim the overscan from the bias images if it is present, and combine\n", 189 | "all of the bias images to make a \"combined\" bias (another common term for these\n", 190 | "images is \"master\" bias and occasionally \"super\" bias)." 
191 | ] 192 | } 193 | ], 194 | "metadata": { 195 | "kernelspec": { 196 | "display_name": "Python 3", 197 | "language": "python", 198 | "name": "python3" 199 | }, 200 | "language_info": { 201 | "codemirror_mode": { 202 | "name": "ipython", 203 | "version": 3 204 | }, 205 | "file_extension": ".py", 206 | "mimetype": "text/x-python", 207 | "name": "python", 208 | "nbconvert_exporter": "python", 209 | "pygments_lexer": "ipython3", 210 | "version": "3.6.8" 211 | } 212 | }, 213 | "nbformat": 4, 214 | "nbformat_minor": 4 215 | } 216 | -------------------------------------------------------------------------------- /notebooks/02-04-Combine-bias-images-to-make-master.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Combine bias images to make master\n" 8 | ] 9 | }, 10 | { 11 | "cell_type": "markdown", 12 | "metadata": {}, 13 | "source": [ 14 | "The final step is to combine the individual calibrated bias images into a single\n", 15 | "combined image. That combined image will have less noise than the individual\n", 16 | "images, minimizing the noise added to the remaining images when the bias is\n", 17 | "subtracted.\n", 18 | "\n", 19 | "Regardless of which path you took through the calibration of the biases (with\n", 20 | "overscan or without), there should be a folder named `reduced` that contains the\n", 21 | "calibrated bias images. If there is not, please run the previous notebook before\n", 22 | "continuing with this one." 23 | ] 24 | }, 25 | { 26 | "cell_type": "code", 27 | "execution_count": null, 28 | "metadata": {}, 29 | "outputs": [], 30 | "source": [ 31 | "from pathlib import Path\n", 32 | "import os\n", 33 | "\n", 34 | "from astropy.nddata import CCDData\n", 35 | "from astropy.stats import mad_std\n", 36 | "\n", 37 | "import ccdproc as ccdp\n", 38 | "import matplotlib.pyplot as plt\n", 39 | "import numpy as np\n", 40 | "\n", 41 | "from convenience_functions import show_image" 42 | ] 43 | }, 44 | { 45 | "cell_type": "code", 46 | "execution_count": null, 47 | "metadata": {}, 48 | "outputs": [], 49 | "source": [ 50 | "# Use custom style for larger fonts and figures\n", 51 | "plt.style.use('guide.mplstyle')" 52 | ] 53 | }, 54 | { 55 | "cell_type": "markdown", 56 | "metadata": {}, 57 | "source": [ 58 | "## Recommended settings for image combination\n", 59 | "\n", 60 | "As discussed in the [notebook about combining images](01-06-Image-combination.ipynb), the recommendation is\n", 61 | "that you combine by averaging the individual images but sigma clip to remove\n", 62 | "extreme values.\n", 63 | "\n", 64 | "[ccdproc](https://ccdproc.readthedocs.org) provides two ways to combine:\n", 65 | "\n", 66 | "+ An object-oriented interface built around the `Combiner` object, described in\n", 67 | "the [ccdproc documentation on image combination](https://ccdproc.readthedocs.io/en/latest/image_combination.html).\n", 68 | "+ A function called [`combine`](https://ccdproc.readthedocs.io/en/latest/api/ccdproc.combine.html#ccdproc.combine), which we will use here because the function\n", 69 | "allows you to specify the maximum amount of memory that should be used during\n", 70 | "combination. 
This feature can be essential depending on how many images you need\n", 71 | "to combine, how big they are, and how much memory your computer has.\n", 72 | "\n", 73 | "*NOTE: If using a version of ccdproc lower than 2.0, set the memory limit a\n", 74 | "factor of 2-3 lower than you want the maximum memory consumption to be.*" 75 | ] 76 | }, 77 | { 78 | "cell_type": "markdown", 79 | "metadata": {}, 80 | "source": [ 81 | "## Example 1: Cryogenically-cooled camera" 82 | ] 83 | }, 84 | { 85 | "cell_type": "markdown", 86 | "metadata": {}, 87 | "source": [ 88 | "The remainder of this section assumes the calibrated bias images are in the\n", 89 | "folder `example1-reduced` which is created in the previous notebook." 90 | ] 91 | }, 92 | { 93 | "cell_type": "code", 94 | "execution_count": null, 95 | "metadata": {}, 96 | "outputs": [], 97 | "source": [ 98 | "calibrated_path = Path('example1-reduced')\n", 99 | "reduced_images = ccdp.ImageFileCollection(calibrated_path)" 100 | ] 101 | }, 102 | { 103 | "cell_type": "markdown", 104 | "metadata": {}, 105 | "source": [ 106 | "The code below:\n", 107 | "\n", 108 | "+ selects the calibrated bias images,\n", 109 | "+ combines them using the `combine` function,\n", 110 | "+ adds the keyword `COMBINED` to the header so that later calibration steps can\n", 111 | "easily identify which bias to use, and\n", 112 | "+ writes the file." 113 | ] 114 | }, 115 | { 116 | "cell_type": "code", 117 | "execution_count": null, 118 | "metadata": {}, 119 | "outputs": [], 120 | "source": [ 121 | "calibrated_biases = reduced_images.files_filtered(imagetyp='bias', include_path=True)\n", 122 | "\n", 123 | "combined_bias = ccdp.combine(calibrated_biases,\n", 124 | " method='average',\n", 125 | " sigma_clip=True, sigma_clip_low_thresh=5, sigma_clip_high_thresh=5,\n", 126 | " sigma_clip_func=np.ma.median, sigma_clip_dev_func=mad_std,\n", 127 | " mem_limit=350e6\n", 128 | " )\n", 129 | "\n", 130 | "combined_bias.meta['combined'] = True\n", 131 | "\n", 132 | "combined_bias.write(calibrated_path / 'combined_bias.fit')" 133 | ] 134 | }, 135 | { 136 | "cell_type": "markdown", 137 | "metadata": {}, 138 | "source": [ 139 | "### Result for Example 1\n", 140 | "\n", 141 | "A single calibrated image and the combined image are shown below. There is\n", 142 | "significant two-dimensional structure in the bias that cannot easily be removed\n", 143 | "by subtracting only the overscan in the next image reduction steps. It takes\n", 144 | "little time to acquire bias images and doing so will result in higher quality\n", 145 | "science images." 146 | ] 147 | }, 148 | { 149 | "cell_type": "code", 150 | "execution_count": null, 151 | "metadata": {}, 152 | "outputs": [], 153 | "source": [ 154 | "fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(20, 10))\n", 155 | "\n", 156 | "show_image(CCDData.read(calibrated_biases[0]).data, cmap='gray', ax=ax1, fig=fig, percl=90)\n", 157 | "ax1.set_title('Single calibrated bias')\n", 158 | "show_image(combined_bias.data, cmap='gray', ax=ax2, fig=fig, percl=90)\n", 159 | "ax2.set_title('{} bias images combined'.format(len(calibrated_biases)))" 160 | ] 161 | }, 162 | { 163 | "cell_type": "markdown", 164 | "metadata": {}, 165 | "source": [ 166 | "## Example 2: Thermo-electrically cooled camera\n", 167 | "\n", 168 | "The process for combining the images is exactly the same as in example 1. The\n", 169 | "only difference is the directory that contains the calibrated bias frames." 
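Since only the directory changes between the two examples, one option (purely a sketch; the helper name and defaults below are invented here, not part of ccdproc) is to wrap the combination in a small function and call it once per example. That keeps the clipping settings and the `COMBINED` bookkeeping in a single place.

```python
# Hypothetical helper that combines the calibrated biases found in a folder.
# The keyword values and memory limit mirror the settings used above.
from pathlib import Path

import numpy as np
from astropy.stats import mad_std
import ccdproc as ccdp


def combine_biases(folder, output_name='combined_bias.fit', mem_limit=350e6):
    path = Path(folder)
    files = ccdp.ImageFileCollection(path).files_filtered(imagetyp='bias',
                                                          include_path=True)
    combined = ccdp.combine(files, method='average',
                            sigma_clip=True,
                            sigma_clip_low_thresh=5, sigma_clip_high_thresh=5,
                            sigma_clip_func=np.ma.median,
                            sigma_clip_dev_func=mad_std,
                            mem_limit=mem_limit)
    combined.meta['combined'] = True
    combined.write(path / output_name)
    return combined


# e.g. combine_biases('example1-reduced') and combine_biases('example2-reduced')
```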
170 | ] 171 | }, 172 | { 173 | "cell_type": "code", 174 | "execution_count": null, 175 | "metadata": {}, 176 | "outputs": [], 177 | "source": [ 178 | "calibrated_path = Path('example2-reduced')\n", 179 | "reduced_images = ccdp.ImageFileCollection(calibrated_path)" 180 | ] 181 | }, 182 | { 183 | "cell_type": "markdown", 184 | "metadata": {}, 185 | "source": [ 186 | "The code below:\n", 187 | "\n", 188 | "+ selects the calibrated bias images,\n", 189 | "+ combines them using the `combine` function,\n", 190 | "+ adds the keyword `COMBINED` to the header so that later calibration steps can\n", 191 | "easily identify which bias to use, and\n", 192 | "+ writes the file." 193 | ] 194 | }, 195 | { 196 | "cell_type": "code", 197 | "execution_count": null, 198 | "metadata": {}, 199 | "outputs": [], 200 | "source": [ 201 | "calibrated_biases = reduced_images.files_filtered(imagetyp='bias', include_path=True)\n", 202 | "\n", 203 | "combined_bias = ccdp.combine(calibrated_biases,\n", 204 | " method='average',\n", 205 | " sigma_clip=True, sigma_clip_low_thresh=5, sigma_clip_high_thresh=5,\n", 206 | " sigma_clip_func=np.ma.median, signma_clip_dev_func=mad_std,\n", 207 | " mem_limit=350e6\n", 208 | " )\n", 209 | "\n", 210 | "combined_bias.meta['combined'] = True\n", 211 | "\n", 212 | "combined_bias.write(calibrated_path / 'combined_bias.fit')" 213 | ] 214 | }, 215 | { 216 | "cell_type": "markdown", 217 | "metadata": {}, 218 | "source": [ 219 | "### Result for Example 2\n", 220 | "\n", 221 | "The difference between a single calibrated bias image and the combined bias\n", 222 | "image is much clearer in this case." 223 | ] 224 | }, 225 | { 226 | "cell_type": "code", 227 | "execution_count": null, 228 | "metadata": {}, 229 | "outputs": [], 230 | "source": [ 231 | "fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(20, 10))\n", 232 | "\n", 233 | "show_image(CCDData.read(calibrated_biases[0]).data, cmap='gray', ax=ax1, fig=fig)\n", 234 | "ax1.set_title('Single calibrated bias')\n", 235 | "show_image(combined_bias.data, cmap='gray', ax=ax2, fig=fig)\n", 236 | "ax2.set_title('{} bias images combined'.format(len(calibrated_biases)))" 237 | ] 238 | } 239 | ], 240 | "metadata": { 241 | "kernelspec": { 242 | "display_name": "Python 3", 243 | "language": "python", 244 | "name": "python3" 245 | }, 246 | "language_info": { 247 | "codemirror_mode": { 248 | "name": "ipython", 249 | "version": 3 250 | }, 251 | "file_extension": ".py", 252 | "mimetype": "text/x-python", 253 | "name": "python", 254 | "nbconvert_exporter": "python", 255 | "pygments_lexer": "ipython3", 256 | "version": "3.6.8" 257 | } 258 | }, 259 | "nbformat": 4, 260 | "nbformat_minor": 4 261 | } 262 | -------------------------------------------------------------------------------- /notebooks/convenience_functions.py: -------------------------------------------------------------------------------- 1 | from astropy import visualization as aviz 2 | from astropy.nddata.utils import block_reduce, Cutout2D 3 | from matplotlib import pyplot as plt 4 | 5 | 6 | def show_image(image, 7 | percl=99, percu=None, is_mask=False, 8 | figsize=(10, 10), 9 | cmap='viridis', log=False, clip=True, 10 | show_colorbar=True, show_ticks=True, 11 | fig=None, ax=None, input_ratio=None): 12 | """ 13 | Show an image in matplotlib with some basic astronomically-appropriat stretching. 
14 | 15 | Parameters 16 | ---------- 17 | image 18 | The image to show 19 | percl : number 20 | The percentile for the lower edge of the stretch (or both edges if ``percu`` is None) 21 | percu : number or None 22 | The percentile for the upper edge of the stretch (or None to use ``percl`` for both) 23 | figsize : 2-tuple 24 | The size of the matplotlib figure in inches 25 | """ 26 | if percu is None: 27 | percu = percl 28 | percl = 100 - percl 29 | 30 | if (fig is None and ax is not None) or (fig is not None and ax is None): 31 | raise ValueError('Must provide both "fig" and "ax" ' 32 | 'if you provide one of them') 33 | elif fig is None and ax is None: 34 | if figsize is not None: 35 | # Rescale the fig size to match the image dimensions, roughly 36 | image_aspect_ratio = image.shape[0] / image.shape[1] 37 | figsize = (max(figsize) * image_aspect_ratio, max(figsize)) 38 | 39 | fig, ax = plt.subplots(1, 1, figsize=figsize) 40 | 41 | 42 | # To preserve details we should *really* downsample correctly and 43 | # not rely on matplotlib to do it correctly for us (it won't). 44 | 45 | # So, calculate the size of the figure in pixels, block_reduce to 46 | # roughly that,and display the block reduced image. 47 | 48 | # Thanks, https://stackoverflow.com/questions/29702424/how-to-get-matplotlib-figure-size 49 | fig_size_pix = fig.get_size_inches() * fig.dpi 50 | 51 | ratio = (image.shape // fig_size_pix).max() 52 | 53 | if ratio < 1: 54 | ratio = 1 55 | 56 | ratio = input_ratio or ratio 57 | 58 | reduced_data = block_reduce(image, ratio) 59 | 60 | if not is_mask: 61 | # Divide by the square of the ratio to keep the flux the same in the 62 | # reduced image. We do *not* want to do this for images which are 63 | # masks, since their values should be zero or one. 64 | reduced_data = reduced_data / ratio**2 65 | 66 | # Of course, now that we have downsampled, the axis limits are changed to 67 | # match the smaller image size. Setting the extent will do the trick to 68 | # change the axis display back to showing the actual extent of the image. 69 | extent = [0, image.shape[1], 0, image.shape[0]] 70 | 71 | if log: 72 | stretch = aviz.LogStretch() 73 | else: 74 | stretch = aviz.LinearStretch() 75 | 76 | norm = aviz.ImageNormalize(reduced_data, 77 | interval=aviz.AsymmetricPercentileInterval(percl, percu), 78 | stretch=stretch, clip=clip) 79 | 80 | if is_mask: 81 | # The image is a mask in which pixels should be zero or one. 82 | # block_reduce may have changed some of the values, so reset here. 83 | reduced_data = reduced_data > 0 84 | # Set the image scale limits appropriately. 85 | scale_args = dict(vmin=0, vmax=1) 86 | else: 87 | scale_args = dict(norm=norm) 88 | 89 | im = ax.imshow(reduced_data, origin='lower', 90 | cmap=cmap, extent=extent, aspect='equal', **scale_args) 91 | 92 | if show_colorbar: 93 | # I haven't a clue why the fraction and pad arguments below work to make 94 | # the colorbar the same height as the image, but they do....unless the image 95 | # is wider than it is tall. Sticking with this for now anyway... 
96 | # Thanks: https://stackoverflow.com/a/26720422/3486425 97 | fig.colorbar(im, ax=ax, fraction=0.046, pad=0.04) 98 | # In case someone in the future wants to improve this: 99 | # https://joseph-long.com/writing/colorbars/ 100 | # https://stackoverflow.com/a/33505522/3486425 101 | # https://matplotlib.org/mpl_toolkits/axes_grid/users/overview.html#colorbar-whose-height-or-width-in-sync-with-the-master-axes 102 | 103 | if not show_ticks: 104 | ax.tick_params(labelbottom=False, labelleft=False, labelright=False, labeltop=False) 105 | 106 | 107 | def image_snippet(image, center, width=50, axis=None, fig=None, 108 | is_mask=False, pad_black=False, **kwargs): 109 | """ 110 | Display a subsection of an image about a center. 111 | 112 | Parameters 113 | ---------- 114 | 115 | image : numpy array 116 | The full image from which a section is to be taken. 117 | 118 | center : list-like 119 | The location of the center of the cutout. 120 | 121 | width : int, optional 122 | Width of the cutout, in pixels. 123 | 124 | axis : matplotlib.Axes instance, optional 125 | Axis on which the image should be displayed. 126 | 127 | fig : matplotlib.Figure, optional 128 | Figure on which the image should be displayed. 129 | 130 | is_mask : bool, optional 131 | Set to ``True`` if the image is a mask, i.e. all values are 132 | either zero or one. 133 | 134 | pad_black : bool, optional 135 | If ``True``, pad edges of the image with zeros to fill out width 136 | if the slice is near the edge. 137 | """ 138 | if pad_black: 139 | sub_image = Cutout2D(image, center, width, mode='partial', fill_value=0) 140 | else: 141 | # Return a smaller subimage if extent goes out side image 142 | sub_image = Cutout2D(image, center, width, mode='trim') 143 | show_image(sub_image.data, cmap='gray', ax=axis, fig=fig, 144 | show_colorbar=False, show_ticks=False, is_mask=is_mask, 145 | **kwargs) 146 | 147 | 148 | def _mid(sl): 149 | return (sl.start + sl.stop) // 2 150 | 151 | 152 | def display_cosmic_rays(cosmic_rays, images, titles=None, 153 | only_display_rays=None): 154 | """ 155 | Display cutouts of the region around each cosmic ray and the other images 156 | passed in. 157 | 158 | Parameters 159 | ---------- 160 | 161 | cosmic_rays : photutils.segmentation.SegmentationImage 162 | The segmented cosmic ray image returned by ``photuils.detect_source``. 163 | 164 | images : list of images 165 | The list of images to be displayed. Each image becomes a column in 166 | the generated plot. The first image must be the cosmic ray mask. 167 | 168 | titles : list of str 169 | Titles to be put above the first row of images. 170 | 171 | only_display_rays : list of int, optional 172 | The number of the cosmic ray(s) to display. The default value, 173 | ``None``, means display them all. The number of the cosmic ray is 174 | its index in ``cosmic_rays``, which is also the number displayed 175 | on the mask. 176 | """ 177 | # Check whether the first image is actually a mask. 178 | 179 | if not ((images[0] == 0) | (images[0] == 1)).all(): 180 | raise ValueError('The first image must be a mask with ' 181 | 'values of zero or one') 182 | 183 | if only_display_rays is None: 184 | n_rows = len(cosmic_rays.slices) 185 | else: 186 | n_rows = len(only_display_rays) 187 | 188 | n_columns = len(images) 189 | 190 | width = 12 191 | 192 | # The height below is *CRITICAL*. If the aspect ratio of the figure as 193 | # a whole does not allow for square plots then one ends up with a bunch 194 | # of whitespace. The plots here are square by design. 
195 | height = width / n_columns * n_rows 196 | fig, axes = plt.subplots(n_rows, n_columns, sharex=False, sharey='row', 197 | figsize=(width, height)) 198 | 199 | # Generate empty titles if none were provided. 200 | if titles is None: 201 | titles = [''] * n_columns 202 | 203 | display_row = 0 204 | 205 | for row, s in enumerate(cosmic_rays.slices): 206 | if only_display_rays is not None: 207 | if row not in only_display_rays: 208 | # We are not supposed to display this one, so skip it. 209 | continue 210 | 211 | x = _mid(s[1]) 212 | y = _mid(s[0]) 213 | 214 | for column, plot_info in enumerate(zip(images, titles)): 215 | image = plot_info[0] 216 | title = plot_info[1] 217 | is_mask = column == 0 218 | ax = axes[display_row, column] 219 | image_snippet(image, (x, y), width=80, axis=ax, fig=fig, 220 | is_mask=is_mask) 221 | if is_mask: 222 | ax.annotate('Cosmic ray {}'.format(row), (0.1, 0.9), 223 | xycoords='axes fraction', 224 | color='cyan', fontsize=20) 225 | 226 | if display_row == 0: 227 | # Only set the title if it isn't empty. 228 | if title: 229 | ax.set_title(title) 230 | 231 | display_row = display_row + 1 232 | 233 | # This choice results in the images close to each other but with 234 | # a small gap. 235 | plt.subplots_adjust(wspace=0.1, hspace=0.05) 236 | -------------------------------------------------------------------------------- /notebooks/03-05-Calibrate-dark-images.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Calibrate dark images\n", 8 | "\n", 9 | "Dark images, like any other images, need to be calibrated. Depending on the data\n", 10 | "you have and the choices you have made in reducing your data, the steps to\n", 11 | "reducing your images may include:\n", 12 | "\n", 13 | "1. Subtracting overscan (only if you decide to subtract overscan from all\n", 14 | "images).\n", 15 | "2. Trim the image (if it has overscan, whether you are using the overscan or\n", 16 | "not).\n", 17 | "3. Subtract bias (if you need to scale the calibrated dark frames to a different\n", 18 | "exposure time)." 19 | ] 20 | }, 21 | { 22 | "cell_type": "code", 23 | "execution_count": null, 24 | "metadata": {}, 25 | "outputs": [], 26 | "source": [ 27 | "from pathlib import Path\n", 28 | "\n", 29 | "from astropy.nddata import CCDData\n", 30 | "from ccdproc import ImageFileCollection\n", 31 | "import ccdproc as ccdp" 32 | ] 33 | }, 34 | { 35 | "cell_type": "markdown", 36 | "metadata": {}, 37 | "source": [ 38 | "## Example 1: Overscan subtracted, bias not removed" 39 | ] 40 | }, 41 | { 42 | "cell_type": "markdown", 43 | "metadata": {}, 44 | "source": [ 45 | "### Take a look at what images you have\n", 46 | "\n", 47 | "First we gather up some information about the raw images and the reduced images\n", 48 | "up to this point. These examples have darks stored in a subdirectory of the\n", 49 | "folder with the rest of the images, so we create an `ImageFileCollection` for\n", 50 | "each." 
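The three optional steps listed at the top of this notebook can also be collected into one small helper so that the same function serves both examples below. This is only a sketch; the function and argument names are invented for illustration, and each step is skipped when its argument is left as `None`.

```python
# Hypothetical helper: apply only the dark-calibration steps you decided on.
import numpy as np
import ccdproc as ccdp


def calibrate_dark(ccd, overscan_region=None, trim_region=None, bias=None):
    if overscan_region is not None:          # step 1: subtract overscan (optional)
        ccd = ccdp.subtract_overscan(ccd, overscan=ccd[overscan_region],
                                     median=True)
    if trim_region is not None:              # step 2: trim the image (optional)
        ccd = ccdp.trim_image(ccd[trim_region])
    if bias is not None:                     # step 3: subtract bias (optional)
        ccd = ccdp.subtract_bias(ccd, bias)
    return ccd


# Example 1 style (overscan + trim, no bias), using the LFC regions:
# dark = calibrate_dark(dark, overscan_region=np.s_[:, 2055:],
#                       trim_region=np.s_[:, :2048])
```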
51 | ] 52 | }, 53 | { 54 | "cell_type": "code", 55 | "execution_count": null, 56 | "metadata": {}, 57 | "outputs": [], 58 | "source": [ 59 | "ex1_path_raw = Path('example-cryo-LFC')\n", 60 | "\n", 61 | "ex1_images_raw = ImageFileCollection(ex1_path_raw)\n", 62 | "ex1_darks_raw = ImageFileCollection(ex1_path_raw / 'darks')\n", 63 | "\n", 64 | "ex1_path_reduced = Path('example1-reduced')\n", 65 | "ex1_images_reduced = ImageFileCollection(ex1_path_reduced)" 66 | ] 67 | }, 68 | { 69 | "cell_type": "markdown", 70 | "metadata": {}, 71 | "source": [ 72 | "#### Raw images, everything except the darks" 73 | ] 74 | }, 75 | { 76 | "cell_type": "code", 77 | "execution_count": null, 78 | "metadata": {}, 79 | "outputs": [], 80 | "source": [ 81 | "ex1_images_raw.summary['file', 'imagetyp', 'exptime', 'filter']" 82 | ] 83 | }, 84 | { 85 | "cell_type": "markdown", 86 | "metadata": {}, 87 | "source": [ 88 | "#### Raw dark frames" 89 | ] 90 | }, 91 | { 92 | "cell_type": "code", 93 | "execution_count": null, 94 | "metadata": {}, 95 | "outputs": [], 96 | "source": [ 97 | "ex1_darks_raw.summary['file', 'imagetyp', 'exptime', 'filter']" 98 | ] 99 | }, 100 | { 101 | "cell_type": "markdown", 102 | "metadata": {}, 103 | "source": [ 104 | "### Decide which calibration steps to take\n", 105 | "\n", 106 | "This example is, again, one of the chips of the LFC camera at Palomar. In\n", 107 | "earlier notebooks we have seen that the chip has a [useful overscan region](01.08-Overscan.ipynb#Case-1:-Cryogenically-cooled-Large-Format-Camera-(LFC)-at-Palomar), has little dark current except for some hot pixels, and sensor glow in\n", 108 | "one corner of the chip.\n", 109 | "\n", 110 | "Looking at the list of non-dark images (i.e., the flat and light images) shows\n", 111 | "that for each exposure time in the non-dark images there is a set of dark\n", 112 | "exposures that has a matching, or very close to matching, exposure time.\n", 113 | "\n", 114 | "To be more explicit, there are flats with exposure times of 7.0 sec and 70.011\n", 115 | "sec and darks with exposure time of 7.0 and 70.0 sec. The dark and flat exposure\n", 116 | "times are close enough that there is no need to scale them. The two images of\n", 117 | "an object are each roughly 300 sec, matching the darks with exposure time 300\n", 118 | "sec. The very small difference in exposure time, under 0.1 sec, does not need to\n", 119 | "be compensated for.\n", 120 | "\n", 121 | "Given this, we will:\n", 122 | "\n", 123 | "1. Subtract overscan from each of the darks. The useful overscan region is XXX\n", 124 | "(see LINK).\n", 125 | "2. Trim the overscan out of the dark images.\n", 126 | "\n", 127 | "We will *not* subtract bias from these images because we will *not* need to\n", 128 | "rescale them to a different exposure time." 
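That exposure-time bookkeeping can also be checked in code. The sketch below pairs each non-dark exposure time with the nearest dark exposure time and flags anything that differs by more than 0.1 sec, the tolerance used in the discussion above. It assumes the bias frames carry an exposure time of zero in their headers, which is why they are skipped.

```python
import numpy as np

dark_times = np.sort(np.array(list(set(ex1_darks_raw.summary['exptime']))))

for exp in sorted(set(ex1_images_raw.summary['exptime'])):
    if exp == 0:
        continue  # bias frames; they do not need a matching dark
    closest = dark_times[np.argmin(np.abs(dark_times - exp))]
    flag = 'close enough' if abs(closest - exp) <= 0.1 else 'darks must be scaled'
    print('{:10.3f} sec -> nearest dark {:10.3f} sec ({})'.format(exp, closest, flag))
```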
129 | ] 130 | }, 131 | { 132 | "cell_type": "markdown", 133 | "metadata": {}, 134 | "source": [ 135 | "### Calibrate the individual dark frames" 136 | ] 137 | }, 138 | { 139 | "cell_type": "code", 140 | "execution_count": null, 141 | "metadata": {}, 142 | "outputs": [], 143 | "source": [ 144 | "for ccd, file_name in ex1_darks_raw.ccds(imagetyp='DARK', # Just get the dark frames\n", 145 | " ccd_kwargs={'unit': 'adu'}, # CCDData requires a unit for the image if \n", 146 | " # it is not in the header\n", 147 | " return_fname=True # Provide the file name too.\n", 148 | " ): \n", 149 | " # Subtract the overscan\n", 150 | " ccd = ccdp.subtract_overscan(ccd, overscan=ccd[:, 2055:], median=True)\n", 151 | " \n", 152 | " # Trim the overscan\n", 153 | " ccd = ccdp.trim_image(ccd[:, :2048])\n", 154 | " \n", 155 | " # Save the result\n", 156 | " ccd.write(ex1_path_reduced / file_name)" 157 | ] 158 | }, 159 | { 160 | "cell_type": "markdown", 161 | "metadata": {}, 162 | "source": [ 163 | "#### Reduced images (so far)" 164 | ] 165 | }, 166 | { 167 | "cell_type": "code", 168 | "execution_count": null, 169 | "metadata": {}, 170 | "outputs": [], 171 | "source": [ 172 | "ex1_images_reduced.refresh()\n", 173 | "ex1_images_reduced.summary['file', 'imagetyp', 'exptime', 'filter', 'combined']" 174 | ] 175 | }, 176 | { 177 | "cell_type": "markdown", 178 | "metadata": {}, 179 | "source": [ 180 | "## Example 2: Overscan not subtracted, bias is removed" 181 | ] 182 | }, 183 | { 184 | "cell_type": "code", 185 | "execution_count": null, 186 | "metadata": {}, 187 | "outputs": [], 188 | "source": [ 189 | "ex2_path_raw = Path('example-thermo-electric')\n", 190 | "\n", 191 | "ex2_images_raw = ImageFileCollection(ex2_path_raw)\n", 192 | "\n", 193 | "ex2_path_reduced = Path('example2-reduced')\n", 194 | "ex2_images_reduced = ImageFileCollection(ex2_path_reduced)" 195 | ] 196 | }, 197 | { 198 | "cell_type": "markdown", 199 | "metadata": {}, 200 | "source": [ 201 | "We begin by looking at what exposure times we have in this data." 202 | ] 203 | }, 204 | { 205 | "cell_type": "code", 206 | "execution_count": null, 207 | "metadata": {}, 208 | "outputs": [], 209 | "source": [ 210 | "ex2_images_raw.summary['file', 'imagetyp', 'exposure'].show_in_notebook()" 211 | ] 212 | }, 213 | { 214 | "cell_type": "markdown", 215 | "metadata": {}, 216 | "source": [ 217 | "### Decide what steps to take next\n", 218 | "\n", 219 | "In this case the only dark frames have exposure time 90 sec. Though that matches\n", 220 | "the exposure time of the science images, the flat field images are much shorter\n", 221 | "exposure time, ranging from 1 sec to 1.21 sec. This type of range of exposure is\n", 222 | "typical when twilight flats are taken. Since these are a much different\n", 223 | "exposure time than the darks, the dark frames will need to be scaled.\n", 224 | "\n", 225 | "Recall that for this camera the overscan is not useful and should be\n", 226 | "trimmed off.\n", 227 | "\n", 228 | "Given this, we will:\n", 229 | "\n", 230 | "1. Trim the overscan from each of the dark frames.\n", 231 | "2. Subtract calibration bias from the dark frames so that we can scale the darks\n", 232 | "to a different exposure time." 233 | ] 234 | }, 235 | { 236 | "cell_type": "markdown", 237 | "metadata": {}, 238 | "source": [ 239 | "### Calibration the individual dark frames" 240 | ] 241 | }, 242 | { 243 | "cell_type": "markdown", 244 | "metadata": {}, 245 | "source": [ 246 | "First, we read the combined bias image created in the previous notebook. 
Though\n", 247 | "we could do this based on the file name, using a systematic set of header\n", 248 | "keywords to keep track of which images have been combined is less likely to lead\n", 249 | "to errors." 250 | ] 251 | }, 252 | { 253 | "cell_type": "code", 254 | "execution_count": null, 255 | "metadata": {}, 256 | "outputs": [], 257 | "source": [ 258 | "combined_bias = CCDData.read(ex2_images_reduced.files_filtered(imagetyp='bias', \n", 259 | " combined=True, \n", 260 | " include_path=True)[0])" 261 | ] 262 | }, 263 | { 264 | "cell_type": "code", 265 | "execution_count": null, 266 | "metadata": {}, 267 | "outputs": [], 268 | "source": [ 269 | "for ccd, file_name in ex2_images_raw.ccds(imagetyp='DARK', # Just get the bias frames\n", 270 | " return_fname=True # Provide the file name too.\n", 271 | " ):\n", 272 | " \n", 273 | " # Trim the overscan\n", 274 | " ccd = ccdp.trim_image(ccd[:, :4096])\n", 275 | " \n", 276 | " # Subtract bias\n", 277 | " ccd = ccdp.subtract_bias(ccd, combined_bias)\n", 278 | " # Save the result\n", 279 | " ccd.write(ex2_path_reduced / file_name)" 280 | ] 281 | } 282 | ], 283 | "metadata": { 284 | "kernelspec": { 285 | "display_name": "Python 3", 286 | "language": "python", 287 | "name": "python3" 288 | }, 289 | "language_info": { 290 | "codemirror_mode": { 291 | "name": "ipython", 292 | "version": 3 293 | }, 294 | "file_extension": ".py", 295 | "mimetype": "text/x-python", 296 | "name": "python", 297 | "nbconvert_exporter": "python", 298 | "pygments_lexer": "ipython3", 299 | "version": "3.6.8" 300 | } 301 | }, 302 | "nbformat": 4, 303 | "nbformat_minor": 4 304 | } 305 | -------------------------------------------------------------------------------- /notebooks/07-01-Creating-a-sky-flat.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Creating a sky flat\n", 8 | "\n", 9 | "One way of producing flat field images is to use the science images, combined in\n", 10 | "a way that eliminates astronomical sources. This provides an exact match to the\n", 11 | "spectrum of the night sky, since the night sky is the source of light. However,\n", 12 | "the night sky is dark, so the counts in individual images is low. Many images\n", 13 | "must be combined to generate a flat with low noise." 14 | ] 15 | }, 16 | { 17 | "cell_type": "code", 18 | "execution_count": null, 19 | "metadata": {}, 20 | "outputs": [], 21 | "source": [ 22 | "from pathlib import Path\n", 23 | "\n", 24 | "import numpy as np\n", 25 | "\n", 26 | "from astropy.nddata import CCDData\n", 27 | "from astropy import units as u\n", 28 | "from astropy.stats import mad_std\n", 29 | "\n", 30 | "from photutils import detect_threshold, detect_sources, source_properties\n", 31 | "\n", 32 | "import ccdproc as ccdp" 33 | ] 34 | }, 35 | { 36 | "cell_type": "markdown", 37 | "metadata": {}, 38 | "source": [ 39 | "## When is it impossible to produce a sky flat?\n", 40 | "\n", 41 | "There are a few circumstances in which producing a sky flat is difficult or\n", 42 | "impossible:\n", 43 | "\n", 44 | "+ The telescope tracks very well so stars and other sources are always in\n", 45 | "roughly the same pixels in all of the images. In this case, there is no way to\n", 46 | "produce a good flat. If several fields of view are observed this should not be\n", 47 | "an issue.\n", 48 | "+ There is an extended source that covers an appreciable fraction of the field\n", 49 | "of view. 
In this case there is likely to be overlap of the extended object\n", 50 | "between images, so it cannot be removed from the flat.\n", 51 | "+ The sky is really dark. In very dark sites the sky background might be low\n", 52 | "enough that the sky flat is too noisy to be useful." 53 | ] 54 | }, 55 | { 56 | "cell_type": "markdown", 57 | "metadata": {}, 58 | "source": [ 59 | "## Producing a sky flat\n", 60 | "\n", 61 | "Producing a sky flat is much like producing any other flat. The images must have\n", 62 | "bias and dark current subtracted (and overscan if it is being used) then\n", 63 | "combined, rescaling each image to take into account different levels of\n", 64 | "background illumination.\n", 65 | "\n", 66 | "It is important to scale the *median* of each image to the same value instead of\n", 67 | "scaling the *mean* because the presence of bright sources will affect the mean\n", 68 | "much more than the median.\n", 69 | "\n", 70 | "One down side of producing sky flats is the need to process the science images\n", 71 | "twice. The first time all of the usual calibration steps except flat fielding\n", 72 | "are done, then the flats are produced, then each science image is flat\n", 73 | "corrected." 74 | ] 75 | }, 76 | { 77 | "cell_type": "markdown", 78 | "metadata": {}, 79 | "source": [ 80 | "### Partially calibrate science images\n", 81 | "\n", 82 | "The partially reduced images are saved in a different folder than the completely\n", 83 | "reduced science images that were processed earlier.\n", 84 | "\n", 85 | "The images for this example were taken the same night as the other images in\n", 86 | "\"Example 2\" in earlier notebooks.\n", 87 | "\n", 88 | "First, we set up some of the locations we will need." 89 | ] 90 | }, 91 | { 92 | "cell_type": "code", 93 | "execution_count": null, 94 | "metadata": {}, 95 | "outputs": [], 96 | "source": [ 97 | "ex2_calibrated = Path('example2-reduced')\n", 98 | "\n", 99 | "sky_flat_bad_raw = Path('sky_flat_good_raw')\n", 100 | "\n", 101 | "sky_flat_bad_working = Path('sky_flat_good_working')\n", 102 | "sky_flat_bad_working.mkdir(exist_ok=True)" 103 | ] 104 | }, 105 | { 106 | "cell_type": "markdown", 107 | "metadata": {}, 108 | "source": [ 109 | "Next, load the combined bias and combined dark for this night. Recall that the\n", 110 | "combined dark for this night was bias-subtracted because it needed to be scaled\n", 111 | "for the flat images (see [this notebook](03.05-Calibrate-dark-images.ipynb#Example-2:-Overscan-not-subtracted,-bias-is-removed) for more detail).\n", 112 | "\n", 113 | "All of the science exposures this night had the same exposure time, 90 sec." 114 | ] 115 | }, 116 | { 117 | "cell_type": "code", 118 | "execution_count": null, 119 | "metadata": {}, 120 | "outputs": [], 121 | "source": [ 122 | "combined_bias = CCDData.read(ex2_calibrated / 'combined_bias.fit')\n", 123 | "combined_dark = CCDData.read(ex2_calibrated / 'combined_dark_90.000.fit')" 124 | ] 125 | }, 126 | { 127 | "cell_type": "markdown", 128 | "metadata": {}, 129 | "source": [ 130 | "The telescope tracking changed during this night. Tracking was excellent for\n", 131 | "observations of Kelt 16b, making the images terrible for sky flats, but\n", 132 | "excellent for illustrating the failure of sky flats under some circumstances." 
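Before calibrating, it is worth being concrete about the earlier point that each image should be scaled by its *median* rather than its mean. A toy example (purely synthetic numbers, nothing from this night's data) shows how strongly a few bright pixels pull on the mean while leaving the median essentially untouched:

```python
import numpy as np

rng = np.random.default_rng(seed=8675309)
sky = rng.normal(loc=1000, scale=30, size=(500, 500))   # smooth sky background

contaminated = sky.copy()
contaminated[:10, :10] += 50_000                        # a bright source on ~100 pixels

print(np.mean(sky), np.mean(contaminated))       # the mean is pulled up noticeably
print(np.median(sky), np.median(contaminated))   # the median barely moves
```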
133 | ] 134 | }, 135 | { 136 | "cell_type": "code", 137 | "execution_count": null, 138 | "metadata": { 139 | "scrolled": false 140 | }, 141 | "outputs": [], 142 | "source": [ 143 | "ifc_raw = ccdp.ImageFileCollection(sky_flat_bad_raw)\n", 144 | "\n", 145 | "for ccd, name in ifc_raw.ccds(imagetyp='light', object='wasp 10 b', filter=\"r\", return_fname=True):\n", 146 | "    reduced = ccdp.trim_image(ccd[:, :4096])\n", 147 | "    reduced = ccdp.subtract_bias(reduced, combined_bias)\n", 148 | "    reduced = ccdp.subtract_dark(reduced, combined_dark, exposure_time='exposure', exposure_unit=u.second)\n", 149 | "    thresh = detect_threshold(reduced, 2)\n", 150 | "    segm = detect_sources(reduced, thresh, 30)\n", 151 | "    reduced.data[segm.data > 0] = np.nan\n", 152 | "    reduced.data = reduced.data.astype('float32')\n", 153 | "    reduced.write(sky_flat_bad_working / name)" 154 | ] 155 | }, 156 | { 157 | "cell_type": "markdown", 158 | "metadata": {}, 159 | "source": [ 160 | "### Combine the partially calibrated images\n", 161 | "\n", 162 | "The combination settings here are important. Either combine by averaging and\n", 163 | "sigma clip or combine by median. Either should ensure that stars do not show up\n", 164 | "in your final flat as long as there is enough offset between the images. Images\n", 165 | "need to be scaled so that the median is the same for each image. Typically, a\n", 166 | "value of one is chosen as the common value." 167 | ] 168 | }, 169 | { 170 | "cell_type": "code", 171 | "execution_count": null, 172 | "metadata": {}, 173 | "outputs": [], 174 | "source": [ 175 | "ifc_working = ccdp.ImageFileCollection(sky_flat_bad_working)" 176 | ] 177 | }, 178 | { 179 | "cell_type": "code", 180 | "execution_count": null, 181 | "metadata": {}, 182 | "outputs": [], 183 | "source": [ 184 | "to_combine = [ccd for ccd in ifc_working.ccds()]\n", 185 | "\n", 186 | "def inv_median(array):\n", 187 | "    return 1 / np.nanmedian(array)\n", 188 | "\n", 189 | "sky_flat = ccdp.combine(to_combine, scale=inv_median, \n", 190 | "                        sigma_clip=True, sigma_clip_low_thresh=3, sigma_clip_high_thresh=3,\n", 191 | "                        sigma_clip_func=np.nanmedian, sigma_clip_dev_func=mad_std, \n", 192 | "                        mem_limit=2e9\n", 193 | "                       )\n", 194 | "\n" 195 | ] 196 | }, 197 | { 198 | "cell_type": "code", 199 | "execution_count": null, 200 | "metadata": {}, 201 | "outputs": [], 202 | "source": [ 203 | "from convenience_functions import show_image" 204 | ] 205 | }, 206 | { 207 | "cell_type": "code", 208 | "execution_count": null, 209 | "metadata": {}, 210 | "outputs": [], 211 | "source": [ 212 | "an_im = CCDData.read(sky_flat_bad_working / 'wasp-10-b-S001-R001-C050-r.fit')" 213 | ] 214 | }, 215 | { 216 | "cell_type": "code", 217 | "execution_count": null, 218 | "metadata": {}, 219 | "outputs": [], 220 | "source": [ 221 | "show_image(an_im, cmap='gray')" 222 | ] 223 | }, 224 | { 225 | "cell_type": "code", 226 | "execution_count": null, 227 | "metadata": {}, 228 | "outputs": [], 229 | "source": [ 230 | "from photutils import detect_threshold, detect_sources, source_properties" 231 | ] 232 | }, 233 | { 234 | "cell_type": "code", 235 | "execution_count": null, 236 | "metadata": {}, 237 | "outputs": [], 238 | "source": [ 239 | "foo = detect_threshold(an_im, 2)" 240 | ] 241 | }, 242 | { 243 | "cell_type": "code", 244 | "execution_count": null, 245 | "metadata": {}, 246 | "outputs": [], 247 | "source": [ 248 | "arf = detect_sources(an_im, foo, 30)" 249 | ] 250 | }, 251 | { 252 | "cell_type": "code", 253 | "execution_count": null, 254 | "metadata": {}, 255 | "outputs": [],
256 | "source": [ 257 | "show_image(arf.data > 0, cmap='gray', is_mask=True)" 258 | ] 259 | }, 260 | { 261 | "cell_type": "code", 262 | "execution_count": null, 263 | "metadata": {}, 264 | "outputs": [], 265 | "source": [ 266 | "(arf.data > 0).sum()" 267 | ] 268 | }, 269 | { 270 | "cell_type": "code", 271 | "execution_count": null, 272 | "metadata": {}, 273 | "outputs": [], 274 | "source": [ 275 | "moo = source_properties(an_im.data, arf)" 276 | ] 277 | }, 278 | { 279 | "cell_type": "code", 280 | "execution_count": null, 281 | "metadata": {}, 282 | "outputs": [], 283 | "source": [ 284 | "moo.to_table()" 285 | ] 286 | }, 287 | { 288 | "cell_type": "code", 289 | "execution_count": null, 290 | "metadata": {}, 291 | "outputs": [], 292 | "source": [ 293 | "show_image(sky_flat, cmap='gray')" 294 | ] 295 | }, 296 | { 297 | "cell_type": "code", 298 | "execution_count": null, 299 | "metadata": {}, 300 | "outputs": [], 301 | "source": [ 302 | "sky_flat.write('supposed_to_be_good_but_has_streaks.fits')" 303 | ] 304 | }, 305 | { 306 | "cell_type": "code", 307 | "execution_count": null, 308 | "metadata": {}, 309 | "outputs": [], 310 | "source": [] 311 | } 312 | ], 313 | "metadata": { 314 | "kernelspec": { 315 | "display_name": "Python 3", 316 | "language": "python", 317 | "name": "python3" 318 | }, 319 | "language_info": { 320 | "codemirror_mode": { 321 | "name": "ipython", 322 | "version": 3 323 | }, 324 | "file_extension": ".py", 325 | "mimetype": "text/x-python", 326 | "name": "python", 327 | "nbconvert_exporter": "python", 328 | "pygments_lexer": "ipython3", 329 | "version": "3.6.8" 330 | } 331 | }, 332 | "nbformat": 4, 333 | "nbformat_minor": 2 334 | } 335 | -------------------------------------------------------------------------------- /notebooks/03-06-Combine-darks-for-use-in-later-calibration-steps.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Combine calibrated dark images for use in later reduction steps\n" 8 | ] 9 | }, 10 | { 11 | "cell_type": "markdown", 12 | "metadata": {}, 13 | "source": [ 14 | "The final step is to combine the individual calibrated dark images into a single\n", 15 | "combined image. That combined image will have less noise than the individual\n", 16 | "images, minimizing the noise added to the remaining images when the dark is\n", 17 | "subtracted.\n", 18 | "\n", 19 | "Regardless of which path you took through the calibration of the biases (with\n", 20 | "overscan or without) there should be a folder named either `example1-reduced` or\n", 21 | "`example2-reduced` that contains the calibrated bias and dark images. If there\n", 22 | "is not, please run the previous notebook before continuing with this one." 
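How much less noise? For read noise that is independent from frame to frame, averaging N images reduces the scatter by roughly the square root of N. A quick synthetic check (the numbers here are made up purely for illustration):

```python
import numpy as np

rng = np.random.default_rng(seed=42)
n_frames = 16
frames = rng.normal(loc=0, scale=10, size=(n_frames, 200, 200))

print(frames[0].std())               # one frame: close to 10
print(frames.mean(axis=0).std())     # average of 16 frames: close to 10 / 4 = 2.5
print(10 / np.sqrt(n_frames))        # the square-root-of-N prediction
```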
23 | ] 24 | }, 25 | { 26 | "cell_type": "code", 27 | "execution_count": null, 28 | "metadata": {}, 29 | "outputs": [], 30 | "source": [ 31 | "from pathlib import Path\n", 32 | "import os\n", 33 | "\n", 34 | "from astropy.nddata import CCDData\n", 35 | "from astropy.stats import mad_std\n", 36 | "\n", 37 | "import ccdproc as ccdp\n", 38 | "import matplotlib.pyplot as plt\n", 39 | "import numpy as np\n", 40 | "\n", 41 | "from convenience_functions import show_image" 42 | ] 43 | }, 44 | { 45 | "cell_type": "code", 46 | "execution_count": null, 47 | "metadata": {}, 48 | "outputs": [], 49 | "source": [ 50 | "# Use custom style for larger fonts and figures\n", 51 | "plt.style.use('guide.mplstyle')" 52 | ] 53 | }, 54 | { 55 | "cell_type": "markdown", 56 | "metadata": {}, 57 | "source": [ 58 | "## Recommended settings for image combination\n", 59 | "\n", 60 | "As discussed in the [notebook about combining images](01-06-Image-combination.ipynb), the recommendation is\n", 61 | "that you combine by averaging the individual images but sigma clip to remove\n", 62 | "extreme values.\n", 63 | "\n", 64 | "[ccdproc](https://ccdproc.readthedocs.org) provides two ways to combine:\n", 65 | "\n", 66 | "+ An object-oriented interface built around the `Combiner` object, described in\n", 67 | "the [ccdproc documentation on image combination](https://ccdproc.readthedocs.io/en/latest/image_combination.html).\n", 68 | "+ A function called [`combine`](https://ccdproc.readthedocs.io/en/latest/api/ccdproc.combine.html#ccdproc.combine), which we will use here because the function\n", 69 | "allows you to specify the maximum amount of memory that should be used during\n", 70 | "combination. That feature can be essential depending on how many images you need\n", 71 | "to combine, how big they are, and how much memory your computer has.\n", 72 | "\n", 73 | "*NOTE: If using a version of ccdproc lower than 2.0, set the memory limit a\n", 74 | "factor of 2-3 lower than you want the maximum memory consumption to be.*" 75 | ] 76 | }, 77 | { 78 | "cell_type": "markdown", 79 | "metadata": {}, 80 | "source": [ 81 | "## Example 1: Cryogenically-cooled camera" 82 | ] 83 | }, 84 | { 85 | "cell_type": "markdown", 86 | "metadata": {}, 87 | "source": [ 88 | "The remainder of this section assumes that the calibrated bias images are in the\n", 89 | "folder `example1-reduced` which was created in the previous notebook." 90 | ] 91 | }, 92 | { 93 | "cell_type": "code", 94 | "execution_count": null, 95 | "metadata": {}, 96 | "outputs": [], 97 | "source": [ 98 | "calibrated_path = Path('example1-reduced')\n", 99 | "reduced_images = ccdp.ImageFileCollection(calibrated_path)" 100 | ] 101 | }, 102 | { 103 | "cell_type": "markdown", 104 | "metadata": {}, 105 | "source": [ 106 | "### Make a combined image for each exposure time in Example 1" 107 | ] 108 | }, 109 | { 110 | "cell_type": "markdown", 111 | "metadata": {}, 112 | "source": [ 113 | "There are several dark exposure times in this data set. By converting the times\n", 114 | "in the summary table to a set it returns only the unique values." 
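The next cell collapses the exposure times to a set of unique values. If you also want to know how many darks were taken at each exposure time, the summary table can be grouped directly; this optional sketch uses the same `reduced_images` collection.

```python
dark_rows = reduced_images.summary[reduced_images.summary['imagetyp'] == 'DARK']
by_time = dark_rows.group_by('exptime')

for key, group in zip(by_time.groups.keys, by_time.groups):
    print('{:8.3f} sec: {} dark frames'.format(key['exptime'], len(group)))
```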
115 | ] 116 | }, 117 | { 118 | "cell_type": "code", 119 | "execution_count": null, 120 | "metadata": {}, 121 | "outputs": [], 122 | "source": [ 123 | "darks = reduced_images.summary['imagetyp'] == 'DARK'\n", 124 | "dark_times = set(reduced_images.summary['exptime'][darks])\n", 125 | "print(dark_times)" 126 | ] 127 | }, 128 | { 129 | "cell_type": "markdown", 130 | "metadata": {}, 131 | "source": [ 132 | "The code below loops over the dark exposure times and, for each exposure time:\n", 133 | "\n", 134 | "+ selects the relevant calibrated dark images,\n", 135 | "+ combines them using the `combine` function,\n", 136 | "+ adds the keyword `COMBINED` to the header so that later calibration steps can\n", 137 | "easily identify which bias to use, and\n", 138 | "+ writes the file whose name includes the exposure time." 139 | ] 140 | }, 141 | { 142 | "cell_type": "code", 143 | "execution_count": null, 144 | "metadata": {}, 145 | "outputs": [], 146 | "source": [ 147 | "for exp_time in sorted(dark_times):\n", 148 | " calibrated_darks = reduced_images.files_filtered(imagetyp='dark', exptime=exp_time,\n", 149 | " include_path=True)\n", 150 | "\n", 151 | " combined_dark = ccdp.combine(calibrated_darks,\n", 152 | " method='average',\n", 153 | " sigma_clip=True, sigma_clip_low_thresh=5, sigma_clip_high_thresh=5,\n", 154 | " sigma_clip_func=np.ma.median, sigma_clip_dev_func=mad_std,\n", 155 | " mem_limit=350e6\n", 156 | " )\n", 157 | "\n", 158 | " combined_dark.meta['combined'] = True\n", 159 | "\n", 160 | " dark_file_name = 'combined_dark_{:6.3f}.fit'.format(exp_time)\n", 161 | " combined_dark.write(calibrated_path / dark_file_name)" 162 | ] 163 | }, 164 | { 165 | "cell_type": "markdown", 166 | "metadata": {}, 167 | "source": [ 168 | "### Result for Example 1" 169 | ] 170 | }, 171 | { 172 | "cell_type": "markdown", 173 | "metadata": {}, 174 | "source": [ 175 | "A single calibrated 300 second dark image and the combined 300 second image are\n", 176 | "shown below." 177 | ] 178 | }, 179 | { 180 | "cell_type": "code", 181 | "execution_count": null, 182 | "metadata": {}, 183 | "outputs": [], 184 | "source": [ 185 | "fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(20, 10))\n", 186 | "\n", 187 | "show_image(CCDData.read(calibrated_darks[0]).data, cmap='gray', ax=ax1, fig=fig)\n", 188 | "ax1.set_title('Single calibrated dark')\n", 189 | "show_image(combined_dark.data, cmap='gray', ax=ax2, fig=fig)\n", 190 | "ax2.set_title('{} dark images combined'.format(len(calibrated_darks)))" 191 | ] 192 | }, 193 | { 194 | "cell_type": "markdown", 195 | "metadata": {}, 196 | "source": [ 197 | "## Example 2: Thermoelectrically-cooled camera\n", 198 | "\n", 199 | "The process for combining the images is exactly the same as in example 1. The\n", 200 | "only difference is the directory that contains the calibrated bias frames." 201 | ] 202 | }, 203 | { 204 | "cell_type": "code", 205 | "execution_count": null, 206 | "metadata": {}, 207 | "outputs": [], 208 | "source": [ 209 | "calibrated_path = Path('example2-reduced')\n", 210 | "reduced_images = ccdp.ImageFileCollection(calibrated_path)" 211 | ] 212 | }, 213 | { 214 | "cell_type": "markdown", 215 | "metadata": {}, 216 | "source": [ 217 | "### Make a combined image for each exposure time in Example 2" 218 | ] 219 | }, 220 | { 221 | "cell_type": "markdown", 222 | "metadata": {}, 223 | "source": [ 224 | "In this example there are only darks of a single exposure time." 
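Whichever example you are working through, the before/after comparison in the figures can be put on a quantitative footing with a robust width estimate. The helper below is only a sketch (the name is invented here); after the combination step below has run, it can be called with a single calibrated dark and the combined dark.

```python
# Hypothetical helper: robust scatter of a single dark versus the combined dark.
def dark_scatter(single_file, combined_ccd):
    single = CCDData.read(single_file)
    return mad_std(single.data), mad_std(combined_ccd.data)


# e.g., after the loop below has run:
# dark_scatter(calibrated_darks[0], combined_dark)
```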
225 | ] 226 | }, 227 | { 228 | "cell_type": "code", 229 | "execution_count": null, 230 | "metadata": {}, 231 | "outputs": [], 232 | "source": [ 233 | "darks = reduced_images.summary['imagetyp'] == 'DARK'\n", 234 | "dark_times = set(reduced_images.summary['exptime'][darks])\n", 235 | "print(dark_times)" 236 | ] 237 | }, 238 | { 239 | "cell_type": "markdown", 240 | "metadata": {}, 241 | "source": [ 242 | "Despite the fact that there is only one exposure time, we might as well reuse\n", 243 | "the code from above." 244 | ] 245 | }, 246 | { 247 | "cell_type": "code", 248 | "execution_count": null, 249 | "metadata": {}, 250 | "outputs": [], 251 | "source": [ 252 | "for exp_time in sorted(dark_times):\n", 253 | "    calibrated_darks = reduced_images.files_filtered(imagetyp='dark', exptime=exp_time,\n", 254 | "                                                     include_path=True)\n", 255 | "\n", 256 | "    combined_dark = ccdp.combine(calibrated_darks,\n", 257 | "                                 method='average',\n", 258 | "                                 sigma_clip=True, sigma_clip_low_thresh=5, sigma_clip_high_thresh=5,\n", 259 | "                                 sigma_clip_func=np.ma.median, sigma_clip_dev_func=mad_std,\n", 260 | "                                 mem_limit=350e6\n", 261 | "                                )\n", 262 | "\n", 263 | "    combined_dark.meta['combined'] = True\n", 264 | "\n", 265 | "    dark_file_name = 'combined_dark_{:6.3f}.fit'.format(exp_time)\n", 266 | "    combined_dark.write(calibrated_path / dark_file_name)" 267 | ] 268 | }, 269 | { 270 | "cell_type": "markdown", 271 | "metadata": {}, 272 | "source": [ 273 | "### Result for Example 2" 274 | ] 275 | }, 276 | { 277 | "cell_type": "markdown", 278 | "metadata": {}, 279 | "source": [ 280 | "The difference between a single calibrated dark image and the combined dark\n", 281 | "image is much clearer in this case." 282 | ] 283 | }, 284 | { 285 | "cell_type": "code", 286 | "execution_count": null, 287 | "metadata": {}, 288 | "outputs": [], 289 | "source": [ 290 | "fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(20, 10))\n", 291 | "\n", 292 | "show_image(CCDData.read(calibrated_darks[0]).data, cmap='gray', ax=ax1, fig=fig)\n", 293 | "ax1.set_title('Single calibrated dark')\n", 294 | "show_image(combined_dark.data, cmap='gray', ax=ax2, fig=fig)\n", 295 | "ax2.set_title('{} dark images combined'.format(len(calibrated_darks)))" 296 | ] 297 | } 298 | ], 299 | "metadata": { 300 | "kernelspec": { 301 | "display_name": "Python 3", 302 | "language": "python", 303 | "name": "python3" 304 | }, 305 | "language_info": { 306 | "codemirror_mode": { 307 | "name": "ipython", 308 | "version": 3 309 | }, 310 | "file_extension": ".py", 311 | "mimetype": "text/x-python", 312 | "name": "python", 313 | "nbconvert_exporter": "python", 314 | "pygments_lexer": "ipython3", 315 | "version": "3.6.8" 316 | } 317 | }, 318 | "nbformat": 4, 319 | "nbformat_minor": 4 320 | } 321 | -------------------------------------------------------------------------------- /notebooks/test-toc.rst: -------------------------------------------------------------------------------- 1 | `Preface <00.00-Preface.ipynb>`__ 2 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 3 | 4 | `1.
Understanding an astronomical CCD image <01.00-Understanding-an-astronomical-CCD-image.ipynb>`__ 5 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 6 | 7 | - `Construction of an artificial (but realistic) 8 | image <01.04-Construction-of-an-artificial-(but-realistic)-image.ipynb>`__ 9 | - `Calibration overview <01.05-Calibration-overview.ipynb>`__ 10 | - `Image combination <01.06-Image-combination.ipynb>`__ 11 | - `Calibration choices you need to 12 | make <01.07-Calibration-choices-you-need-to-make.ipynb>`__ 13 | - `Reading images <01.08-reading-images.ipynb>`__ 14 | 15 | `2. Handling overscan, trimming, and bias subtraction <02.00-Handling-overscan,-trimming,-and-bias-subtraction.ipynb>`__ 16 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 17 | 18 | - `Inspect your images and make a choice about next 19 | steps <02.01-Inspect-your-images-and-make-a-choice-about-next-steps.ipynb>`__ 20 | - `Subtract overscan, if 21 | desired <02.02-Subtract-overscan,-if-desired.ipynb>`__ 22 | - `Trim, if needed <02.03-Trim,-if-needed.ipynb>`__ 23 | - `Combine bias images to make 24 | master <02.04-Combine-bias-images-to-make-master.ipynb>`__ 25 | 26 | `3. Dark current and hot pixels <03.00-Dark-current-and-hot-pixels.ipynb>`__ 27 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 28 | 29 | - `The ideal case: your dark frames measure dark current, which scales 30 | linearly with 31 | time <03.01-The-ideal-case:-your-dark-frames-measure-dark-current,-which-scales-linearly-with-time.ipynb>`__ 32 | - `Reality: most of your dark frame is noise and not all of the time 33 | dependent artifacts are dark 34 | current <03.02-Reality:-most-of-your-dark-frame-is-noise-and-not-all-of-the-time-dependent-artifacts-are-dark-current.ipynb>`__ 35 | - `Identifying hot pixels <03.03-Identifying-hot-pixels.ipynb>`__ 36 | - `Make a choice about next steps for 37 | darks <03.04-Make-a-choice-about-next-steps-for-darks.ipynb>`__ 38 | - `Subtract bias, if 39 | necessary <03.05-Subtract-bias,-if-necessary.ipynb>`__ 40 | 41 | `4. Interlude: Image masking <04.00-Interlude:-Image-masking.ipynb>`__ 42 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 43 | 44 | - `Identifying bad pixels <04.01-Identifying-bad-pixels.ipynb>`__ 45 | - `Creating a mask <04.02-Creating-a-mask.ipynb>`__ 46 | - `incorporating the mask in 47 | reduction <04.03-incorporating-the-mask-in-reduction.ipynb>`__ 48 | 49 | `5. Flat corrections <05.00-Flat-corrections.ipynb>`__ 50 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 51 | 52 | - `There are no perfect 53 | flats <05.01-There-are-no-perfect-flats.ipynb>`__ 54 | - `Make a choice about next steps for 55 | flats <05.02-Make-a-choice-about-next-steps-for-flats.ipynb>`__ 56 | - `Calibrating the flats <05.03-Calibrating-the-flats.ipynb>`__ 57 | - `Combining flats <05.04-Combining-flats.ipynb>`__ 58 | 59 | `Reducing science images <06.00-Reducing-science-images.ipynb>`__ 60 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 61 | 62 | - `Initial reduction <06.01-Initial-reduction.ipynb>`__ 63 | - `Cosmic ray removal <06.02-Cosmic-ray-removal.ipynb>`__ 64 | 65 | `7. 
Combining images <07.00-Combining-images.ipynb>`__ 66 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 67 | 68 | - `Combine without aligning to create a sky 69 | flat <07.01-Combine-without-aligning-to-create-a-sky-flat.ipynb>`__ 70 | - `Combination with alignment via 71 | WCS <07.02-Combination-with-alignment-via-WCS.ipynb>`__ 72 | - `Combination with alignment based on star positions in the 73 | image <07.03-Combination-with-alignment-based-on-star-positions-in-the-image.ipynb>`__ 74 | 75 | ###################################################################### 76 | 77 | `Preface `__ 78 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 79 | 80 | `1. Understanding an astronomical CCD image `__ 81 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 82 | 83 | - `Construction of an artificial (but realistic) 84 | image `__ 85 | - `Calibration 86 | overview `__ 87 | - `Image 88 | combination `__ 89 | - `Calibration choices you need to 90 | make `__ 91 | - `Reading 92 | images `__ 93 | 94 | `2. Handling overscan, trimming, and bias subtraction `__ 95 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 96 | 97 | - `Inspect your images and make a choice about next 98 | steps `__ 99 | - `Subtract overscan, if 100 | desired `__ 101 | - `Trim, if 102 | needed `__ 103 | - `Combine bias images to make 104 | master `__ 105 | 106 | `3. Dark current and hot pixels `__ 107 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 108 | 109 | - `The ideal case: your dark frames measure dark current, which scales 110 | linearly with 111 | time `__ 112 | - `Reality: most of your dark frame is noise and not all of the time 113 | dependent artifacts are dark 114 | current `__ 115 | - `Identifying hot 116 | pixels `__ 117 | - `Make a choice about next steps for 118 | darks `__ 119 | - `Subtract bias, if 120 | necessary `__ 121 | 122 | `4. Interlude: Image masking `__ 123 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 124 | 125 | - `Identifying bad 126 | pixels `__ 127 | - `Creating a 128 | mask `__ 129 | - `incorporating the mask in 130 | reduction `__ 131 | 132 | `5. Flat corrections `__ 133 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 134 | 135 | - `There are no perfect 136 | flats `__ 137 | - `Make a choice about next steps for 138 | flats `__ 139 | - `Calibrating the 140 | flats `__ 141 | - `Combining 142 | flats `__ 143 | 144 | `Reducing science images `__ 145 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 146 | 147 | - `Initial 148 | reduction `__ 149 | - `Cosmic ray 150 | removal `__ 151 | 152 | `7. 
Combining images `__ 153 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 154 | 155 | - `Combine without aligning to create a sky 156 | flat `__ 157 | - `Combination with alignment via 158 | WCS `__ 159 | - `Combination with alignment based on star positions in the 160 | image `__ 161 | -------------------------------------------------------------------------------- /notebooks/01-05-Calibration-overview.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Calibration overview\n" 8 | ] 9 | }, 10 | { 11 | "cell_type": "markdown", 12 | "metadata": {}, 13 | "source": [ 14 | "An image of the sky contains counts from several sources. The task of data\n", 15 | "reduction (another name for image calibration) is to remove all non-celestial\n", 16 | "counts from the image and to correct for non-uniform sensitivity.\n", 17 | "\n", 18 | "At the end of the previous notebook we arrived at an expression for the counts\n", 19 | "in a science image in terms of the sources of counts:\n", 20 | "\n", 21 | "$$\n", 22 | "\\text{raw image} = \\text{bias} + \\text{noise} + \\text{dark current} + \\text{flat} \\times (\\text{sky} + \\text{stars}).\n", 23 | "$$\n", 24 | "\n", 25 | "Solving for the counts just from the stars is as follows:\n", 26 | "\n", 27 | "$$\n", 28 | "\\text{stars} + \\text{noise} = \\frac{\\text{raw image} - \\text{bias} - \\text{dark current}}{\\text{flat}} - \\text{sky}\n", 29 | "$$\n", 30 | "\n", 31 | "**It is *impossible* to remove the noise from the raw image because the noise is\n", 32 | "random.**\n", 33 | "\n", 34 | "The dark current is typically calculated from a *dark frame* (aka dark image).\n", 35 | "Such an image has bias and read noise in it as well, so:\n", 36 | "\n", 37 | "$$\n", 38 | "\\text{dark current} + \\text{noise} = (\\text{dark frame} - \\text{bias})/(\\text{dark exposure time})\n", 39 | "$$\n", 40 | "\n", 41 | "Once again, note that the noise cannot be removed." 42 | ] 43 | }, 44 | { 45 | "cell_type": "markdown", 46 | "metadata": {}, 47 | "source": [ 48 | "## This noise cannot be removed from CCD images\n", 49 | "\n", 50 | "To demonstrate that you cannot remove the noise from an image, let's construct\n", 51 | "an image with just stars and noise and try to subtract a noise image created\n", 52 | "with the same parameters. The amount of noise here is exaggerated to make it\n", 53 | "clear in the images." 
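The images below make the point visually; the underlying reason is that variances of independent noise add, so subtracting a second, statistically identical noise image makes the result noisier by a factor of about 1.4 (the square root of two) rather than cleaner. A quick numerical check with synthetic numbers (the 1500 matches the noise level used in the images below):

```python
import numpy as np

rng = np.random.default_rng(seed=0)
noise_1 = rng.normal(scale=1500, size=100_000)
noise_2 = rng.normal(scale=1500, size=100_000)   # same parameters, new realization

print(noise_1.std())               # close to 1500
print((noise_1 - noise_2).std())   # close to 1500 * sqrt(2), about 2100
```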
54 | ] 55 | }, 56 | { 57 | "cell_type": "code", 58 | "execution_count": null, 59 | "metadata": {}, 60 | "outputs": [], 61 | "source": [ 62 | "import numpy as np\n", 63 | "%matplotlib inline\n", 64 | "from matplotlib import pyplot as plt\n", 65 | "\n", 66 | "from astropy.visualization import hist\n", 67 | "from astropy.stats import histogram\n", 68 | "\n", 69 | "import image_sim as imsim\n", 70 | "from convenience_functions import show_image" 71 | ] 72 | }, 73 | { 74 | "cell_type": "code", 75 | "execution_count": null, 76 | "metadata": {}, 77 | "outputs": [], 78 | "source": [ 79 | "# Use custom style for larger fonts and figures\n", 80 | "plt.style.use('guide.mplstyle')" 81 | ] 82 | }, 83 | { 84 | "cell_type": "markdown", 85 | "metadata": {}, 86 | "source": [ 87 | "### First, some stars with noise\n", 88 | "\n", 89 | "The image below shows stars (the larger \"blobs\" in the image) but shows quite a\n", 90 | "bit of noise as well (the much smaller \"dots\")." 91 | ] 92 | }, 93 | { 94 | "cell_type": "code", 95 | "execution_count": null, 96 | "metadata": {}, 97 | "outputs": [], 98 | "source": [ 99 | "image = np.zeros([2000, 2000])\n", 100 | "gain = 1.0\n", 101 | "noise_amount = 1500 \n", 102 | "\n", 103 | "stars_with_noise = imsim.stars(image, 50, max_counts=2000, fwhm=10) + imsim.read_noise(image, noise_amount, gain=gain)\n", 104 | "\n", 105 | "show_image(stars_with_noise, cmap='gray', percu=99.9)\n", 106 | "plt.title('Stars with noise')" 107 | ] 108 | }, 109 | { 110 | "cell_type": "markdown", 111 | "metadata": {}, 112 | "source": [ 113 | "### Now an *incorrect* attempt at reducing noise\n", 114 | "\n", 115 | "Notice that the call to the noise function has exactly the same arguments as\n", 116 | "above, in much the same way your camera's electronics will have the same noise\n", 117 | "properties every time you read out an image.\n", 118 | "\n", 119 | "However, the amount of noise has **increased**, not decreased. It's much harder\n", 120 | "to pick out the stars in this image." 121 | ] 122 | }, 123 | { 124 | "cell_type": "code", 125 | "execution_count": null, 126 | "metadata": {}, 127 | "outputs": [], 128 | "source": [ 129 | "incorrect_attempt_to_remove_noise = stars_with_noise - imsim.read_noise(image, noise_amount, gain=gain)\n", 130 | "\n", 131 | "show_image(incorrect_attempt_to_remove_noise, cmap='gray', percu=99.9)" 132 | ] 133 | }, 134 | { 135 | "cell_type": "markdown", 136 | "metadata": {}, 137 | "source": [ 138 | "## Every image has noise\n", 139 | "\n", 140 | "Every image, including calibration images like bias and dark frames, has noise.\n", 141 | "If we tried to calibrate images by taking a single bias image and a single dark\n", 142 | "image, the final result might well look worse than before the image is reduced.\n", 143 | "\n", 144 | "For demonstration, we'll see what happens below.\n", 145 | "\n", 146 | "Note that here we construct *realistic* bias and dark, but leave read noise out\n", 147 | "of the flat; we'll return to that point later." 148 | ] 149 | }, 150 | { 151 | "cell_type": "markdown", 152 | "metadata": {}, 153 | "source": [ 154 | "### First, set parameters for the CCD\n", 155 | "\n", 156 | "These are the same as in the previous notebook, except for the read noise, which\n", 157 | "is 700$e-$, 100 times larger than in the previous notebook." 
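To put that number in context, here is a rough aside using the values set in the next cell: the Poisson noise from the sky and from the dark current is tiny compared with a 700 $e^-$ read noise, which is why this example is described later as unrealistically noisy.

```python
import numpy as np

sky_counts = 20                    # per pixel, as set below
dark_counts = 0.1 * 30.0           # dark current rate times the 30 sec exposure
read_noise_electrons = 700         # deliberately exaggerated

print(np.sqrt(sky_counts))         # about 4.5
print(np.sqrt(dark_counts))        # about 1.7
print(read_noise_electrons)        # 700 -- dominates everything here
```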
158 | ] 159 | }, 160 | { 161 | "cell_type": "code", 162 | "execution_count": null, 163 | "metadata": {}, 164 | "outputs": [], 165 | "source": [ 166 | "gain = 1.0\n", 167 | "star_exposure = 30.0\n", 168 | "dark_exposure = 60.0\n", 169 | "dark = 0.1\n", 170 | "sky_counts = 20\n", 171 | "bias_level = 1100\n", 172 | "read_noise_electrons = 700\n", 173 | "max_star_counts = 2000" 174 | ] 175 | }, 176 | { 177 | "cell_type": "markdown", 178 | "metadata": {}, 179 | "source": [ 180 | "### Generate the images, with noise" 181 | ] 182 | }, 183 | { 184 | "cell_type": "code", 185 | "execution_count": null, 186 | "metadata": {}, 187 | "outputs": [], 188 | "source": [ 189 | "bias_with_noise = (imsim.bias(image, bias_level, realistic=True) + \n", 190 | " imsim.read_noise(image, read_noise_electrons, gain=gain))\n", 191 | "\n", 192 | "dark_frame_with_noise = (imsim.bias(image, bias_level, realistic=True) + \n", 193 | " imsim.dark_current(image, dark, dark_exposure, gain=gain, hot_pixels=True) +\n", 194 | " imsim.read_noise(image, read_noise_electrons, gain=gain))\n", 195 | "\n", 196 | "flat = imsim.sensitivity_variations(image)" 197 | ] 198 | }, 199 | { 200 | "cell_type": "code", 201 | "execution_count": null, 202 | "metadata": {}, 203 | "outputs": [], 204 | "source": [ 205 | "realistic_stars = (imsim.stars(image, 50, max_counts=max_star_counts) +\n", 206 | " imsim.dark_current(image, dark, star_exposure, gain=gain, hot_pixels=True) +\n", 207 | " imsim.bias(image, bias_level, realistic=True) +\n", 208 | " imsim.read_noise(image, read_noise_electrons, gain=gain)\n", 209 | " )" 210 | ] 211 | }, 212 | { 213 | "cell_type": "markdown", 214 | "metadata": {}, 215 | "source": [ 216 | "### Uncalibrated image\n", 217 | "\n", 218 | "Below we display the uncalibrated image; in a moment we'll compare it to the\n", 219 | "calibrated version. Even though they don't stand out there really are stars in\n", 220 | "it." 221 | ] 222 | }, 223 | { 224 | "cell_type": "code", 225 | "execution_count": null, 226 | "metadata": {}, 227 | "outputs": [], 228 | "source": [ 229 | "plt.figure(figsize=(12, 12))\n", 230 | "show_image(realistic_stars, cmap='gray', percu=99.9, figsize=(9, 9))" 231 | ] 232 | }, 233 | { 234 | "cell_type": "markdown", 235 | "metadata": {}, 236 | "source": [ 237 | "### Reduce (calibrate) the star image\n", 238 | "\n", 239 | "First we calculate the dark current, scaled to the exposure time of our light\n", 240 | "image." 241 | ] 242 | }, 243 | { 244 | "cell_type": "code", 245 | "execution_count": null, 246 | "metadata": {}, 247 | "outputs": [], 248 | "source": [ 249 | "scaled_dark_current = star_exposure * (dark_frame_with_noise - bias_with_noise) / dark_exposure" 250 | ] 251 | }, 252 | { 253 | "cell_type": "markdown", 254 | "metadata": {}, 255 | "source": [ 256 | "Next, we subtract the bias and dark current from the star image and then apply\n", 257 | "the flat correction." 
258 | ] 259 | }, 260 | { 261 | "cell_type": "code", 262 | "execution_count": null, 263 | "metadata": {}, 264 | "outputs": [], 265 | "source": [ 266 | "calibrated_stars = (realistic_stars - bias_with_noise - scaled_dark_current) / flat" 267 | ] 268 | }, 269 | { 270 | "cell_type": "code", 271 | "execution_count": null, 272 | "metadata": {}, 273 | "outputs": [], 274 | "source": [ 275 | "show_image(calibrated_stars, cmap='gray', percu=99.9)" 276 | ] 277 | }, 278 | { 279 | "cell_type": "markdown", 280 | "metadata": {}, 281 | "source": [ 282 | "### Reducing the image cleans up the image a bit\n", 283 | "\n", 284 | "The stars stand out more clearly than in the unreduced image.\n", 285 | "\n", 286 | "This image does not look *much* better than the uncalibrated image, but remember\n", 287 | "that the read noise used in this simulated image, 700 $e^-$ per pixel, is\n", 288 | "unrealistically high." 289 | ] 290 | }, 291 | { 292 | "cell_type": "markdown", 293 | "metadata": {}, 294 | "source": [ 295 | "### Reducing the image increases the noise in the image\n", 296 | "\n", 297 | "The histogram below shows pixel values before and after calibration. The width\n", 298 | "of the distribution is a measure of the read noise. As expected, reducing the\n", 299 | "image increases the read noise. One reason to take several calibration images\n", 300 | "of each type is to reduce the amount of noise in the calibration image. That\n", 301 | "will, in turn, keep the noise in the final image as small as possible." 302 | ] 303 | }, 304 | { 305 | "cell_type": "code", 306 | "execution_count": null, 307 | "metadata": {}, 308 | "outputs": [], 309 | "source": [ 310 | "plt.figure(figsize=(9, 9))\n", 311 | "hist(calibrated_stars.flatten(), bins='freedman', label='calibrated star image', alpha=0.5)\n", 312 | "hist(stars_with_noise.flatten(), bins='freedman', label='raw star image', alpha=0.5)\n", 313 | "plt.legend()\n", 314 | "plt.grid()\n", 315 | "plt.xlabel('Count level in image')\n", 316 | "plt.ylabel('Number of pixels with that count');" 317 | ] 318 | } 319 | ], 320 | "metadata": { 321 | "kernelspec": { 322 | "display_name": "Python 3", 323 | "language": "python", 324 | "name": "python3" 325 | }, 326 | "language_info": { 327 | "codemirror_mode": { 328 | "name": "ipython", 329 | "version": 3 330 | }, 331 | "file_extension": ".py", 332 | "mimetype": "text/x-python", 333 | "name": "python", 334 | "nbconvert_exporter": "python", 335 | "pygments_lexer": "ipython3", 336 | "version": "3.6.8" 337 | } 338 | }, 339 | "nbformat": 4, 340 | "nbformat_minor": 2 341 | } 342 | -------------------------------------------------------------------------------- /notebooks/add_github_links.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | from hashlib import md5 3 | import random 4 | import time 5 | import os 6 | import re 7 | 8 | from github3 import login 9 | import nbformat as nbf 10 | 11 | from link_fix import markdown_cells 12 | 13 | DEFAULT_COMMENT_GROUP = 'default-comment-group' 14 | 15 | 16 | def github_magic(nb_file_for_book, original_notebook, 17 | comment_group=DEFAULT_COMMENT_GROUP): 18 | """ 19 | Add links in nb_file to lines in a PR opened just for commenting 20 | on this specific file. 21 | """ 22 | # 5. Scan the notebook for section headers (level 2 or 3). <---- BOTH 23 | # 6. Get line numbers IN THE ORIGINAL notebook of these headers. <--- ORIG 24 | # 7. Add a link after the header with text something like that below.
<--- BOTH 25 | # Link is to the magical github place for making comments. 26 | # Done! 27 | repo = get_github_repo('mwcraig', 'ccd-reduction-and-photometry-guide') 28 | 29 | base_url = \ 30 | create_pr_for_commenting(original_notebook, repo, 31 | comment_group=comment_group) 32 | 33 | heading_in_original = find_headers(original_notebook, 34 | highest_level=2, 35 | lowest_level=3) 36 | 37 | comment_link_text = ('*Click here to comment on this section on ' 38 | 'GitHub (opens in new tab).*') 39 | 40 | cell_content_to_insert = \ 41 | {k: f'\n[{comment_link_text}]({base_url + str(v)})' + 42 | '{:target="_blank"}\n' 43 | for k, v in heading_in_original.items()} 44 | 45 | book_nb = nbf.read(nb_file_for_book, as_version=4) 46 | 47 | for cell in markdown_cells(book_nb): 48 | for k, v in cell_content_to_insert.items(): 49 | if k in cell['source']: 50 | pre, post = cell['source'].split(k) 51 | new_source = pre + k + v + post 52 | cell['source'] = new_source 53 | with open(nb_file_for_book, 'w') as fp: 54 | nbf.write(book_nb, fp) 55 | 56 | 57 | def get_github_repo(owner, repo): 58 | """ 59 | Log in to github and retrieve a reference to the requested 60 | repository. 61 | """ 62 | token = os.getenv('GITHUB_TOKEN') 63 | 64 | if not token: 65 | raise RuntimeError('Set GITHUB_TOKEN to a ' 66 | 'github token before running.') 67 | gh = login(token=token) 68 | 69 | return gh.repository(owner, repo) 70 | 71 | 72 | def create_pr_for_commenting(original_nb, repo, 73 | comment_group=DEFAULT_COMMENT_GROUP): 74 | """ 75 | Create a new PR for the notebook original_nb against a (nearly) empty 76 | branch to make a clean difference for commenting. 77 | 78 | PRs/branches (which are necessary for opening the PRs) are labeled 79 | with ``comment_group`` to make it easier to process them in bulk later. 80 | 81 | 82 | Parameters 83 | ---------- 84 | 85 | original_nb : str 86 | Name of the notebook for which the branch/PR is to be created. 87 | 88 | repo : github3.py Repo object 89 | Repository in which the PR/branch is to be created. 90 | 91 | comment_group : str, optional 92 | Name of the label applied to the PR opened with this function. 93 | 94 | Returns 95 | ------- 96 | 97 | str 98 | URL to which the line number needs to be appended for a link directly 99 | to this file/line in the github PR. 100 | """ 101 | 102 | # We are going to bling the h*ck out of this. Adding labels! 103 | # With colors set by the hash of the comment group name! 104 | label_name = comment_group 105 | label_color = md5(label_name.encode()).hexdigest()[:6] 106 | label_description = f'For commenting as part of {comment_group}' 107 | 108 | # We'll make new branches off the initial commit to get a nice, 109 | # clean diff. 110 | first_commit = '6e20c1c2f5ef09206f02a5f5f67fcd818859a8c9' 111 | 112 | # Pull requests are made against this branch, which was also from the 113 | # first commit. Again, gives nice diffs. 114 | pr_base = 'for-making-comments' 115 | branch_name = f'{comment_group}/{original_nb}' 116 | # 1. Add a branch for this file. Name is tag-file_name. 
<--- ORIG 117 | _ = repo.create_branch_ref(branch_name, sha=first_commit) 118 | with open(original_nb, 'rb') as f: 119 | nb_content = f.read() 120 | file_name = f'notebooks/{original_nb}' 121 | commit_msg = f'Only for commenting, part of {comment_group}' 122 | repo.create_file(file_name, commit_msg, nb_content, branch=branch_name) 123 | pr_title = f'For commenting on {original_nb} (part of {comment_group})' 124 | new_pr = repo.create_pull(pr_title, pr_base, branch_name) 125 | 126 | # So labels get added to issues, not PRs... 127 | pr_issue = new_pr.issue() 128 | 129 | # This should be the only label, grab it.. 130 | label = pr_issue.add_labels(label_name)[0] 131 | # ...and add some bling! 132 | label.update(label_name, label_color, label_description) 133 | 134 | # Only one file, so our notebook will be it. 135 | this_notebook = [f for f in new_pr.files()][0] 136 | 137 | # The md5 has of the filename is part of the link for commenting. 138 | m = md5(this_notebook.filename.encode()) 139 | 140 | # The only thing this URL needs added to it to go to a specific line 141 | # in the file is the line number. 142 | # About that "R" on the end: It indicates the line number is for the 143 | # "right" side of the difference, which turns out to be where our 144 | # file ends up. 145 | base_url_for_comment = (new_pr.html_url + 146 | f'/files#diff-{m.hexdigest()}' + 147 | 'R') 148 | 149 | return base_url_for_comment 150 | 151 | 152 | def commentify_all_notebooks(book_nb_path, original_nb_path, 153 | comment_group=DEFAULT_COMMENT_GROUP): 154 | """ 155 | Add comment-on-github links to each notebook in the book. 156 | """ 157 | book_content_p = Path(book_nb_path) 158 | original_p = Path(original_nb_path) 159 | 160 | to_comment = [b for b in book_content_p.glob('??-??-*.ipynb')] 161 | originals = [original_p / book.name for book in to_comment] 162 | 163 | if not all(o.exists() for o in originals): 164 | raise RuntimeError('One of the files does not exist in originals') 165 | 166 | for book, original in zip(to_comment, originals): 167 | print(f'on {book.name}') 168 | github_magic(str(book), str(original), comment_group=comment_group) 169 | # Don't be greedy 170 | time.sleep(1) 171 | 172 | 173 | def find_headers(notebook_name, highest_level=2, lowest_level=3): 174 | """ 175 | Find all headers in the specified range in the notebook. 176 | 177 | Parameters 178 | ---------- 179 | notebook_name : str 180 | Name of a Jupyter notebook. 181 | 182 | highest_level : int, optional 183 | The highest level header to be identified (1 is highest, 6 is lowest). 184 | 185 | lowest_level : int, optional 186 | The lowest level header to be identified (1 is highest, 6 is lowest). 187 | Must be less than or equal to ``highest_level``. 188 | 189 | Returns 190 | ------- 191 | 192 | dict 193 | Keys of the dictionary are the headings (including the leading 194 | hashtags), values are the line number on which the heading 195 | appears in the json source of the notebook. 196 | """ 197 | headings = {} 198 | 199 | # No idea at all why any line number offset is needed, but this 200 | # seems to do the trick. 201 | line_number_offset = 1 202 | 203 | # Generate the part of the regex pattern that represents the hashtags 204 | # that are the beginning of a heading. 
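# For the default levels (2 and 3) the loop below builds ['##', '###'], so the
# compiled pattern is ((##|###) +[a-zA-Z].+?\n) and it matches heading lines
# such as '## Some heading'.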
205 | hashtags = [] 206 | for level in range(highest_level, lowest_level + 1): 207 | hashtags.append('#' * level) 208 | 209 | hashtags = '|'.join(hashtags) 210 | 211 | header = re.compile(r'(' + f'({hashtags})' + r' +[a-zA-Z].+?\n)') 212 | 213 | notebook = nbf.read(notebook_name, as_version=4) 214 | for cell in markdown_cells(notebook): 215 | groups = [g for g in re.finditer(header, cell['source'])] 216 | for g in groups: 217 | # We have a header, will get line numbers shortly 218 | headings[g.group(0)] = -1 219 | 220 | with open(notebook_name, 'r') as f: 221 | nb_lines = f.readlines() 222 | 223 | for head in headings.keys(): 224 | for line_num, line in enumerate(nb_lines): 225 | if head[:-1] in line: 226 | if headings[head] > 0: 227 | print(f'Oh no! Bad {notebook_name}') 228 | print(f'...duplicate heading: {head}') 229 | raise RuntimeError('oh no') 230 | headings[head] = line_num + line_number_offset 231 | 232 | return headings 233 | 234 | 235 | def delete_branches_prs(comment_group, repo): 236 | """ 237 | WARNING: THIS IRREVERSIBLY NUKES STUFF ON GITHUB. It does not in any way 238 | affect local files. 239 | 240 | Close all PRs and delete all branches with the label whose 241 | name is ``comment_group``. 242 | 243 | Parameters 244 | ---------- 245 | 246 | comment_group : str 247 | The label used for PRs/branches in this bundle for review. 248 | 249 | repo : github3 Repo 250 | The repository on which to act. 251 | """ 252 | # 1 Get a list of the "issues" with this label 253 | pr_issues = [_ for _ in repo.issues(labels=comment_group)] 254 | # 2 GEt a list of refs to use later for deleting branches 255 | refs = [ref for ref in repo.refs()] 256 | 257 | # Buildup lists of closures/deletions so that the user can be warned 258 | # of what will be deleted. 259 | PRs_to_close = [] 260 | branches_to_delete = [] 261 | 262 | for pri in pr_issues: 263 | # 2. Get the PR 264 | pr = pri.pull_request() 265 | # 3. Get name of the branch 266 | head = pr.head 267 | # 4. Add the pr to the closure list 268 | PRs_to_close.append(pr) 269 | 270 | # 5. Find the ref that matches this branch 271 | for ref in refs: 272 | if ref.ref.endswith(head.ref): 273 | # Add the branch to the deletion list 274 | branches_to_delete.append(ref) 275 | break 276 | else: 277 | raise RuntimeError(f'No ref for branch {head.ref} found') 278 | 279 | warning = [f"Pull requests to be closed ({len(PRs_to_close)}):\n"] 280 | for pr in PRs_to_close: 281 | warning.append(f" - #{pr.number} {pr.title}") 282 | warning.append(f"\t\t{pr.html_url}") 283 | 284 | response = '' 285 | print('\n'.join(warning), '\n') 286 | while response not in ['yes', 'no']: 287 | response = input("DO YOU WANT TO CLOSE " 288 | "THESE PRs (yes or no)? > ") 289 | 290 | if response == 'yes': 291 | for pr in PRs_to_close: 292 | # Be a good citizen 293 | time.sleep(random.uniform(0.1, 0.5)) 294 | pr.close() 295 | 296 | print('\n\n') 297 | warning = [f"These {len(branches_to_delete)} branches will " 298 | f"be PERMANENTLY DELETED:\n"] 299 | 300 | for branch in branches_to_delete: 301 | warning.append(f"\t{branch.ref}") 302 | 303 | response = '' 304 | print('\n'.join(warning), '\n') 305 | 306 | while response not in ['yes', 'no']: 307 | response = input("DO YOU WANT TO DELETE THESE " 308 | "BRANCHES (yes or no)? 
> ") 309 | 310 | if response == 'yes': 311 | for branch in branches_to_delete: 312 | branch.delete() 313 | -------------------------------------------------------------------------------- /notebooks/image_sim.py: -------------------------------------------------------------------------------- 1 | 2 | import numpy as np 3 | 4 | from astropy.modeling.models import Gaussian2D, MexicanHat2D, Const2D 5 | from photutils.datasets import (make_random_gaussians_table, 6 | make_gaussian_sources_image) 7 | from photutils.aperture import EllipticalAperture 8 | 9 | 10 | def read_noise(image, amount, gain=1): 11 | """ 12 | Generate simulated read noise. 13 | 14 | Parameters 15 | ---------- 16 | 17 | image: numpy array 18 | Image whose shape the noise array should match. 19 | amount : float 20 | Amount of read noise, in electrons. 21 | gain : float, optional 22 | Gain of the camera, in units of electrons/ADU. 23 | """ 24 | shape = image.shape 25 | 26 | noise = np.random.normal(scale=amount / gain, size=shape) 27 | 28 | return noise 29 | 30 | 31 | def bias(image, value, realistic=False): 32 | """ 33 | Generate simulated bias image. 34 | 35 | Parameters 36 | ---------- 37 | 38 | image: numpy array 39 | Image whose shape the bias array should match. 40 | value: float 41 | Bias level to add. 42 | realistic : bool, optional 43 | If ``True``, add some columns with a somewhat higher bias value 44 | (a not uncommon thing) 45 | """ 46 | # This is the whole thing: the bias is really supposed to be a constant 47 | # offset! 48 | bias_im = np.zeros_like(image) + value 49 | 50 | # If we want a more realistic bias we need to do a little more work. 51 | if realistic: 52 | shape = image.shape 53 | number_of_columns = 5 54 | 55 | # We want a random-looking variation in the bias, but unlike the 56 | # read noise the bias should *not* change from image to image, so we 57 | # make sure to always generate the same "random" numbers. 58 | rng = np.random.RandomState(seed=8392) # 20180520 59 | columns = rng.randint(0, shape[1], size=number_of_columns) 60 | # This adds a little random-looking noise into the data. 61 | col_pattern = rng.randint(0, int(0.1 * value), size=shape[0]) 62 | 63 | # Make the chosen columns a little brighter than the rest... 64 | for c in columns: 65 | bias_im[:, c] = value + col_pattern 66 | 67 | return bias_im 68 | 69 | 70 | def dark_current(image, current, exposure_time, gain=1.0, hot_pixels=False): 71 | """ 72 | Simulate dark current in a CCD, optionally including hot pixels. 73 | 74 | Parameters 75 | ---------- 76 | 77 | image : numpy array 78 | Image whose shape the dark array should match. 79 | current : float 80 | Dark current, in electrons/pixel/second, which is the way 81 | manufacturers typically report it. 82 | exposure_time : float 83 | Length of the simulated exposure, in seconds. 84 | gain : float, optional 85 | Gain of the camera, in units of electrons/ADU. 86 | hot_pixels : bool, optional 87 | If ``True``, add hot pixels to the image. 88 | 89 | Returns 90 | ------- 91 | 92 | numpy array 93 | An array the same shape and dtype as the input containing dark counts 94 | in units of ADU. 95 | """ 96 | 97 | # dark current for every pixel; we'll modify the current for some pixels if 98 | # the user wants hot pixels. 99 | base_current = current * exposure_time / gain 100 | 101 | # This random number generation should change on each call.
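# Drawing each pixel from a Poisson distribution whose mean is base_current
# reproduces both the average dark level and its shot noise (a pixel-to-pixel
# scatter of roughly sqrt(base_current)).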
102 | dark_im = np.random.poisson(base_current, size=image.shape) 103 | 104 | if hot_pixels: 105 | # We'll set 0.01% of the pixels to be hot; that is probably too high 106 | # but should ensure they are visible. 107 | y_max, x_max = dark_im.shape 108 | 109 | n_hot = int(0.0001 * x_max * y_max) 110 | 111 | # Like with the bias image, we want the hot pixels to always be in the 112 | # same places (at least for the same image size) but also want them to 113 | # appear to be randomly distributed. So we set a random number seed to 114 | # ensure we always get the same thing. 115 | rng = np.random.RandomState(16201649) 116 | hot_x = rng.randint(0, x_max, size=n_hot) 117 | hot_y = rng.randint(0, y_max, size=n_hot) 118 | 119 | hot_current = 10000 * current 120 | 121 | dark_im[hot_y, hot_x] = hot_current * exposure_time / gain 122 | 123 | return dark_im 124 | 125 | 126 | def sky_background(image, sky_counts, gain=1): 127 | """ 128 | Generate a sky background, modeled here as a uniform Poisson-distributed 129 | count level across the image. 130 | 131 | Parameters 132 | ---------- 133 | 134 | image : numpy array 135 | Image whose shape the sky array should match. 136 | sky_counts : float 137 | The target value for the number of counts (as opposed to electrons or 138 | photons) from the sky. 139 | gain : float, optional 140 | Gain of the camera, in units of electrons/ADU. 141 | """ 142 | sky_im = np.random.poisson(sky_counts * gain, size=image.shape) / gain 143 | 144 | return sky_im 145 | 146 | 147 | def stars(image, number, max_counts=10000, gain=1, fwhm=4): 148 | """ 149 | Add some stars to the image. 150 | """ 151 | # Most of the code below is a direct copy/paste from 152 | # https://photutils.readthedocs.io/en/stable/_modules/photutils/datasets/make.html#make_100gaussians_image 153 | 154 | flux_range = [max_counts / 10, max_counts] 155 | 156 | y_max, x_max = image.shape 157 | xmean_range = [0.1 * x_max, 0.9 * x_max] 158 | ymean_range = [0.1 * y_max, 0.9 * y_max] 159 | xstddev_range = [fwhm, fwhm] 160 | ystddev_range = [fwhm, fwhm] 161 | params = dict([('amplitude', flux_range), 162 | ('x_mean', xmean_range), 163 | ('y_mean', ymean_range), 164 | ('x_stddev', xstddev_range), 165 | ('y_stddev', ystddev_range), 166 | ('theta', [0, 2 * np.pi])]) 167 | 168 | sources = make_random_gaussians_table(number, params, 169 | random_state=12345) 170 | 171 | star_im = make_gaussian_sources_image(image.shape, sources) 172 | 173 | return star_im 174 | 175 | 176 | def make_cosmic_rays(image, number, strength=10000): 177 | """ 178 | Generate an image with a few cosmic rays. 179 | 180 | Parameters 181 | ---------- 182 | 183 | image : numpy array 184 | Image whose shape the cosmic array should match. 185 | number: int 186 | Number of cosmic rays to add to the image. 187 | strength : float, optional 188 | Pixel count in the cosmic rays. 189 | """ 190 | 191 | cr_image = np.zeros_like(image) 192 | 193 | # Yes, the order below is correct. The x axis is the column, which 194 | # is the second index. 195 | max_y, max_x = cr_image.shape 196 | 197 | # Get the smallest dimension to ensure the cosmic rays are within the image 198 | maximum_pos = np.min(cr_image.shape) 199 | # These will be center points of the cosmic rays, which we place away from 200 | # the edges to ensure they are visible.
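# Note that both coordinates are drawn from the range set by the smaller image
# dimension, so for non-square images the ray centers all fall within a square
# region near the origin rather than across the full frame.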
201 | xy_cr = np.random.randint(0.1 * maximum_pos, 0.9 * maximum_pos, 202 | size=[number, 2]) 203 | 204 | cr_length = 5 # pixels, a little big 205 | cr_width = 2 206 | theta_cr = 2 * np.pi * np.random.rand() 207 | apertures = EllipticalAperture(xy_cr, cr_length, cr_width, theta_cr) 208 | masks = apertures.to_mask(method='center') 209 | for mask in masks: 210 | cr_image += strength * mask.to_image(shape=cr_image.shape) 211 | 212 | return cr_image 213 | 214 | 215 | # Functions related to simulated flat images 216 | 217 | def make_one_donut(center, diameter=10, amplitude=0.25): 218 | sigma = diameter / 2 219 | mh = MexicanHat2D(amplitude=amplitude, 220 | x_0=center[0], y_0=center[1], 221 | sigma=sigma) 222 | gauss = Gaussian2D(amplitude=amplitude, 223 | x_mean=center[0], y_mean=center[1], 224 | x_stddev=sigma, y_stddev=sigma) 225 | return Const2D(amplitude=1) + (mh - gauss) 226 | 227 | 228 | def add_donuts(image, number=20): 229 | """ 230 | Create a transfer function, i.e. matrix by which you multiply 231 | input counts to obtain actual counts. 232 | 233 | Parameters 234 | ---------- 235 | 236 | 237 | image : numpy array 238 | Image whose shape the cosmic array should match. 239 | 240 | number : int, optional 241 | Number of dust donuts to add. 242 | """ 243 | 244 | y, x = np.indices(image.shape) 245 | 246 | # The dust donuts should always be in the same place... 247 | rng = np.random.RandomState(43901) 248 | shape = np.array(image.shape) 249 | border_padding = 50 250 | 251 | # We'll make the dust specks range from 1% to 5% of the image size, but 252 | # only in a couple of sizes. The dust grains themselves are fairly uniform 253 | # in size (I think), and there are only a fwe elements on which dust can 254 | # settle. Size on the image is determined by size of the dust and how far 255 | # it is from the CCD chip. 256 | 257 | min_diam = int(0.02 * shape.max()) 258 | max_diam = int(0.05 * shape.max()) 259 | 260 | # Weight towards the smaller donuts because it looks more like real flats.. 261 | diameters = rng.choice([min_diam, min_diam, min_diam, max_diam], 262 | size=number) 263 | 264 | # Add a little variation in amplitude 265 | amplitudes = rng.normal(0.25, 0.05, size=number) 266 | center_x = rng.randint(border_padding, 267 | high=shape[1] - border_padding, size=number) 268 | center_y = rng.randint(border_padding, 269 | high=shape[0] - border_padding, size=number) 270 | centers = [[x, y] for x, y in zip(center_x, center_y)] 271 | 272 | donut_model = make_one_donut(centers[0], diameter=diameters[0], 273 | amplitude=amplitudes[0]) 274 | donut_im = donut_model(x, y) 275 | idx = 1 276 | for center, diam, amplitude in zip(centers[1:], 277 | diameters[1:], 278 | amplitudes[1:]): 279 | idx += 1 280 | donut_model = make_one_donut(center, diameter=diam, 281 | amplitude=amplitude) 282 | donut_im += donut_model(x, y) 283 | 284 | donut_im /= number 285 | 286 | return donut_im 287 | 288 | 289 | def sensitivity_variations(image, vignetting=True, dust=True): 290 | """ 291 | Create a transfer function, i.e. matrix by which you multiply input 292 | counts to obtain actual counts. 293 | 294 | Parameters 295 | ---------- 296 | 297 | 298 | image : numpy array 299 | Image whose shape the cosmic array should match. 300 | 301 | vignetting : bool, optional 302 | If ``True``, darken image near corners. 303 | 304 | dust : bool, optional 305 | If ``True``, add some plausible-looking dust. 
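
    Returns
    -------

    numpy array
        An array the same shape as ``image`` giving the relative sensitivity
        of each pixel, with values near 1.0; multiplying a simulated image by
        it applies the vignetting and dust effects.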
306 | """ 307 | 308 | sensitivity = np.zeros_like(image) + 1.0 309 | shape = np.array(sensitivity.shape) 310 | 311 | if dust or vignetting: 312 | # Yes, this should be y, x. 313 | y, x = np.indices(sensitivity.shape) 314 | 315 | if vignetting: 316 | # Generate very wide gaussian centered on the center of the image, 317 | # multiply the sensitivity by it. 318 | vign_model = Gaussian2D(amplitude=1, 319 | x_mean=shape[0] / 2, y_mean=shape[1] / 2, 320 | x_stddev=2 * shape.max(), 321 | y_stddev=2 * shape.max()) 322 | vign_im = vign_model(x, y) 323 | sensitivity *= vign_im 324 | 325 | if dust: 326 | dust_im = add_donuts(image, number=40) 327 | dust_im = dust_im / dust_im.max() 328 | sensitivity *= dust_im 329 | 330 | return sensitivity 331 | -------------------------------------------------------------------------------- /notebooks/01-11-reading-images.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Reading images\n", 8 | "\n", 9 | "Astropy provides a few ways to read in FITS images, some in the core package and\n", 10 | "others in affiliated packages.\n", 11 | "\n", 12 | "Before exploring those, we'll create a set of (fake) images to work with." 13 | ] 14 | }, 15 | { 16 | "cell_type": "code", 17 | "execution_count": null, 18 | "metadata": {}, 19 | "outputs": [], 20 | "source": [ 21 | "from pathlib import Path\n", 22 | "from astropy.nddata import CCDData\n", 23 | "from astropy.io import fits" 24 | ] 25 | }, 26 | { 27 | "cell_type": "markdown", 28 | "metadata": {}, 29 | "source": [ 30 | "## Working with directories\n", 31 | "\n", 32 | "The cell below contains the path to the images. In this notebook we'll use it\n", 33 | "both to store the fake images we generate and to read images. In normal use, you\n", 34 | "wouldn't start by writing images there, however.\n", 35 | "\n", 36 | "If the images are in the same directory as the notebook, you can omit this or\n", 37 | "set it to an empty string `''`. Having images in the same directory as the\n", 38 | "notebook is less complicated, but it's not at all uncommon to need to work with\n", 39 | "images in a different directory.\n", 40 | "\n", 41 | "Later, we'll look at how to generate the full path to an image (directory plus\n", 42 | "file name) in a way that will work on any platform. One of the approaches to\n", 43 | "loading images (using `ccdproc.ImageFileCollection`) lets you mostly forget\n", 44 | "about this." 45 | ] 46 | }, 47 | { 48 | "cell_type": "code", 49 | "execution_count": null, 50 | "metadata": {}, 51 | "outputs": [], 52 | "source": [ 53 | "data_directory = 'path/to/my/images'" 54 | ] 55 | }, 56 | { 57 | "cell_type": "markdown", 58 | "metadata": {}, 59 | "source": [ 60 | "## Generate some fake images\n", 61 | "\n", 62 | "The cells below generate some fake images to use later in the notebook." 
63 | ] 64 | }, 65 | { 66 | "cell_type": "code", 67 | "execution_count": null, 68 | "metadata": {}, 69 | "outputs": [], 70 | "source": [ 71 | "from pathlib import Path\n", 72 | "from itertools import cycle\n", 73 | "\n", 74 | "import numpy as np\n", 75 | "\n", 76 | "image_path = Path(data_directory)\n", 77 | "\n", 78 | "image_path.mkdir(parents=True, exist_ok=True)\n", 79 | "\n", 80 | "images_to_generate = {\n", 81 | " 'BIAS': 5,\n", 82 | " 'DARK': 10,\n", 83 | " 'FLAT': 3,\n", 84 | " 'LIGHT': 10\n", 85 | "}\n", 86 | "\n", 87 | "exposure_times = {\n", 88 | " 'BIAS': [0.0],\n", 89 | " 'DARK': [5.0, 30.0],\n", 90 | " 'FLAT': [5.0, 6.1, 7.3],\n", 91 | " 'LIGHT': [30.0],\n", 92 | "}\n", 93 | "\n", 94 | "filters = {\n", 95 | " 'FLAT': 'V',\n", 96 | " 'LIGHT': 'V'\n", 97 | "}\n", 98 | "\n", 99 | "objects = {\n", 100 | " 'LIGHT': ['m82', 'xx cyg']\n", 101 | "}\n", 102 | "\n", 103 | "image_size = [300, 200]\n", 104 | "\n", 105 | "image_number = 0\n", 106 | "for image_type, num in images_to_generate.items():\n", 107 | " exposures = cycle(exposure_times[image_type])\n", 108 | " try:\n", 109 | " filts = cycle(filters[image_type])\n", 110 | " except KeyError:\n", 111 | " filts = []\n", 112 | " \n", 113 | " try:\n", 114 | " objs = cycle(objects[image_type])\n", 115 | " except KeyError:\n", 116 | " objs = []\n", 117 | " for _ in range(num):\n", 118 | " img = CCDData(data=np.random.randn(*image_size), unit='adu')\n", 119 | " img.meta['IMAGETYP'] = image_type\n", 120 | " img.meta['EXPOSURE'] = next(exposures)\n", 121 | " if filts:\n", 122 | " img.meta['FILTER'] = next(filts)\n", 123 | " if objs:\n", 124 | " img.meta['OBJECT'] = next(objs)\n", 125 | " image_name = str(image_path / f'img-{image_number:04d}.fits')\n", 126 | " img.write(image_name)\n", 127 | " print(image_name)\n", 128 | " image_number += 1" 129 | ] 130 | }, 131 | { 132 | "cell_type": "markdown", 133 | "metadata": {}, 134 | "source": [ 135 | "## Option 1: Reading a single image with `astropy.io.fits`\n", 136 | "\n", 137 | "This option gives you the most flexibility but is the least adapted to CCD\n", 138 | "images specifically. What you read in is a list of FITS extensions; you must\n", 139 | "first select the one you want then access the data or header as desired.\n", 140 | "\n", 141 | "We'll open up the first of the fake images, `img-0001.fits`. To combine that\n", 142 | "with the directory name we'll use Python 3's `pathlib`, which ensures that the\n", 143 | "path combination will work on Windows too." 144 | ] 145 | }, 146 | { 147 | "cell_type": "code", 148 | "execution_count": null, 149 | "metadata": {}, 150 | "outputs": [], 151 | "source": [ 152 | "image_name = 'img-0001.fits'\n", 153 | "\n", 154 | "image_path = Path(data_directory) / image_name\n", 155 | "\n", 156 | "hdu_list = fits.open(image_path)\n", 157 | "hdu_list.info()" 158 | ] 159 | }, 160 | { 161 | "cell_type": "markdown", 162 | "metadata": {}, 163 | "source": [ 164 | "The `hdu_list` is a list of FITS Header-Data Units. In this case there is just\n", 165 | "one, containing both the image header and data, which can be accessed as shown\n", 166 | "below." 
167 | ] 168 | }, 169 | { 170 | "cell_type": "code", 171 | "execution_count": null, 172 | "metadata": {}, 173 | "outputs": [], 174 | "source": [ 175 | "hdu = hdu_list[0]\n", 176 | "hdu.header" 177 | ] 178 | }, 179 | { 180 | "cell_type": "code", 181 | "execution_count": null, 182 | "metadata": {}, 183 | "outputs": [], 184 | "source": [ 185 | "hdu.data" 186 | ] 187 | }, 188 | { 189 | "cell_type": "markdown", 190 | "metadata": {}, 191 | "source": [ 192 | "The [documentation for io.fits](https://astropy.readthedocs.io/en/stable/io/fits/index.html) describes more of its capabilities." 193 | ] 194 | }, 195 | { 196 | "cell_type": "markdown", 197 | "metadata": {}, 198 | "source": [ 199 | "## Option 2: Use `CCDData` to read in a single image\n", 200 | "\n", 201 | "Astropy contains a `CCDData` object for representing a single image. It's not as\n", 202 | "flexible as using `astropy.io.fits` directly (for example, it assumes there is\n", 203 | "only one FITS extension and that it contains image data) but it sets up several\n", 204 | "properties that make the data easier to work with.\n", 205 | "\n", 206 | "We'll read in the same single image we did in the example above,\n", 207 | "`img-0001.fits`." 208 | ] 209 | }, 210 | { 211 | "cell_type": "code", 212 | "execution_count": null, 213 | "metadata": {}, 214 | "outputs": [], 215 | "source": [ 216 | "ccd = CCDData.read(image_path)" 217 | ] 218 | }, 219 | { 220 | "cell_type": "markdown", 221 | "metadata": {}, 222 | "source": [ 223 | "The data and header are accessed similarly to how you access them in an HDU\n", 224 | "returned by `astropy.io.fits`:" 225 | ] 226 | }, 227 | { 228 | "cell_type": "code", 229 | "execution_count": null, 230 | "metadata": {}, 231 | "outputs": [], 232 | "source": [ 233 | "ccd.header" 234 | ] 235 | }, 236 | { 237 | "cell_type": "code", 238 | "execution_count": null, 239 | "metadata": {}, 240 | "outputs": [], 241 | "source": [ 242 | "ccd.data" 243 | ] 244 | }, 245 | { 246 | "cell_type": "markdown", 247 | "metadata": {}, 248 | "source": [ 249 | "There are a [number of features of `CCDData`](https://astropy.readthedocs.io/en/stable/nddata/ccddata.html) that make it convenient for working\n", 250 | "with WCS, slicing, and more. Some of those features will be discussed in more\n", 251 | "detail in the notebooks that follow." 252 | ] 253 | }, 254 | { 255 | "cell_type": "markdown", 256 | "metadata": {}, 257 | "source": [ 258 | "## Option 3: Working with a directory of images using `ImageFileCollection`\n", 259 | "\n", 260 | "The affiliated package [ccdproc](https://ccdproc.readthedocs.io/) provides an easier way\n", 261 | "to work with collections of images in a directory: an `ImageFileCollection`. The\n", 262 | "`ImageFileCollection` is initialized with the name of the directory containing\n", 263 | "the images." 264 | ] 265 | }, 266 | { 267 | "cell_type": "code", 268 | "execution_count": null, 269 | "metadata": {}, 270 | "outputs": [], 271 | "source": [ 272 | "from ccdproc import ImageFileCollection\n", 273 | "im_collection = ImageFileCollection(data_directory)" 274 | ] 275 | }, 276 | { 277 | "cell_type": "markdown", 278 | "metadata": {}, 279 | "source": [ 280 | "Note that we didn't need to worry about using `pathlib` to combine the directory\n", 281 | "and file name; instead, we give the collection the name of the directory."
282 | ] 283 | }, 284 | { 285 | "cell_type": "markdown", 286 | "metadata": {}, 287 | "source": [ 288 | "### Summary of directory contents\n", 289 | "\n", 290 | "The `summary` property provides an overview of the files in the directory: it's\n", 291 | "an astropy `Table`, so you can access columns in the usual way." 292 | ] 293 | }, 294 | { 295 | "cell_type": "code", 296 | "execution_count": null, 297 | "metadata": {}, 298 | "outputs": [], 299 | "source": [ 300 | "im_collection.summary" 301 | ] 302 | }, 303 | { 304 | "cell_type": "markdown", 305 | "metadata": {}, 306 | "source": [ 307 | "### Filtering and iterating over images\n", 308 | "\n", 309 | "The great thing about `ImageFileCollection` is that it provides convenient ways\n", 310 | "to filter or loop over files via FITS header keyword values.\n", 311 | "\n", 312 | "For example, looping over just the flat files is one line of code:" 313 | ] 314 | }, 315 | { 316 | "cell_type": "code", 317 | "execution_count": null, 318 | "metadata": {}, 319 | "outputs": [], 320 | "source": [ 321 | "for a_flat in im_collection.hdus(imagetyp='FLAT'):\n", 322 | " print(a_flat.header['EXPOSURE'])" 323 | ] 324 | }, 325 | { 326 | "cell_type": "markdown", 327 | "metadata": {}, 328 | "source": [ 329 | "Instead of iterating over HDUs, as in the example above, you can iterate over\n", 330 | "just the headers (with `.headers`) or just the data (with `.data`). You can use\n", 331 | "any FITS keyword from the header as a keyword for selecting the images you want.\n", 332 | "In addition, you can return the file name while also iterating." 333 | ] 334 | }, 335 | { 336 | "cell_type": "code", 337 | "execution_count": null, 338 | "metadata": {}, 339 | "outputs": [], 340 | "source": [ 341 | "for a_flat, fname in im_collection.hdus(imagetyp='LIGHT', object='m82', return_fname=True):\n", 342 | " print(f'In file {fname} the exposure is:', a_flat.header['EXPOSURE'], 'with standard deviation ', a_flat.data.std())" 343 | ] 344 | }, 345 | { 346 | "cell_type": "markdown", 347 | "metadata": {}, 348 | "source": [ 349 | "The [documentation for `ImageFileCollection`](https://ccdproc.readthedocs.io/en/latest/ccdproc/image_management.html) describes more of its capabilities.\n", 350 | "`ImageFileCollection` can automatically save a copy of each image as you iterate\n", 351 | "over them, for example." 352 | ] 353 | }, 354 | { 355 | "cell_type": "code", 356 | "execution_count": null, 357 | "metadata": {}, 358 | "outputs": [], 359 | "source": [ 360 | "for a_flat, fname in im_collection.ccds(bunit='ADU', return_fname=True):\n", 361 | " print(a_flat.unit)" 362 | ] 363 | }, 364 | { 365 | "cell_type": "code", 366 | "execution_count": null, 367 | "metadata": {}, 368 | "outputs": [], 369 | "source": [ 370 | "a_flat.header" 371 | ] 372 | } 373 | ], 374 | "metadata": { 375 | "kernelspec": { 376 | "display_name": "Python 3", 377 | "language": "python", 378 | "name": "python3" 379 | }, 380 | "language_info": { 381 | "codemirror_mode": { 382 | "name": "ipython", 383 | "version": 3 384 | }, 385 | "file_extension": ".py", 386 | "mimetype": "text/x-python", 387 | "name": "python", 388 | "nbconvert_exporter": "python", 389 | "pygments_lexer": "ipython3", 390 | "version": "3.6.8" 391 | } 392 | }, 393 | "nbformat": 4, 394 | "nbformat_minor": 2 395 | } 396 | --------------------------------------------------------------------------------
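As a quick illustration of the `ImageFileCollection` filtering described in the notebook above, here is a minimal sketch. It assumes the fake images generated earlier in that notebook were written to `data_directory` (the variable defined there), that ccdproc is installed, and that the installed version's `files_filtered` supports the `include_path` option, which returns full paths that can be passed straight to `CCDData.read`.

from ccdproc import ImageFileCollection
from astropy.nddata import CCDData

# Build a collection from the directory of FITS files and read their headers.
im_collection = ImageFileCollection(data_directory)

# Select only the light exposures of m82, returning full paths so there is
# no need to join the directory and file names by hand.
light_files = im_collection.files_filtered(imagetyp='LIGHT', object='m82',
                                           include_path=True)

# Read the first match; the unit is picked up from the BUNIT keyword that was
# written when the fake image was created.
first_light = CCDData.read(light_files[0])
print(first_light.header['EXPOSURE'], first_light.unit)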