This notebook is outdated and will be deprecated in October 2024.

This notebook currently fails to execute; use it as a reference only.

MRS Pipeline#

Use case: Extract spatial-spectral features from IFU cube and measure their attributes.
Data: Simulated MIRI MRS spectrum of AGB star.
Tools: specutils, jwst, photutils, astropy, aplpy, scipy.
Cross-instrument: NIRSpec, MIRI.
Documentation: This notebook is part of STScI's larger post-pipeline Data Analysis Tools Ecosystem and can be downloaded directly from the JDAT Notebook GitHub directory.
Source of Simulations: MIRISim
Pipeline Version: JWST Pipeline

Note: This notebook uses simulated MIRI data cubes of point sources with spectra representative of late M-type stars, obtained using MIRISim (https://wiki.miricle.org//bin/view/Public/MIRISim_Public) and run through the JWST pipeline (https://jwst-pipeline.readthedocs.io/en/latest/).

Introduction#

This notebook analyzes one star represented by a dusty SED corresponding to the ISO SWS spectrum of W Per from Kraemer et al. (2002) and Sloan et al. (2003) to cover the MRS spectral range 5-28 microns. Analysis of JWST spectral cubes requires extracting spatial-spectral features of interest and measuring their attributes.

This is the first notebook; it processes the data and automatically detects and extracts the spectrum of the point source. The workflow uses photutils to automatically detect sources in the cube and extract the spectrum of the point source, and then reads in the spectra generated at Stage 3 of the JWST pipeline.

Import Packages#

# Import useful python packages
import numpy as np

# Import packages to display images inline in the notebook
import matplotlib.pyplot as plt    
%matplotlib inline   

# Set general plotting options
params = {'legend.fontsize': '18', 'axes.labelsize': '18',
          'axes.titlesize': '18', 'xtick.labelsize': '18',
          'ytick.labelsize': '18', 'lines.linewidth': 2,
          'axes.linewidth': 2, 'animation.html': 'html5'}
plt.rcParams.update(params)
plt.rcParams.update({'figure.max_open_warning': 0})
import warnings
warnings.simplefilter('ignore')
# Import astropy packages
from astropy import units as u
from astropy.io import ascii, fits
from astropy.wcs import WCS
from astropy.table import Table, vstack
from astropy.stats import sigma_clipped_stats
from astropy.nddata import StdDevUncertainty
from astropy.utils.data import get_pkg_data_filename

# To find stars in the MRS spectral cubes and do aperture photometry
from photutils.aperture import CircularAperture
from photutils.detection import DAOStarFinder

# To deal with 1D spectrum
from specutils import Spectrum1D
from specutils.fitting import fit_generic_continuum
from specutils.manipulation import box_smooth, extract_region, SplineInterpolatedResampler
from specutils.analysis import line_flux, centroid, equivalent_width
from specutils.spectra import SpectralRegion
from specutils import SpectrumList

# To fit a curve to the data
from scipy.optimize import curve_fit

import os

Set paths to the Data and Outputs#

Future iterations will use MIRISim data processed through the JWST pipeline.

os.environ['CRDS_PATH'] = os.environ['HOME']+'/crds_cache'
os.environ['CRDS_SERVER_URL'] = 'https://jwst-crds-pub.stsci.edu'
# import pipeline

from jwst.pipeline import Detector1Pipeline
from jwst.pipeline import Spec2Pipeline
from jwst.pipeline import Spec3Pipeline
from jwst.extract_1d import Extract1dStep
import json
import glob
from jwst.associations.lib.rules_level3_base import DMS_Level3_Base
from jwst.associations import asn_from_list
import crds
from jdaviz.app import Application
import asdf
from photutils.aperture import aperture_photometry
# Download data if you don't already have it.

import urllib.request

mirisim_tarfiles = ['20210413_120546_mirisim.tar.gz',
                    '20210413_123047_mirisim.tar.gz',
                    '20210413_125354_mirisim.tar.gz']
url_base = 'https://data.science.stsci.edu/redirect/JWST/jwst-data_analysis_tools/MRS_Mstar_analysis/'

# Retrieve each tar file only if it is not already present
for tarname in mirisim_tarfiles:
    if os.path.exists(tarname):
        print(tarname + " Exists")
    else:
        urllib.request.urlretrieve(url_base + tarname, './' + tarname)
# Unzip Tar Files

import tarfile

# Unzip files if they haven't already been unzipped
for tarname in mirisim_tarfiles:
    dirname = tarname.replace('.tar.gz', '')
    if os.path.exists(dirname + '/'):
        print(dirname + " Exists")
    else:
        with tarfile.open('./' + tarname, "r:gz") as tar:
            tar.extractall()

Run Pipeline#

The various stages of the pipeline can be run within Python. For a more in-depth tutorial on running the pipelines, check out the JWebbinars.

# Execute calwebb_detector1 pipeline on raw simulation output.  This will overwrite previous reductions.

allshortfiles = glob.glob('20210413_*_mirisim/det_images/*MIRIFUSHORT*fits')
alllongfiles = glob.glob('20210413_*_mirisim/det_images/*MIRIFULONG*fits')
      
pipe1short = Detector1Pipeline()

# run calwebb_detector1 on the MIRIFUSHORT data separate from MIRIFULONG data, as it saves time this way
for shortfile in allshortfiles:
    print(shortfile)
    baseshort, remaindershort = shortfile.split('.')
    
    # If you run your own simulations, you will need to update these hardcoded files.
    beforestuffshort, dateafterstuffshort = shortfile.split('20210413_')    
    datestringshort, afterstuffshort = dateafterstuffshort.split('_mirisim')
    
    pipe1short.refpix.skip = True
    pipe1short.output_file = baseshort + datestringshort
    
    pipe1short.run(shortfile)

pipe1long = Detector1Pipeline()

for longfile in alllongfiles:
    print(longfile)
    baselong, remainderlong = longfile.split('.')
    
    # If you run your own simulations, you will need to update these hardcoded files.
    beforestufflong, dateafterstufflong = longfile.split('20210413_')
    datestringlong, afterstufflong = dateafterstufflong.split('_mirisim')
    
    pipe1long.refpix.skip = True
    pipe1long.output_file = baselong + datestringlong
    
    pipe1long.run(longfile)
# Execute calwebb_spec2 pipeline. This will overwrite previous reductions.

# All the local calwebb_detector1 files
allshortfiles2 = glob.glob('det_image_*_MIRIFUSHORT_*_rate.fits')
alllongfiles2 = glob.glob('det_image_*_MIRIFULONG_*_rate.fits')

for short2file in allshortfiles2:
    print(short2file)
    pipe2short = Spec2Pipeline()
    base2short, remainder2short = short2file.split('.')
    
    pipe2short.straylight.skip = True
    
    # If you run your own simulations, you will need to update these hardcoded files.
    if (short2file == 'det_image_seq1_MIRIFUSHORT_12LONGexp1125354_rate.fits'):
        print('this one will have the level 2b cube built')
    else:
        pipe2short.cube_build.skip = True
    pipe2short.extract_1d.skip = True
    pipe2short.output_file = base2short
        
    pipe2short.run(short2file)

for long2file in alllongfiles2:
    print(long2file)
    pipe2long = Spec2Pipeline()
    base2long, remainder2long = long2file.split('.')
    
    pipe2long.straylight.skip = True
    # If you run your own simulations, you will need to update these hardcoded files.
    if (long2file == 'det_image_seq1_MIRIFULONG_34SHORTexp1120546_rate.fits'):
        print('this one will have the level 2b cube built')
    else:
        pipe2long.cube_build.skip = True
    pipe2long.extract_1d.skip = True
    pipe2long.output_file = base2long
    
    pipe2long.run(long2file)

Detect the point source in the data cube, then extract and plot the spectra for each source#

For data cubes like those from the JWST/MIRI MRS, information on the point sources in the FOV, and a source-subtracted data cube, will be necessary (see the PampelMuse software for an example of how spectral extraction is implemented for near-IR data cubes like MUSE).

Note that these backgrounds of diffuse emission can be quite complex.

On these source-subtracted data cubes (see SUBTRES in PampelMuse), one can then produce moment maps (https://casa.nrao.edu/Release3.4.0/docs/UserMan/UserManse41.html) and position-velocity (PV) diagrams (https://casa.nrao.edu/Release4.1.0/doc/UserMan/UserManse42.html); a minimal numpy sketch of the moment maps follows.
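
As a reference for that later analysis, below is a minimal sketch of moment-0 and moment-1 maps computed with numpy. The cube and waves arrays are placeholders, not products generated by this notebook; substitute the SCI extension of an *_s3d.fits product and its wavelength grid.

# Hedged sketch: moment maps from a spectral cube with numpy.
# `cube` and `waves` are placeholder arrays, to be replaced with real data.
cube = np.random.rand(100, 20, 20)    # placeholder cube, shape [wave, y, x]
waves = np.linspace(5.0, 28.0, 100)   # placeholder wavelength grid (microns)

mom0 = np.nansum(cube, axis=0)        # moment 0: integrated intensity
w = np.clip(cube, 0, None)            # clip negative values before weighting
mom1 = np.nansum(w * waves[:, None, None], axis=0) / np.nansum(w, axis=0)  # moment 1: intensity-weighted mean wavelength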

1) Use Photutils to detect stars/point sources in the continuum image#

The first step of the analysis is to identify the sources for which it is feasible to extract spectra from the IFU data. Ideally we would estimate the signal-to-noise ratio (S/N) of every source in the cube, run a number of checks to determine the status of each source, and loop through them (brightest first) to extract the spectra; a rough aperture-based S/N estimate is sketched after the detection step below. Open up the Level 2b cubes and use photutils to search for point sources for Level 3 extraction.

# If you run your own simulations, you will need to update these hardcoded files.
l_cube_file = 'det_image_seq1_MIRIFULONG_34SHORTexp1120546_s3d.fits'
s_cube_file = 'det_image_seq1_MIRIFUSHORT_12LONGexp1125354_s3d.fits'

with fits.open(s_cube_file) as hdu_s_cube:
    s_cube = hdu_s_cube['SCI'].data
    s_med_cube = np.nanmedian(s_cube, axis=0)

mean, median, std = sigma_clipped_stats(s_med_cube, sigma = 2.0)

# Get a list of sources using a dedicated source detection algorithm
# Find sources at least 3* background (typically)

daofind = DAOStarFinder(fwhm = 2.0, threshold = 3. * std)
sources = daofind(s_med_cube - median) 
print("\n Number of sources in field: ", len(sources))

# Positions in pixels
positions = Table([sources['xcentroid'], sources['ycentroid']])

# Find the peak pixel value of each detected source
# (note that numpy arrays are indexed [y, x])
peakpixval = np.zeros(len(sources['xcentroid']))
for count_s, _ in enumerate(sources):
    peakpixval[count_s] = s_med_cube[int(np.round(sources['ycentroid'][count_s])), int(np.round(sources['xcentroid'][count_s]))]
print('peak pixel x =')
print(sources['xcentroid'][np.argmax(peakpixval)])
print('peak pixel y =')
print(sources['ycentroid'][np.argmax(peakpixval)])

plt.imshow(s_med_cube, vmin=0, vmax=100)
plt.tight_layout()
plt.scatter(sources['xcentroid'], sources['ycentroid'], c = "red", marker = "+", s=50)
plt.scatter(sources['xcentroid'][np.argmax(peakpixval)], sources['ycentroid'][np.argmax(peakpixval)], c = 'black', marker='+', s=50)
plt.show()
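
A rough S/N estimate for each detected source can be made with aperture photometry on the background-subtracted median image, using the aperture_photometry import above. This is only a sketch under stated assumptions: the 3-pixel aperture radius is an assumed value, and the sigma-clipped background std stands in for the per-pixel noise.

# Rough per-source S/N from aperture sums (aperture radius is an assumed value)
positions_xy = np.transpose([sources['xcentroid'], sources['ycentroid']])
apertures = CircularAperture(positions_xy, r=3.0)
phot = aperture_photometry(np.nan_to_num(s_med_cube - median), apertures)

# Approximate the aperture noise as the per-pixel std scaled by sqrt(aperture area)
phot['snr'] = phot['aperture_sum'] / (std * np.sqrt(apertures.area))
print(phot)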

f0 = fits.open(s_cube_file)
w0 = WCS(f0[('sci',1)].header, f0)
f0.close()

radec = w0.all_pix2world([sources['xcentroid'][np.argmax(peakpixval)]], [sources['ycentroid'][np.argmax(peakpixval)]], [1], 1)

# Take the brightest source as the primary point source for extraction
ra_ptsrc = radec[0][0]
dec_ptsrc = radec[1][0]
# Due to the way the pipeline currently extracts Level3 data, you must update the headers to be centered on the point source of your choosing from the step above.
all_files = glob.glob('det_image_*_cal.fits')
targra = ra_ptsrc
targdec = dec_ptsrc
for thisfile in all_files:
    base, remainder = thisfile.split('.')
    outfilename = base + '_fix.' + remainder
    print(outfilename)
    
    with fits.open(thisfile) as hduthis:
        hduthis['SCI'].header['SRCTYPE'] = 'POINT'
        hduthis[0].header['TARG_RA'] = targra
        hduthis[0].header['TARG_DEC'] = targdec
        hduthis.writeto(outfilename, overwrite=True)
# Set up the association file needed for spec3

file_all_list = glob.glob('det_image_*_cal_fix.fits')

asnall = asn_from_list.asn_from_list(file_all_list, rule=DMS_Level3_Base, product_name='combine_dithers_all_exposures')

asnallfile = 'for_spec3_all.json'
with open(asnallfile, 'w') as fpall:
    fpall.write(asnall.dump()[1])
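
As an optional sanity check (not part of the original workflow), the association file can be read back with the json module imported above to confirm its members:

# Read back the association file and list its member exposures
with open(asnallfile) as fpall:
    asn_data = json.load(fpall)
print(asn_data['asn_type'])
for member in asn_data['products'][0]['members']:
    print(member['expname'])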
# Execute calwebb_spec3 pipeline.  This will overwrite previous reductions.

pipe3ss = Spec3Pipeline()
pipe3ss.master_background.skip = True
pipe3ss.mrs_imatch.skip = True
pipe3ss.outlier_detection.skip = True
pipe3ss.resample_spec.skip = True
pipe3ss.combine_1d.skip = True
pipe3ss.extract_1d.use_source_posn = True
pipe3ss.extract_1d.subtract_background = True
pipe3ss.output_file = 'allspec3'
pipe3ss.run(asnallfile)
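
The Stage 3 extract_1d step writes out *_x1d.fits products. Below is a hedged sketch of reading them back with the SpectrumList import above; the glob pattern is an assumption about the output file names.

# Read the Stage 3 1-D extractions back in (file name pattern is an assumption)
x1dfiles = glob.glob('allspec3*x1d.fits')
for x1dfile in x1dfiles:
    spec = SpectrumList.read(x1dfile)
    print(x1dfile, spec)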

Next Step#

Proceed to Notebook 2 for visualization and data analysis.

Additional Resources#

About this notebook#

Author: Olivia Jones, Project Scientist, UK ATC.
Updated on: 2020-08-11. Later updated on: 2021-09-06 by B. Sargent, Scientist, Space Telescope Science Institute.

