Mirror of https://asciireactor.com/otho/psdlag-agn.git, synced 2024-11-24 02:35:05 +00:00
first successful lag output with new system
This commit is contained in:
parent 8829072a49
commit 142f013d80
@@ -1,53 +1,57 @@
 # -*- coding: utf-8 -*-
 from __future__ import unicode_literals
 import numpy as np
-import clag
 import sys
+import getopt
 
+sys.path.insert(1,"/usr/local/science/clag/")
+import clag
 
+# psd1_output = open("psd1.tmp")
+# psd2_output = open("psd2.tmp")
+# lag_output = open("lag.tmp")
+
 # For jupyter notebook
 # %pylab inline
 
 try:
-    opts,args = getopt.getopt(args, "")
+    opts,args = getopt.getopt(sys.argv[1:], "")
 except getopt.GetoptError:
     print 'analyze_lightcure.py <reference curve> <compared curve>'
     sys.exit(2)
 
 ## load the first light curve
-lc1 = np.loadtxt(args[0])
+lc1_table = np.loadtxt(args[0],skiprows=1)
 
 # works if first two entries represent minimum spacing, from example
-# dt = lc1[1,0] - lc1[0, 0]
+# dt = lc1_table[1,0] - lc1_table[0, 0]
 
 # Time resolution determined from inspection and testing. This script
 # does not expect evenly spaced data in time.
 dt = 0.1
 
 
-_ = plot(lc1[:,0], lc1[:,1])
-_ = plot(lc1[:,0], lc1[:,3])
+# _ = plot(lc1_table[:,0], lc1_table[:,1])
+# _ = plot(lc1_table[:,0], lc1_table[:,3])
 
 
 # Split the light curve into segments #
-seg_length = 256
-index = np.arange(len(lc1)).reshape((-1, seg_length))
+#seg_length = 256
+#index = np.arange(len(data)).reshape((-1, seg_length))
 
-lc1_time = [lc1[i, 0] for i in index]
-lc1_strength = [lc1[i, 1] for i in index]
-lc1_strength_err = [lc1[i, 2] for i in index]
+# For now, instead of splitting up the curves, the program will assume
+# that the data list is shorter than 256 elemements. so,
 
-# This would work if both curves are in same file
-# lc2 = [lc1[i, 3] for i in index]
-#Lc2e = [lc1[i, 4] for i in index]
+index = np.arange(len(lc1_table)).reshape(-1,len(lc1_table))
 
-# Load second light curve
-lc2 = np.loadtxt(args[1])
+lc1_time = [lc1_table[i, 0] for i in index]
+lc1_strength = [lc1_table[i, 1] for i in index]
+lc1_strength_err = [lc1_table[i, 2] for i in index]
 
 #### Get the psd for the first light curve ####
 
-# These bin values determined summer 2015 for STORM III optical/UV lightcurves
-fqL = [0.0049999999, 0.018619375, 0.044733049, 0.069336227, 0.10747115, 0.16658029, 0.25819945, 0.40020915, 0.62032418]
+# These bin values determined summer 2016 for STORM III optical/UV lightcurves
+fqL = np.array([0.0049999999, 0.018619375, 0.044733049, 0.069336227, 0.10747115, 0.16658029, 0.25819945, 0.40020915, 0.62032418])
 
 # using utilities to set up frequency bins #
 # fqL = np.logspace(np.log10(1.1/seg_length),np.log10(.5/dt),7)
@@ -56,7 +60,6 @@ fqL = [0.0049999999, 0.018619375, 0.044733049, 0.069336227, 0.10747115, 0.166580
 nfq = len(fqL) - 1
 
 
-
 ## initialize the psd class for multiple light curves ##
 P1 = clag.clag('psd', lc1_time, lc1_strength, lc1_strength_err, dt, fqL)
 
@@ -68,7 +71,7 @@ P1 = clag.clag('psd', lc1_time, lc1_strength, lc1_strength_err, dt, fqL)
 inpars = np.ones(nfq)
 
 ## print the loglikelihood for the input values ##
-print P1.logLikelihood(inpars)
+P1.logLikelihood(inpars)
 
 
 
@@ -81,15 +84,31 @@ psd1, psd1e = clag.optimize(P1, inpars)
 ## plot ##
 fqd = 10**(np.log10( (fqL[:-1]*fqL[1:]) )/2.)
 
-loglog(fqd, 0.1*fqd**(-1.5), label='input psd')
-errorbar(fqd[1:-1], psd1[1:-1], yerr=psd1e[1:-1], fmt='o', ms=10, label='fit')
+#loglog(fqd, 0.1*fqd**(-1.5), label='input psd')
+#errorbar(fqd[1:-1], psd1[1:-1], yerr=psd1e[1:-1], fmt='o', ms=10, label='fit')
 
+# load second lightcurve
+
+# This would work if both curves are in same file
+# lc2_strength = [lc1_table[i, 3] for i in index]
+# lc2_strength_err = [lc1_table[i, 4] for i in index]
+
+# But, they aren't, so,
+
+# Load second light curve
+lc2_table = np.loadtxt(args[1],skiprows=1)
+
+index = np.arange(len(lc2_table)).reshape(-1,len(lc2_table))
+
+lc2_time = [lc2_table[i, 0] for i in index]
+lc2_strength = [lc2_table[i, 1] for i in index]
+lc2_strength_err = [lc2_table[i, 2] for i in index]
 
 
 
 ## Now do the second light curve
 
-P2 = clag.clag('psd', lc1_time, lc2, Lc2e, dt, fqL)
+P2 = clag.clag('psd', lc2_time, lc2_strength, lc2_strength_err, dt, fqL)
 
 psd2, psd2e = clag.optimize(P2, inpars)
 
@@ -102,8 +121,8 @@ psd2, psd2e = clag.optimize(P2, inpars)
 ### We also give it the calculated psd values as input ###
 Cx = clag.clag('cxd',
     [list(i) for i in zip(lc1_time,lc1_time)],
-    [list(i) for i in zip(lc1_strength,lc2)],
-    [list(i) for i in zip(lc1_strength_err,Lc2e)],
+    [list(i) for i in zip(lc1_strength,lc2_strength)],
+    [list(i) for i in zip(lc1_strength_err,lc2_strength_err)],
     dt, fqL, psd1, psd2)
 
 inpars = np.concatenate( (0.3*(psd1*psd2)**0.5, psd1*0+1.) )
@@ -112,6 +131,8 @@ phi, phie = p[nfq:], pe[nfq:]
 lag, lage = phi/(2*np.pi*fqd), phie/(2*np.pi*fqd)
 cx, cxe = p[:nfq], pe[:nfq]
 
+np.savetxt("lag.out",lag)
+
 
 
 
@@ -121,7 +142,7 @@ cx, cxe = p[:nfq], pe[:nfq]
 
 ## plot ##
 
-semilogx(fqd, fqd*0+1.0, label='input phase lag')
-ylim([0.8, 1.2])
-errorbar(fqd[1:-1], phi[1:-1], yerr=phie[1:-1], fmt='o', ms=10, label='fit')
+#semilogx(fqd, fqd*0+1.0, label='input phase lag')
+#ylim([0.8, 1.2])
+#errorbar(fqd[1:-1], phi[1:-1], yerr=phie[1:-1], fmt='o', ms=10, label='fit')
 
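A note on the lag values this commit writes to lag.out: the cross-spectrum fit returns a phase lag phi per frequency bin, and the script converts it to a time lag with lag = phi/(2*np.pi*fqd), where fqd is the geometric centre of the bin. A minimal sketch of that conversion, using hypothetical numbers rather than values from this commit:

    import numpy as np

    # Hypothetical values, only to illustrate the phase-to-time conversion:
    # a phase lag of 1.0 rad in a bin centred at 0.03 cycles per time unit
    # corresponds to a time lag of 1.0 / (2 * pi * 0.03) ~ 5.3 time units.
    phi = np.array([1.0])    # fitted phase lag per frequency bin (radians)
    fqd = np.array([0.03])   # geometric bin-centre frequency
    lag = phi / (2 * np.pi * fqd)
    print(lag)               # ~[5.305]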
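The commented-out np.logspace line in the script sketches an alternative to the hard-coded fqL bin edges. Assuming the seg_length and dt values that appear above (256 and 0.1), a runnable version of that alternative would look like the following; this is a sketch of the commented-out idea, not code from the commit:

    import numpy as np

    seg_length = 256
    dt = 0.1
    # Log-spaced bin edges from ~1.1/seg_length up to 0.5/dt (7 edges, 6 bins).
    fqL = np.logspace(np.log10(1.1 / seg_length), np.log10(0.5 / dt), 7)
    nfq = len(fqL) - 1
    # Geometric bin centres, computed the same way the script computes fqd.
    fqd = 10**(np.log10(fqL[:-1] * fqL[1:]) / 2.)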