# Q3D-Calibration/qdx/Bind.py

import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling import models, fitting
from sklearn.mixture import GaussianMixture
from sklearn.linear_model import LinearRegression
from .utils import get_hist

class Bind(object):
    """Holds a pair of calibration data sets taken at known energies E1 and E2
    and derives linear calibration coefficients from them."""

    flag = 0                     # 0 until the first data set has been added
    L, C = 40, 0
    k1, k2, b = 0, 0, 0          # calibration coefficients, set by solve()
    K1, C1, K2, C2 = 0, 0, 0, 0  # ridge slopes/intercepts, set by fit1()

    def __init__(self, n, m):
        # n, m label this bind (used in the `name` property)
        self.n, self.m = n, m

    def add_data(self, E, data):
        """Register a calibration data set: the first call fills (E1, x1, y1),
        the second call fills (E2, x2, y2). `data` is an (N, 2) array."""
        if self.flag == 0:
            self.flag = 1
            self.E1 = E
            self.x1, self.y1 = data[:, 0].reshape(-1, 1), data[:, 1].reshape(-1, 1)
        else:
            self.E2 = E
            self.x2, self.y2 = data[:, 0].reshape(-1, 1), data[:, 1].reshape(-1, 1)

    def fit1(self):
        """Fit a straight line (second column vs. first) to each data set."""
        self.reg1 = LinearRegression()
        self.reg1.fit(self.x1, self.y1)
        self.K1 = self.reg1.coef_[0][0]
        self.C1 = self.reg1.intercept_[0]
        self.reg2 = LinearRegression()
        self.reg2.fit(self.x2, self.y2)
        self.K2 = self.reg2.coef_[0][0]
        self.C2 = self.reg2.intercept_[0]

    def filter(self):
        """Refit both data sets, then drop points whose residual lies more than
        one standard deviation above the fitted line (one-sided cut)."""
        self.fit1()
        ny1 = self.y1 - self.reg1.predict(self.x1)
        ny2 = self.y2 - self.reg2.predict(self.x2)
        idx1 = np.where(ny1 <= ny1.std())[0]
        idx2 = np.where(ny2 <= ny2.std())[0]
        self.x1 = self.x1[idx1]
        self.y1 = self.y1[idx1]
        self.x2 = self.x2[idx2]
        self.y2 = self.y2[idx2]

    def solve(self):
        """Solve for the calibration coefficients k1, k2, b from the two fitted
        ridges (K1, C1) at energy E1 and (K2, C2) at energy E2."""
        self.k1 = (self.E1 - self.E2) / (self.C1 - self.C2)
        self.k2 = -(self.K1 + self.K2) * self.k1 / 2
        self.b = (self.E1 - self.C1 * self.k1 + self.E2 - self.C2 * self.k1) / 2
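
    # Consistency note (inferred from the algebra in solve(), not stated in the
    # original code): writing one event's two raw coordinates as (x1, x2), as in
    # the draw_fit1 labels, solve() matches a calibration of the form
    #     E = k1*x2 + k2*x1 + b.
    # Along a fitted ridge x2 = K*x1 + C the x1 terms cancel when k2 = -K*k1,
    # leaving E_i = k1*C_i + b for each run. Subtracting the two runs gives
    # k1 = (E1 - E2) / (C1 - C2); averaging the two intercept equations gives b,
    # and k2 uses the mean of K1 and K2.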

    def split(self, n=7):
        """Split the first data set into n clusters with a Gaussian mixture
        model, sorted by cluster size (smallest first)."""
        self.clusters = []
        data = np.concatenate((self.x1, self.y1), axis=1)
        model = GaussianMixture(n_components=n)
        model.fit(data)
        ny = model.predict(data)
        for i in np.unique(ny):
            idx = np.where(ny == i)[0]
            self.clusters.append(data[idx])
        self.clusters = sorted(self.clusters, key=len)

    def fit2(self):
        """Fit a 1D Gaussian to the histogram of each coordinate of every
        cluster and record the fitted peak centres in fx and fy."""
        self.fx = []
        self.fy = []
        fitter = fitting.LevMarLSQFitter()
        for data in self.clusters:
            x1, x2 = data[:, 0], data[:, 1]
            c1, e1 = get_hist(x1)
            c2, e2 = get_hist(x2)
            modelx1 = models.Gaussian1D(amplitude=c1.max(), mean=x1.mean(), stddev=x1.std())
            modelx2 = models.Gaussian1D(amplitude=c2.max(), mean=x2.mean(), stddev=x2.std())
            fitted_model1 = fitter(modelx1, e1, c1)
            fitted_model2 = fitter(modelx2, e2, c2)
            self.fx.append(fitted_model1.mean.value)
            self.fy.append(fitted_model2.mean.value)
        idx = np.argsort(self.fx)  # sort order of the peak centres; currently unused (fit3 sorts fx itself)

    def fit3(self, y):
        """Fit a line mapping the fitted peak centres (sorted in descending
        order) to the reference values y."""
        self.fx = np.sort(self.fx)[::-1].reshape(-1, 1)
        self.reg3 = LinearRegression()
        self.reg3.fit(self.fx, y)
        self.K3 = self.reg3.coef_[0]
        self.C3 = self.reg3.intercept_

    def draw_fit1(self, title):
        """Plot both data sets with their fitted lines and save the figure."""
        fig = plt.figure(figsize=(8, 8))
        ax = fig.add_subplot(1, 1, 1)
        ax.scatter(self.x1, self.y1, s=0.1, c='black', label=r'$E_1$')
        ax.scatter(self.x2, self.y2, s=0.1, c='dimgray', label=r'$E_2$')
        ax.plot(self.x1, self.reg1.predict(self.x1), c='red',
                label=r'$x_2={:.4f}x_1+{:.4f},\ R^2={:.5f}$'.format(self.K1, self.C1, self.RSquare1))
        ax.plot(self.x2, self.reg2.predict(self.x2), c='orangered',
                label=r'$x_2={:.4f}x_1+{:.4f},\ R^2={:.5f}$'.format(self.K2, self.C2, self.RSquare2))
        ax.legend()
        fig.savefig(title, facecolor='w', transparent=False)
        plt.close()

    def draw_split(self, title):
        """Scatter-plot each cluster found by split() and save the figure."""
        fig = plt.figure(figsize=(8, 8))
        ax = fig.add_subplot(1, 1, 1)
        for data in self.clusters:
            ax.scatter(data[:, 0], data[:, 1], s=0.1)
        fig.savefig(title, facecolor='w', transparent=False)
        plt.close()

    def draw_fit2(self, title):
        """Plot the coordinate histograms of every cluster, one panel per
        cluster, and save the figure."""
        k, n = 1, len(self.clusters)
        fig = plt.figure(figsize=(8, 10))
        for data in self.clusters:
            ax = fig.add_subplot(n, 1, k)
            x1, x2 = data[:, 0], data[:, 1]
            c1, e1 = get_hist(x1)
            c2, e2 = get_hist(x2)
            ax.scatter(e1, c1, s=0.1)
            ax.scatter(e2, c2, s=0.1)
            k += 1
        plt.tight_layout()
        fig.savefig(title, facecolor='w', transparent=False)
        plt.close()

    @property
    def name(self):
        """Identifier of this bind, formatted as 'E1-E2-n-m'."""
        return '{:.1f}-{:.1f}-{:d}-{:d}'.format(self.E1, self.E2, self.n, self.m)

    @property
    def RSquare1(self):
        return self.reg1.score(self.x1, self.y1)

    @property
    def RSquare2(self):
        return self.reg2.score(self.x2, self.y2)
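

# Assumed end-to-end usage (a sketch inferred from the method names above; the
# actual driver script is not part of this file, and E1, E2, data_run1,
# data_run2 and ref_values are placeholder names):
#
#     bind = Bind(n=0, m=0)
#     bind.add_data(E1, data_run1)   # data_run*: (N, 2) arrays of raw coordinates
#     bind.add_data(E2, data_run2)
#     bind.filter()                  # fit1 + one-sigma residual cut on both runs
#     bind.fit1()                    # refit the cleaned data
#     bind.solve()                   # -> bind.k1, bind.k2, bind.b
#     bind.split(n=7)                # cluster run 1 into peaks with a GMM
#     bind.fit2()                    # Gaussian peak centres -> bind.fx, bind.fy
#     bind.fit3(ref_values)          # -> bind.K3, bind.C3
#     bind.draw_fit1(bind.name + '-fit1.png')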