diff --git a/.idea/BFPy.iml b/.idea/BFPy.iml
deleted file mode 100644
index add18e5..0000000
--- a/.idea/BFPy.iml
+++ /dev/null
@@ -1,28 +0,0 @@
diff --git a/.idea/misc.xml b/.idea/misc.xml
deleted file mode 100644
index 2434358..0000000
--- a/.idea/misc.xml
+++ /dev/null
@@ -1,7 +0,0 @@
diff --git a/.idea/modules.xml b/.idea/modules.xml
deleted file mode 100644
index 756446c..0000000
--- a/.idea/modules.xml
+++ /dev/null
@@ -1,8 +0,0 @@
diff --git a/.idea/other.xml b/.idea/other.xml
deleted file mode 100644
index 652bf24..0000000
--- a/.idea/other.xml
+++ /dev/null
@@ -1,7 +0,0 @@
diff --git a/.idea/vcs.xml b/.idea/vcs.xml
deleted file mode 100644
index 94a25f7..0000000
--- a/.idea/vcs.xml
+++ /dev/null
@@ -1,6 +0,0 @@
diff --git a/kemitter/basis/builders/__init__.py b/kemitter/basis/builders/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/kemitter/basis/builders/ediso.py b/kemitter/basis/builders/ediso.py
deleted file mode 100644
index 46a77eb..0000000
--- a/kemitter/basis/builders/ediso.py
+++ /dev/null
@@ -1,83 +0,0 @@
-import numpy as np
-from scipy.sparse import csc_matrix, hstack
-from numba import vectorize
-from ..fields import field
-
-
-class EDIsoBuilder:
-    """
-    :type basis_parameters: BasisParameters
-    """
-
-    def __init__(self, basis_parameters):
-        self.__bp = basis_parameters
-        self.field_set = field.Field(basis_parameters)
-
-    def build(self):
-        self.field_set.calculate_fields(["ED", "MD"])
-        ed_isometric = isometric_emission(self.__bp.pol_angle,
-                                          self.field_set.xpol.ED.x, self.field_set.ypol.ED.x,
-                                          self.field_set.xpol.ED.y, self.field_set.ypol.ED.y,
-                                          self.field_set.xpol.ED.z, self.field_set.ypol.ED.z)
-        md_isometric = isometric_emission(self.__bp.pol_angle,
-                                          self.field_set.xpol.MD.x, self.field_set.ypol.MD.x,
-                                          self.field_set.xpol.MD.y, self.field_set.ypol.MD.y,
-                                          self.field_set.xpol.MD.z, self.field_set.ypol.MD.z)
-        # for each ed / md
-        # reshape so that each wavelength patter is vectorized (ux_count*uy_count, wavelength_count) (each column a wavelength)
-        # load into sparse matrix with spatial offset
-        ed_isometric = self.sparse_column_major_offset(ed_isometric)
-        md_isometric = self.sparse_column_major_offset(md_isometric)
-        # concatenate ED and MD sparse matrices
-        basis = hstack([ed_isometric, md_isometric])
-        # trim basis based on lambda range TODO: work on x-y flipping
-        if self.__bp.trim_w:
-            basis = self.basis_trim(basis)
-        # return the single sparse matrix
-        return basis
-
-
-    def sparse_column_major_offset(self, matrix):
-        # flatten data matrix by column
-        flat_matrix = np.reshape(matrix, (self.__bp.ux_count*self.__bp.uy_count*self.__bp.wavelength_count, ), order='F')
-        # flat_matrix.flatten('F')
-        # generate sparse rows and columns
-        rows = sparse_rows(self.__bp.ux_count, self.__bp.uy_count, self.__bp.wavelength_count)
-        cols = sparse_cols(self.__bp.ux_count, self.__bp.uy_count, self.__bp.wavelength_count)
-        # load into sparse matrix
-        return csc_matrix((flat_matrix, (rows, cols)))
-
-    def basis_trim(self, matrix):
-        begin_ind = int(self.__bp.uy_count * np.floor(self.__bp.ux_count/2))
-        if self.__bp.pad_w:
-            begin_ind += int(self.__bp.uy_count * np.floor((self.__bp.ux_count - 1)/2))
-
-        final_pix_row_ind = matrix.shape[0] - 1
-        end_ind = int(final_pix_row_ind - self.__bp.uy_count * np.floor((self.__bp.ux_count - 1)/2))
-        if self.__bp.pad_w:
-            end_ind -= int(self.__bp.uy_count * np.floor(self.__bp.ux_count/2))
-
-        return matrix[begin_ind:(end_ind+1), :]
-
-
-def sparse_rows(ux_count, uy_count, wavelength_count):
-    single_wavelength_rows = np.arange(0, ux_count*uy_count)
-    offset = np.arange(0,uy_count*wavelength_count,uy_count)
-    rows = single_wavelength_rows.reshape(ux_count*uy_count, 1) + offset
-    return rows.flatten('F').astype(int)
-
-def sparse_cols(ux_count, uy_count, wavelength_count):
-    single_wavelength_cols = np.arange(0, wavelength_count)
-    return np.repeat(single_wavelength_cols, ux_count*uy_count).astype(int)
-
-
-@vectorize("float64(float64,complex128,complex128,complex128,complex128,complex128,complex128)",
-           target='parallel', nopython=True)
-def isometric_emission(pol_angle, xDx, yDx, xDy, yDy, xDz, yDz):
-    isometric = np.square(np.abs(np.cos(pol_angle) * yDx +
-                                 np.sin(pol_angle) * xDx)) + \
-                np.square(np.abs(np.cos(pol_angle) * yDy +
-                                 np.sin(pol_angle) * xDy)) + \
-                np.square(np.abs(np.cos(pol_angle) * yDz +
-                                 np.sin(pol_angle) * xDz))
-    return isometric
\ No newline at end of file
diff --git a/kemitter/boyd_cgls.py b/kemitter/boyd_cgls.py
deleted file mode 100644
index 8082652..0000000
--- a/kemitter/boyd_cgls.py
+++ /dev/null
@@ -1,57 +0,0 @@
-import tensorflow as tf
-import numpy as np
-
-def cg_solve(A, b, x_init, tol=1e-1, max_iter=1000):
-    delta = tol*tf.norm(b)
-
-    def body(x, k, r_norm_sq, r, p):
-        Ap = A(p)
-        alpha = r_norm_sq / tf.matmul(p, Ap, transpose_a=True)
-        x = x + alpha*p
-        r = r - alpha*Ap
-        r_norm_sq_prev = r_norm_sq
-        r_norm_sq = tf.matmul(r,r, transpose_a=True)
-        beta = r_norm_sq / r_norm_sq_prev
-        p = r + beta*p
-        return (x, k+1, r_norm_sq, r, p)
-
-    def cond(x, k, r_norm_sq, r, p):
-        return tf.logical_and(tf.reshape(tf.sqrt(r_norm_sq) > delta, []), k < max_iter)
-
-
-    r = b - A(x_init)
-    loop_vars = (x_init, tf.constant(0),
-                 tf.matmul(r, r, transpose_a=True), r, r)
-    return tf.while_loop(cond, body, loop_vars)[:3]
-
-
-def main():
-    m = 5
-    n = 5
-    np.random.seed(0)
-    A_np = np.random.randn(m, n)
-    b_np = np.random.randn(m, 1)
-    x_init = np.zeros((n, 1))
-    shift = 1
-
-    expected_x = np.linalg.solve(A_np, b_np)
-    print('solved np')
-    A0 = tf.convert_to_tensor(A_np, dtype=tf.float64)
-    b0 = tf.convert_to_tensor(b_np, dtype=tf.float64)
-    x_init0 = tf.convert_to_tensor(x_init, dtype=tf.float64)
-
-    def A(x):
-        return tf.matmul(A0, x)
-
-    def AT(x):
-        return tf.matmul(A0, x, transpose_a=True)
-
-    solver = cg_solve(A, b0, x_init0)
-    with tf.Session() as sess:
-        print('solving tf...')
-        sess_np = sess.run(solver)
-        print(expected_x, sess_np[0])
-        print(sess_np[2])
-
-if __name__ == "__main__":
-    main()
diff --git a/kemitter/client.py b/kemitter/client.py
deleted file mode 100644
index b58a620..0000000
--- a/kemitter/client.py
+++ /dev/null
@@ -1,145 +0,0 @@
-#!/usr/bin/env python
-import sys
-
-from kemitter.obsrv import observation
-from kemitter.basis import basis
-
-
-class BFPSession(object):
-
-    def __init__(self, verbose=True, pol_children=None):
-        if pol_children is None:
-            pol_children = []
-        self.__pol_children = pol_children  # PolDataSet
-        self.model = None  # cvxpy Model or subclass of such
-        self.vis = None  # PlotSet
-        self.verbose = verbose
-
-    @property
-    def polarization_angles(self):
-        angles = []
-        for child in self.__pol_children:
-            angles.append(child.pol_angle)
-        return angles
-
-    @property
-    def n_polarizations(self):
-        return len(self.__pol_children)
-
-    def data_set(self, pol_angle):
-        for child in self.__pol_children:
-            if child.pol_angle == pol_angle:
-                return child
-        raise ValueError('No child with polarization angle {0}'.format(pol_angle))
-
-    def remove_data_set(self, pol_angle):
-        child = self.data_set(pol_angle)
-        self.__pol_children.remove(child)
-
-    def is_empty(self):
-        return not self.__pol_children
-
-    def is_defined(self):
-        pass
-
-    def reset(self):
-        yes = ['y', 'yes']
-        no = ['n', 'no', '']
-        sys.stdout.write('WARNING: This action will clear all loaded data, calculated bases, and models.\n'
-                         ' Would you still like to proceed? [y/N] ')
-        choice = input().lower()
-        if choice in yes:
-            self.__pol_children.clear()
-            self.model = None
-            self.basis_parameters = None
-            self.vis = None
-        elif choice in no:
-            return
-        else:
-            print('Invalid answer. Exiting without resetting...')
-            return
-
-    def load(self, pol_angle):
-        # check if polarization has already been loaded into session
-        if pol_angle in self.polarization_angles:
-            yes = ['y', 'yes']
-            no = ['n', 'no', '']
-            sys.stdout.write('WARNING: An Observation with polarization angle {0} is already defined.\n'
-                             ' Would you like to overwrite this observation? [y/N] '.format(pol_angle))
-            choice = input().lower()
-            if choice in yes:
-                self.remove_data_set(pol_angle)
-            elif choice in no:
-                return
-            else:
-                print('Invalid answer. Exiting without overwriting...')
-                return
-        # create new polarized data set
-        active_data_set = PolDataSet(pol_angle)
-        # launches interactive loading
-        success = active_data_set.observation._load()
-        if success:
-            print('Successfully loaded {0} deg. data.'.format(pol_angle))
-            self.__pol_children.append(active_data_set)
-        else:
-            print('Unable to load observation.')
-
-    def load_from_file(self, pol_angle, filename):
-        # make and append PolDataSet and load
-        pass
-
-    def load_from_array(self, pol_angle, numpy_data):
-        # make and append PolDataSet and load
-        pass
-
-    def define_fit(self, model, **kwargs):
-        # sets model
-        pass
-
-    def run(self, open_slit, pad_lambda, fit_all_frames, debug=True):
-        pass
-
-    def build_bases(self):
-        for angle in self.polarization_angles:
-            if self.data_set(angle).basis.is_defined:
-                self.data_set(angle).basis.build()
-
-    def visualize(self):
-        pass
-
-
-class PolDataSet(object):
-
-    def __init__(self, pol_angle=None):
-        self.observation = observation.Observation(pol_angle)
-        self.basis = basis.Basis(pol_angle)
-        self.pol_angle = pol_angle
-
-    def define_basis(self, basis_type, n0, n1, n2o, n2e, n3,
-                     ux_range, uy_range, ux_count, uy_count,
-                     d, s, l, pad_w=False, trim_w=True):
-        """
-        :type basis_type: str
-        :type n0: float
-        :type n1: float
-        :type n2o: float
-        :type n2e: float
-        :type n3: float
-        :type ux_range: tuple
-        :type uy_range: tuple
-        :type ux_count: int
-        :type uy_count: int
-        :type d: float
-        :type s: float
-        :type l: float
-        :type pad_w: bool
-        :type trim_w: bool
-        """
-        self.basis.basis_parameters = basis.BasisParameters(basis_type,
-                                                            n0, n1, n2o, n2e, n3,
-                                                            ux_range, uy_range,
-                                                            ux_count, uy_count,
-                                                            d, s, l,
-                                                            self.pol_angle)
-        if self.observation.loaded:
-            self.basis.basis_parameters.set_wavelength(self.observation.wavelength)
diff --git a/kemitter/fit_prototype.py b/kemitter/fit_prototype.py
deleted file mode 100644
index 6ca93d0..0000000
--- a/kemitter/fit_prototype.py
+++ /dev/null
@@ -1,90 +0,0 @@
-import kemitter
-import kemitter.vis.visualization as bfpvis
-import numpy as np
-import scipy.sparse as sp
-import cvxpy as cvx
-import time
-import pickle
-import matplotlib.pyplot as plt
-
-# FIRST SUCCESSFUL FIT - Time 2 minutes - see pickled files for quick loading and result.
-# Ridge Regression implemented - Time ~ 3 minutes (longer setup time)
-sess = kemitter.BFPSession()
-
-sess.load(90)
-sess.load(0)
-
-sess.data_set(90).define_basis(basis_type='EDIso',n0=1.0, n1=1.0, n2o=1.7, n2e=1.7, n3=1.5,
-                               ux_range=(-1.3,1.3), uy_range=(-1.3,1.3),
-                               ux_count=180, uy_count=180,
-                               d=10.0, s=10.0, l=0.0, pad_w=False, trim_w=True)
-sess.data_set(0).define_basis(basis_type='EDIso',n0=1.0, n1=1.0, n2o=1.7, n2e=1.7, n3=1.5,
-                              ux_range=(-1.3,1.3), uy_range=(-1.3,1.3),
-                              ux_count=180, uy_count=180,
-                              d=10.0, s=10.0, l=0.0, pad_w=False, trim_w=True)
-
-sess.build_bases()
-
-# with open('test_sess.p', 'wb') as f:
-#     pickle.dump(sess, f)
-# with open('./test_sess.p', 'rb') as f:
-#     sess = pickle.load(f)
-print('loaded')
-A = sp.vstack([sess.data_set(0).basis.basis_matrix, sess.data_set(90).basis.basis_matrix])
-
-basis_rows = A.shape[0]
-n = A.shape[1] + 1
-
-o = sp.csc_matrix((np.ones(basis_rows,),(np.arange(basis_rows), np.zeros(basis_rows,))))
-A = sp.hstack((A, o))
-H = cvx.Constant(A)
-D = np.diag(np.ones((A.shape[1] - 1,)), k=1) - np.diag(np.ones((A.shape[1],)))
-D = cvx.Constant(D[:-1, :])
-
-b = np.vstack((sess.data_set(0).observation.data.reshape((180*1024,1), order='F'),
-               sess.data_set(90).observation.data.reshape((180*1024,1), order='F')))
-b = cvx.Constant(b)
-# plt.imshow(sess.data_set(90).observation.data)
-# plt.show()
-# bfpvis.basis_func_plot(sess.data_set(90).basis, 179)
-### MAKE b from basis directly just to test
-# x_test = np.random.rand(n, 1)
-# b = A.todense() @ x_test
-# plt.imshow(b.reshape((180,1024),order='F'))
-# plt.show()
-
-alpha = 0.025
-x = cvx.Variable(n)
-print('variables and constants made')
-print('defining objective')
-objective = cvx.Minimize(cvx.norm2(H*x - b) + alpha*cvx.norm2(D*x))
-print('defining constraints')
-constraints = [x[:-1] >= 0]
-print('defining problem')
-prob = cvx.Problem(objective, constraints)
-print('\nSolving...')
-t0 = time.time()
-result = prob.solve(solver=cvx.MOSEK, verbose=True)
-t1 = time.time()
-with open('test_res.p', 'wb') as f:
-    pickle.dump(x, f)
-
-print('Done in {0:.3f} seconds.'.format(t1-t0))
-# NORM2 worked! 2 minutes for solve!!!
-
-# Problem data.
-# m = 100
-# n = 20
-# np.random.seed(1)
-# A = np.random.randn(m, n)
-# b = np.random.randn(m, 1)
-#
-# # Construct the problem.
-# x = Variable(n)
-# objective = Minimize(sum_entries(square(A*x - b)))
-# constraints = [0 <= x]
-# prob = Problem(objective, constraints)
-#
-# print("Optimal value", prob.solve(solver=MOSEK, verbose=True))
-# print("Optimal var")
-# print(x.value)  # A numpy matrix.
\ No newline at end of file
diff --git a/kemitter/tf_test.py b/kemitter/tf_test.py
deleted file mode 100644
index fd03f53..0000000
--- a/kemitter/tf_test.py
+++ /dev/null
@@ -1,72 +0,0 @@
-import pickle
-import numpy as np
-import tensorflow as tf
-import scipy.sparse as sp
-import sys
-import matplotlib.pyplot as plt
-
-
-print('Python %s on %s' % (sys.version, sys.platform))
-sys.path.extend(['C:\\Users\\Alex\\Documents\\Brown\\Thesis\\untitled', 'C:/Users/Alex/Documents/Brown/Thesis/untitled'])
-
-with open('test_sess.p', 'rb') as f:
-    sess = pickle.load(f)
-obs = sess.data_set(90).observation.data.reshape((180*1024,1), order='F')
-
-# def convert_sparse_matrix_to_sparse_tensor(X):
-#     coo = X.tocoo()
-#     indices = np.mat([coo.row, coo.col]).transpose()
-#     return tf.SparseTensor(indices, coo.data, coo.shape)
-
-basis_rows = sess.data_set(90).basis.basis_matrix.shape[0]
-n = sess.data_set(90).basis.basis_matrix.shape[1] + 1
-
-o = sp.csc_matrix((np.ones(basis_rows,),(np.arange(basis_rows), np.zeros(basis_rows,))))
-A_sp = sp.hstack((sess.data_set(90).basis.basis_matrix, o)).todense()
-# A_sp = A_sp.tocoo()
-# indices = np.mat([A_sp.row, A_sp.col]).transpose()
-
-# A_tensor = convert_sparse_matrix_to_sparse_tensor(A_sp)
-x = tf.Variable(np.random.rand(2049,1), dtype=tf.float64)
-
-# sp_indices = tf.placeholder(tf.int64, [None, 2])
-# sp_values = tf.placeholder(tf.float64)
-# sp_shape = tf.placeholder(tf.int64, [2, ])
-# A = tf.SparseTensor(sp_indices, sp_values, sp_shape)
-A = tf.placeholder(tf.float64, shape=[180*1024, 2049])
-b = tf.placeholder(tf.float64, shape=[180*1024, 1])
-print('making recon')
-reconstruction = tf.matmul(A, tf.abs(x))
-print('making loss')
-loss = tf.norm(reconstruction - b, 2)
-train_step = tf.train.GradientDescentOptimizer(0.05).minimize(loss)
-
-session = tf.Session()
-session.run(tf.initialize_all_variables())
-
-print('training...')
-for step in range(10):
-    c, loss_val, x_val = session.run([train_step, loss, x], feed_dict={A: A_sp, b: obs})
-    print(step, loss_val)
-with open('tf_x_val.p', 'wb') as f:
-    pickle.dump(x_val, f)
-plt.plot(x_val[0:1024])
-
-# x = tf.placeholder(tf.float32, [None, 1])
-# y_ = tf.placeholder(tf.float32, [None, 1])
-#
-# b = tf.Variable(tf.zeros([1]))
-# w = tf.Variable(tf.zeros([1, 1]))
-#
-# y = w * x + b
-#
-# loss = tf.reduce_sum((y - y_) * (y - y_))
-# train_step = tf.train.GradientDescentOptimizer(0.005).minimize(loss)
-#
-# sess = tf.Session()
-# sess.run(tf.initialize_all_variables())
-#
-# for step in range(10):
-#     sess.run(train_step, feed_dict={x:[[2.3],[1.7],[-3.8],[0.5],[-4.1],[-1.5],[-2.5],[6.2]],
-#              y_:[[-4.4],[-3.6],[7.7],[-0.9],[8.3],[2.9],[4.9],[-12.2]]})
-# print(step, sess.run(w), sess.run(b))
\ No newline at end of file
diff --git a/main.py b/main.py
deleted file mode 100644
index 7941d63..0000000
--- a/main.py
+++ /dev/null
@@ -1,26 +0,0 @@
-import numpy as np
-import kemitter
-import pickle
-
-# h_obs = kemitter.Observation()
-# h_obs.load()
-#
-# v_obs = kemitter.Observation()
-# v_obs.load()
-#
-# h_basis = kemitter.basis.IsometricEmitter(pol_angle=90, n2=1.7, n3=1.5)
-# v_basis = kemitter.basis.IsometricEmitter(pol_angle=0, n2=1.7, n3=1.5)
-#
-# with open('final_test_in.p', 'wb') as f:
-#     pickle.dump((h_basis, v_basis, h_obs, v_obs), f)
-
-with open('final_test_in.p', 'rb') as f:
-    h_basis, v_basis, h_obs, v_obs = pickle.load(f)
-
-# ========= TEST CODE HERE =========
-model = kemitter.model.Quadratic(alpha=0.025)
-model.run([h_basis, v_basis], [h_obs, v_obs])
-# ==================================
-
-with open('final_test_sess.p', 'wb') as f:
-    pickle.dump(model, f)