Secure MPC: Gradient Descent


######################################################################################################
# This cell implements most of the secure MPC functions used in the master's thesis by Katrine Tjell.
# To name a few:
# - Shamir's secret sharing scheme
# - multiplication protocol
# - Beaver triplet creation
# - integer division
# - comparison protocol
######################################################################################################
import numpy as np
import matplotlib.pylab as plt
import csv
import sys

# Recombination vector: the Lagrange basis polynomials for the points 1..n evaluated
# at 0, so that a polynomial p of degree < n satisfies p(0) = sum_i r[i]*p(i).
def basispoly(F, n):
    r = []
    C = range(1, n+1)
    for i in range(1, n+1):
        c = [k for k in C if k != i]
        p = 1
        for j in range(n-1):
            p *= -F(c[j]) / (F(i) - F(c[j]))
        r.append(F(p))
    return r

# Shamir's secret sharing: hide x as the constant term of a random degree-t polynomial
# and hand out its evaluations at the points 1..n (one share per party).
def secretsharing(F, x, t, n):
    shares = []
    c = [F.random_element() for i in range(t)]
    for i in range(1, n+1):
        s = x
        for j in range(1, t+1):
            s = F(s) + F(c[j-1]) * F(i)**F(j)
        shares.append(s)
    return np.array(shares)

# Beaver triplet creation: every party contributes a random a_i and b_i; the parties
# obtain shares of a = sum a_i, b = sum b_i and c = a*b. The share-wise products lie on
# a polynomial of degree 2t, which the recombination vector can still open since 2t < n.
def triplet(F, n, t):
    r = basispoly(F, n)
    matrixA = np.zeros((n, n), dtype=object)   # object dtype keeps exact field elements
    matrixB = np.zeros((n, n), dtype=object)
    for i in range(n):
        a = F.random_element()
        b = F.random_element()
        matrixA[:, i] = secretsharing(F, a, t, n)
        matrixB[:, i] = secretsharing(F, b, t, n)
    sharesA = np.sum(matrixA, 1)
    sharesA = [F(i) for i in sharesA]
    sharesB = np.sum(matrixB, 1)
    sharesB = [F(i) for i in sharesB]
    sharesC = [F(i*j) for (i, j) in zip(sharesA, sharesB)]
    return [sharesA, sharesB, sharesC]

# Multiplication protocol (uses the globals F, n, t): open d = x1 - alpha and
# e = x2 - beta, then [x1*x2] = d*e + d*[beta] + e*[alpha] + [gamma].
def mult(sx1, sx2):
    salpha, sbeta, sgamma = triplet(F, n, t)
    r = basispoly(F, len(sx1))
    d = dot(F, r, [F(i-j) for (i, j) in zip(sx1, salpha)])
    e = dot(F, r, [F(i-j) for (i, j) in zip(sx2, sbeta)])
    return np.array([F(d*e + d*i + e*j + k) for (i, j, k) in zip(sbeta, salpha, sgamma)])

# Inversion: open the masked product w = x*r and return w^(-1)*[r] = [x^(-1)].
def inverse(x, r):
    b = basispoly(F, len(x))
    w_s = [i*j for i, j in zip(x, r)]
    w = dot(F, b, w_s)
    if w == 0:
        w = F(1000)
    return w**(-1) * np.array(r)

# Integer division protocol: given shares q and a public divisor k, return shares of
# (approximately) the underlying value divided by k, using a random mask r and a
# correction bit c.
def down(F, q, k, t, n):
    basis = basispoly(F, n)
    #while True:
    r = F.random_element()
    #if r < k:
    #    break
    r_T = np.trunc(np.array(r, dtype=int) * k**(-1))
    r_s = secretsharing(F, r, t, n)
    r_Ts = secretsharing(F, F(r_T), t, n)
    z_s = q + r_s
    z = dot(F, basis, z_s)
    z_T = np.trunc(np.array(z, dtype=int) * k**(-1))
    z_Ts = secretsharing(F, F(z_T), t, n)
    q_tilde = z_Ts - r_Ts
    c = int(dot(F, basis, q) < dot(F, basis, q_tilde) * k)
    return q_tilde - c

# Inner product over F; with x the recombination vector this opens the shared value y.
def dot(F, x, y):
    res = 0
    for i in range(len(x)):
        res += F(x[i] * y[i])
    return res

############################### MAIN ##########################################
##### CONSTANTS ################################
set_random_seed(1)
np.random.seed(1)

m = 1125899839733759  # Prime, large enough that the probability of choosing any particular element is negligible
F = GF(m)             # Finite field with m elements
n = 4                 # Number of parties
t = 1                 # Degree of the sharing polynomial
r = basispoly(F, n)   # Lagrange basis polynomial coefficients (recombination vector)
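A quick sanity check of the primitives above (illustrative only, not part of the thesis protocol): share two values, multiply them with mult, and open the result with the recombination vector r.

# Illustrative sanity check: share, multiply via a Beaver triplet, and open.
# Assumes the constants F, n, t and r defined in the cell above.
sa = secretsharing(F, 6, t, n)
sb = secretsharing(F, 7, t, n)
sc = mult(sa, sb)
print(dot(F, r, sc))   # expected: 42 = 6*7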
       
#####################################################################################################
# This cell implements methods that are more specific to the METHODS OF GRADIENT DESCENT.
# The function which is minimized is of the form f(x) = (x - [a])**2.
#####################################################################################################

# Comparison of two shared values. Should and could be implemented securely, see the report;
# here the two values are simply opened and compared.
def less(a, b):
    return int(dot(F, basispoly(F, n), a) < dot(F, basispoly(F, n), b))

# Evaluates the function securely. (It is assumed that the form of f(x) is public, but the
# coefficients are secret. That is, [x] is secret, and fs = [3] is secret.)
def f(x, fs):
    xf = x - fs
    return mult(xf, xf)
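An illustrative check of the two helpers (assuming cell 1 has run, so F, n and t are defined): evaluating f on shares of x = 10 with a shared coefficient a = 3 should open to 49.

# Illustrative check of f() and less() on shared inputs.
sx = secretsharing(F, 10, t, n)       # shares of x = 10
sa = secretsharing(F, 3, t, n)        # shares of the secret coefficient a = 3
sy = f(sx, sa)                        # shares of (x - a)^2
print(dot(F, basispoly(F, n), sy))    # expected: 49 = (10 - 3)^2
print(less(secretsharing(F, 2, t, n), secretsharing(F, 5, t, n)))  # expected: 1, since 2 < 5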
       
#####################################################################################################
# This cell implements the method of GRADIENT DESCENT as a secure MPC protocol.
# It uses the functions in cell 1, so that cell should be run before attempting to run this one.
# With regard to Protocol 3.8 in the report, this cell implements a fixed stepsize and scaling.
# The function which is minimized is f(x) = (x - [3])**2.
#####################################################################################################
set_random_seed(1)

n = 5
t = 1
basis = basispoly(F, n)
fs = secretsharing(F, 3, t, n)

M = 2**15                               # Scaling constant
start_val = 4                           # Start value
x0 = secretsharing(F, start_val, t, n)
cur_x = M*x0
gamma = 10                              # Fixed stepsize

xs = []
rounds = 50
for i in range(rounds):
    xs.append(np.array(dot(F, basis, cur_x), dtype=float) / M)
    prev_x = cur_x
    x_d = down(F, prev_x, M, t, n)       # Scale down to recover [x]
    g = f(x_d + 1, fs) - f(x_d, fs)      # Forward-difference gradient
    dg = (g*int(M/gamma))                # Scaled step g*M/gamma
    cur_x -= dg

print('Squared error between estimate and true optimum x*')
e = (np.array(xs) - 3)**2
list_plot(e)
       
Squared error between estimate and true optimum x*
[plot: squared error per round]
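The gradient in the loop above is the forward difference g = f(x+1) - f(x). For f(x) = (x - 3)**2 this equals the exact gradient plus a constant bias of 1, which a quick plaintext check (illustrative) confirms:

# Plaintext check: for f(x) = (x - 3)^2,
# f(x+1) - f(x) = 2*(x - 3) + 1, i.e. the true gradient 2*(x - 3) plus a bias of 1.
for x in [0, 3, 4, 20]:
    g = (x + 1 - 3)**2 - (x - 3)**2
    assert g == 2*(x - 3) + 1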
#####################################################################################################
# This cell implements the method of GRADIENT DESCENT as a secure MPC protocol.
# It uses the functions in cell 1, so that cell should be run before attempting to run this one.
# With regard to Protocol 3.8 in the report, this cell implements a fixed stepsize, but NO scaling.
# The function which is minimized is f(x) = (x - [3])**2.
#####################################################################################################
set_random_seed(1)

n = 5                                   # Number of parties
t = 1                                   # Number of corrupted parties
basis = basispoly(F, n)                 # Recombination vector - Lagrange basis polynomial
fs = secretsharing(F, 3, t, n)          # Secret coefficient of the function to minimize

start_val = 20                          # Start value
x0 = secretsharing(F, start_val, t, n)  # Secret start value
cur_x = x0
gamma = 3                               # Fixed stepsize

xs = []
rounds = 50
for i in range(rounds):
    xs.append(np.array(dot(F, basis, cur_x), dtype=float))
    x_d = cur_x
    g = f(x_d + 1, fs) - f(x_d, fs)      # Forward-difference gradient
    dg = down(F, g, gamma, t, n)         # Step g/gamma via the division protocol
    cur_x -= dg

print('Squared error between estimate and true optimum x*')
e = (np.array(xs) - 3)**2
list_plot(e)
       
Squared error between estimate and true optimum x*
[plot: squared error per round]
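Here the division protocol down() replaces the scaling of the previous cell: the step [g]/gamma is computed directly on shares. A minimal illustration of the intended semantics (assuming cell 1 has run):

# Illustrative use of down(): shares of 21 divided by the public constant 3
# should open to roughly 7 (the result is exact only up to small rounding
# from the random mask and the correction bit).
q = secretsharing(F, 21, t, n)
q_div = down(F, q, 3, t, n)
print(dot(F, basispoly(F, n), q_div))   # expected: approximately 7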
#####################################################################################################
# This cell implements the method of GRADIENT DESCENT as a secure MPC protocol.
# It uses the functions in cell 1, so that cell should be run before attempting to run this one.
# With regard to Protocol 3.8 in the report, this cell implements backtracking line search
# to determine the stepsize, and NO scaling.
# The function which is minimized is f(x) = (x - [3])**2.
#####################################################################################################
set_random_seed(1)

n = 5                                   # Number of parties
t = 1                                   # Number of corrupted parties
basis = basispoly(F, n)                 # Recombination vector - Lagrange basis polynomial
fs = secretsharing(F, 3, t, n)          # Secret coefficient of the function to minimize

start_val = 20                          # Start value
x0 = secretsharing(F, start_val, t, n)  # Secret start value
cur_x = x0

xs = []
rounds = 10
for i in range(rounds):
    xs.append(np.array(dot(F, basis, cur_x), dtype=float))
    x_d = cur_x
    g = f(x_d + 1, fs) - f(x_d, fs)          # Forward-difference gradient
    R = less(f(x_d + 1, fs), f(x_d, fs))     # 1 iff f decreases in the +x direction
    S = R * (-1) + (1-R) * 1                 # Sign of g: S = -1 if g is negative, else +1

    # Backtracking line search: increase gamma (shrink the step g/gamma) until the
    # step fits below f(x) and yields a sufficient decrease.
    gamma = 1
    T = f(x_d, fs) - g
    K = less(-T, T)                          # 1 iff T is "negative" in the field
    H = f(x_d - g, fs)
    G = less(T, H)
    G = G or K
    while G:
        gamma += 1
        T = f(x_d, fs) - S*down(F, S*g, gamma, t, n)
        K = less(-T, T)
        H = f(x_d - S*down(F, S*g, gamma, t, n), fs)
        G = less(T, H)
        G = G or K

    dg = S*down(F, S*g, gamma, t, n)
    cur_x -= dg

print('Squared error between estimate and true optimum x*')
e = (np.array(xs) - 3)**2
list_plot(e)
       
Squared error between estimate and true optimum x*
[plot: squared error per round]
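In the loop above, T holds f(x) minus the candidate step, K flags a step exceeding f(x) (T "negative" in the field), and H is the function value after the step; gamma grows until the step both fits under f(x) and gives a sufficient decrease. A plaintext restatement (illustrative, assuming a positive integer gradient g; the secure version handles the sign via S):

# Plaintext restatement of the backtracking rule used above.
def backtrack_plain(fx, x, g):
    gamma = 1
    while True:
        step = g // gamma              # integer division, as down() provides on shares
        T = fx(x) - step
        if T >= 0 and fx(x - step) <= T:
            return step                # sufficient decrease reached
        gamma += 1                     # otherwise shrink the step and retry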
#####################################################################################################
# This cell implements the NON-SECURE method of GRADIENT DESCENT, for comparison.
# The function which is minimized is f(x) = (x - 3)**2.
#####################################################################################################
import numpy as np

# Function to minimize
def f(x):
    return (x - 3)**2

# Gradient
def gf(x):
    return 2*x - 6

gamma = 1/3.
x0 = 20

cur_x = x0
xs = []
for i in range(51):
    xs.append(cur_x)
    prev_x = cur_x
    g = gf(prev_x)   # (f(prev_x + h) - f(prev_x))/h
    #gamma = 1
    #while f(prev_x - g*gamma) > f(prev_x) - 0.1*g**2*gamma:
    #    gamma = gamma * 0.5
    cur_x += (-g*gamma)
    step = abs(cur_x - prev_x)

print('Squared error between estimate and true optimum x*')
e = (np.array(xs) - 3)**2
list_plot(e)
       
Squared error between estimate and true optimum x*
[plot: squared error per round]
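For reference, with f(x) = (x - 3)**2 and a fixed step the iteration is linear: x' = x - gamma*(2x - 6) = (1 - 2*gamma)*x + 6*gamma, so the error contracts by a factor |1 - 2*gamma| per round; gamma = 1/3 gives a factor of 1/3, matching the plotted decay. A quick check:

# Worked check: the error (x - 3) shrinks by a factor of 3 each round for gamma = 1/3.
x = 20.0
for i in range(5):
    x = x - (1/3.)*(2*x - 6)
    print(x)   # 8.667, 4.889, 3.630, 3.210, 3.070 -> approaches 3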