Building a prediction model from TensorFlow tensors, without feeding input data



I am trying to connect CasADi and TensorFlow. CasADi is a toolbox that uses symbolic variables and performs automatic differentiation; it is typically used for dynamic/static optimization problems.

I found an example that uses GPflow (https://web.casadi.org/blog/tensorflow/). There, the GP model is first trained on data as follows:

data = np.random.normal(loc=0.5,scale=1,size=(N,nd))
value = np.random.random((N,1))

model = gpflow.models.GPR(data, value, gpflow.kernels.Constant(nd) + gpflow.kernels.Linear(nd) + gpflow.kernels.White(nd) + gpflow.kernels.RBF(nd))
gpflow.train.ScipyOptimizer().minimize(model)

The prediction model is then built by passing only tensors, not actual values:

X = tf.placeholder(shape=(1,nd),dtype=np.float64)
[mean,_] = model._build_predict(X)

This way, CasADi can substitute the actual values at evaluation time by calling the TensorFlow callback function.
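For illustration: once the model is wrapped as a CasADi callback (the full code is below), it can be used symbolically like any other CasADi function. A minimal sketch, where gpr_callback stands for the wrapped model:

x = MX.sym('x', 1, nd)   # purely symbolic input, no numeric values
cost = gpr_callback(x)   # numbers are only supplied when the solver evaluates this expression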

I would like to use a tf.keras.Sequential() model instead of the GPflow model, because I want to implement a recurrent neural network. However, the Sequential model has no _build_predict(X) method. I tried plain predict() instead, but got the following error:

InvalidArgumentError: You must feed a value for placeholder tensor 'Placeholder' with dtype double and shape [35039,1,8]
     [[{{node Placeholder}}]]

Do you know what the equivalent would be in this case? (predict() apparently executes the graph and therefore requires numeric input for every placeholder, whereas what I need is the symbolic output tensor.)

Here is the full code using GPflow:

from casadi import *

T = 10. # Time horizon
N = 20 # number of control intervals

# Declare model variables
x1 = MX.sym('x1')
x2 = MX.sym('x2')
x = vertcat(x1, x2)
u = MX.sym('u')

# Model equations
xdot = vertcat((1-x2**2)*x1 - x2 + u, x1)


# Formulate discrete time dynamics
if False:
   # CVODES from the SUNDIALS suite
   dae = {'x':x, 'p':u, 'ode':xdot}
   opts = {'tf':T/N}
   F = integrator('F', 'cvodes', dae, opts)
else:
   # Fixed step Runge-Kutta 4 integrator
   M = 4 # RK4 steps per interval
   DT = T/N/M
   f = Function('f', [x, u], [xdot])
   X0 = MX.sym('X0', 2)
   U = MX.sym('U')
   X = X0
   Q = 0
   for j in range(M):
       k1 = f(X, U)
       k2 = f(X + DT/2 * k1, U)
       k3 = f(X + DT/2 * k2, U)
       k4 = f(X + DT * k3, U)
       X = X + DT/6*(k1 + 2*k2 + 2*k3 + k4)
   F = Function('F', [X0, U], [X],['x0','p'],['xf'])

# Start with an empty NLP
w=[]
w0 = []
lbw = []
ubw = []
g=[]
lbg = []
ubg = []

# "Lift" initial conditions
Xk = MX.sym('X0', 2)
w += [Xk]
lbw += [0, 1]
ubw += [0, 1]
w0 += [0, 1]

# Formulate the NLP
for k in range(N):
    # New NLP variable for the control
    Uk = MX.sym('U_' + str(k))
    w   += [Uk]
    lbw += [-1]
    ubw += [1]
    w0  += [0]

    # Integrate till the end of the interval
    Fk = F(x0=Xk, p=Uk)
    Xk_end = Fk['xf']

    # New NLP variable for state at end of interval
    Xk = MX.sym('X_' + str(k+1), 2)
    w   += [Xk]
    lbw += [-0.25, -inf]
    ubw += [  inf,  inf]
    w0  += [0, 0]

    # Add equality constraint
    g   += [Xk_end-Xk]
    lbg += [0, 0]
    ubg += [0, 0]

nd = N+1

import numpy as np        # used below (np.float64, np.random)
import tensorflow as tf   # used below (tf.placeholder, tf.Session)
import gpflow
import time

from tensorflow_casadi import TensorFlowEvaluator

class GPR(TensorFlowEvaluator):
  def __init__(self, model, session, opts={}):
    X = tf.placeholder(shape=(1,nd),dtype=np.float64)
    [mean,_] = model._build_predict(X)
    mean = tf.reshape(mean,(1,1))
    TensorFlowEvaluator.__init__(self,[X],[mean],session,opts)
    self.counter = 0
    self.time = 0

  def eval(self,arg):
    self.counter += 1
    t0 = time.time()
    ret = TensorFlowEvaluator.eval(self,arg)
    self.time += time.time()-t0
    return [ret]

# Create
np.random.seed(0)
data = np.random.normal(loc=0.5,scale=1,size=(N,nd))
value = np.random.random((N,1))

model = gpflow.models.GPR(data, value, gpflow.kernels.Constant(nd) + gpflow.kernels.Linear(nd) + gpflow.kernels.White(nd) + gpflow.kernels.RBF(nd))
gpflow.train.ScipyOptimizer().minimize(model)



with tf.Session() as session:
  model.initialize()
  GPR = GPR(model, session)

  w = vertcat(*w)

  # Create an NLP solver
  prob = {'f': GPR(w[0::3]), 'x': w , 'g': vertcat(*g)}
  options = {"ipopt": {"hessian_approximation": "limited-memory"}}
  solver = nlpsol('solver', 'ipopt', prob, options)

  # Solve the NLP
  sol = solver(x0=w0, lbx=lbw, ubx=ubw, lbg=lbg, ubg=ubg)

print("Ncalls",GPR.counter)
print("Total time [s]",GPR.time)
w_opt = sol['x'].full().flatten()

# Plot the solution
x1_opt = w_opt[0::3]
x2_opt = w_opt[1::3]
u_opt = w_opt[2::3]

tgrid = [T/N*k for k in range(N+1)]
import matplotlib.pyplot as plt
plt.figure(1)
plt.clf()
plt.plot(tgrid, x1_opt, '--')
plt.plot(tgrid, x2_opt, '-')
plt.step(tgrid, vertcat(DM.nan(1), u_opt), '-.')
plt.xlabel('t')
plt.legend(['x1','x2','u'])
plt.grid()
plt.show()

And here is the TensorFlowEvaluator class:

import casadi
import tensorflow as tf

class TensorFlowEvaluator(casadi.Callback):
  def __init__(self,t_in,t_out,session, opts={}):
    """
      t_in: list of inputs (tensorflow placeholders)
      t_out: list of outputs (tensors dependent on those placeholders)
      session: a tensorflow session
    """
    casadi.Callback.__init__(self)
    assert isinstance(t_in,list)
    self.t_in = t_in
    assert isinstance(t_out,list)
    self.t_out = t_out
    self.construct("TensorFlowEvaluator", opts)
    self.session = session
    self.refs = []

  def get_n_in(self): return len(self.t_in)
  def get_n_out(self): return len(self.t_out)

  def get_sparsity_in(self,i):
      return casadi.Sparsity.dense(*self.t_in[i].get_shape().as_list())

  def get_sparsity_out(self,i):
      return casadi.Sparsity.dense(*self.t_out[i].get_shape().as_list())

  def eval(self,arg):
    # Associate each tensorflow input with the numerical argument passed by CasADi
    d = dict((v,arg[i].toarray()) for i,v in enumerate(self.t_in))
    # Evaluate the tensorflow expressions
    ret = self.session.run(self.t_out,feed_dict=d)
    return ret

  # Vanilla tensorflow offers just the reverse mode AD
  def has_reverse(self,nadj): return nadj==1
  def get_reverse(self,nadj,name,inames,onames,opts):
    # Construct tensorflow placeholders for the reverse seeds
    adj_seed = [tf.placeholder(shape=self.sparsity_out(i).shape,dtype=tf.float64) for i in range(self.n_out())]
    # Construct the reverse tensorflow graph through 'gradients'
    grad = tf.gradients(self.t_out, self.t_in,grad_ys=adj_seed)
    # Create another TensorFlowEvaluator object
    callback = TensorFlowEvaluator(self.t_in+adj_seed,grad,self.session)
    # Make sure you keep a reference to it
    self.refs.append(callback)

    # Package it in the nominal_in+nominal_out+adj_seed form that CasADi expects
    nominal_in = self.mx_in()
    nominal_out = self.mx_out()
    adj_seed = self.mx_out()
    return casadi.Function(name,nominal_in+nominal_out+adj_seed,callback.call(nominal_in+adj_seed),inames,onames)


if __name__=="__main__":
  from casadi import *

  a = tf.placeholder(shape=(2,2),dtype=tf.float64)
  b = tf.placeholder(shape=(2,1),dtype=tf.float64)

  y = tf.matmul(tf.sin(a), b)

  with tf.Session() as session:
    f_tf = TensorFlowEvaluator([a,b], [y], session)

    a = MX.sym("a",2,2)
    b = MX.sym("a",2,1)
    y = f_tf(a,b)
    yref = mtimes(sin(a),b)

    f = Function('f',[a,b],[y])
    fref = Function('f',[a,b],[yref])

    print(f(DM([[1,2],[3,4]]),DM([[1],[3]])))
    print(fref(DM([[1,2],[3,4]]),DM([[1],[3]])))

    f = Function('f',[a,b],[jacobian(y,a)])
    fref = Function('f',[a,b],[jacobian(yref,a)])
    print(f(DM([[1,2],[3,4]]),DM([[1],[3]])))
    print(fref(DM([[1,2],[3,4]]),DM([[1],[3]])))


My attempt was:

# design network
model = tf.keras.Sequential()
LSTM = tf.keras.layers.LSTM(50, input_shape=(train_X.shape[1], train_X.shape[2]))
model.add(LSTM) #, input_shape=(train_X.shape[1], train_X.shape[2]))
model.add(tf.keras.layers.Dense(1))
model.compile(loss='mae', optimizer='adam')
# fit network
history = model.fit(train_X, train_y, epochs=50, batch_size=72, validation_data=(test_X, test_y), verbose=0, shuffle=False)

with tf.Session() as session:
  testXshape = test_X.shape
  GPR = GPR(model, session, testXshape)

Thanks!


1 Answer

I got this working with TensorFlowEvaluator, creating a class the same way GPR was created:

class ValFcn(TensorFlowEvaluator):
    import tensorflow as tf

    def __init__(self, NN, session, opts={}):
        self.X = self.tf.placeholder(shape=(1, 4), dtype=self.tf.float32)
        self.output = NN(self.X)
        TensorFlowEvaluator.__init__(self, [self.X], [self.output], session, opts)

    def eval(self, arg):
        ret = TensorFlowEvaluator.eval(self, arg)
        return ret

I was working with float32, so I had to change the dtype there and inside TensorFlowEvaluator.
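For reference, the only hard-coded dtype inside the TensorFlowEvaluator class above is the reverse-seed placeholder in get_reverse; a minimal sketch of that line switched to float32:

adj_seed = [tf.placeholder(shape=self.sparsity_out(i).shape, dtype=tf.float32)  # was tf.float64
            for i in range(self.n_out())]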

I actually used this model as the cost function of an OCP.
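Applying the same pattern to the Sequential LSTM from the question could look like the sketch below. It assumes one timestep with 8 features (matching the [35039,1,8] shape in the error message); NNCallback and f_nn are illustrative names, not part of the original code. Since TensorFlowEvaluator derives dense 2-D CasADi sparsities from the tensor shapes, the placeholder is kept 2-D and reshaped inside the graph:

import tensorflow as tf
from tensorflow_casadi import TensorFlowEvaluator

class NNCallback(TensorFlowEvaluator):
    def __init__(self, model, session, opts={}):
        # 2-D CasADi-facing input, reshaped to the (batch, timesteps, features)
        # layout the LSTM expects
        X = tf.placeholder(shape=(1, 8), dtype=tf.float32)
        output = tf.reshape(model(tf.reshape(X, (1, 1, 8))), (1, 1))
        TensorFlowEvaluator.__init__(self, [X], [output], session, opts)

# Reuse the session that already holds the trained Keras weights;
# a fresh tf.Session() would not contain them.
session = tf.keras.backend.get_session()
f_nn = NNCallback(model, session)
# f_nn can now appear in a CasADi expression, e.g. as the OCP cost:
# prob = {'f': f_nn(...), 'x': w, 'g': vertcat(*g)}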

Hope it helps!
