#ifndef BUILD_COVARIANCE_FUNCTOR_HEADER
#define BUILD_COVARIANCE_FUNCTOR_HEADER
#define ARD_PARAM_BUFFER_SIZE 256
#define EIGEN_USE_THREADS
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference.h"
#include "tensorflow/core/framework/op_kernel.h"
namespace tensorflow{
// Op definition for CovMatrixGenerator.
// A "T" type attr is declared so the kernel registrations below
// (TypeConstraint<T>("T") for float and int32) can resolve; the original
// fixed-float32 signature declared no "T" attr, which makes those
// TypeConstraint registrations fail and makes the int32 kernel unreachable.
REGISTER_OP("CovMatrixGenerator")
.Attr("T: {float, int32}")
.Input("input_features: T")
.Output("output_latent: T")
// Shape inference: output has exactly the shape of the input.
.SetShapeFn([](::tensorflow::shape_inference::InferenceContext* c) {
c->set_output(0, c->input(0));
return Status::OK();
});
using CPUDevice = Eigen::ThreadPoolDevice;
using GPUDevice = Eigen::GpuDevice;
// OpKernel for the "CovMatrixGenerator" op, parameterized on device type
// (CPUDevice / GPUDevice) and element type T. Only declared here; the
// constructor and Compute() are defined in the accompanying .cpp file.
template <typename D, typename T>
class CovMatrixGenerator : public OpKernel {
public:
// Reads any op attributes at kernel-construction time (defined in .cpp).
explicit CovMatrixGenerator(OpKernelConstruction* context);
// Performs the covariance-matrix computation for one op invocation
// (defined in .cpp; presumably dispatches to BuildCovarianceFunctor —
// confirm against the implementation file).
void Compute(OpKernelContext* context) override;
};
// Register the CPU kernels for each supported element type.
// NOTE(review): TypeConstraint<T>("T") requires the op definition above to
// declare a "T" attr — confirm REGISTER_OP("CovMatrixGenerator") does.
#define REGISTER_CPU(T) \
  REGISTER_KERNEL_BUILDER( \
  Name("CovMatrixGenerator").Device(DEVICE_CPU).TypeConstraint<T>("T"), \
  CovMatrixGenerator<CPUDevice, T>);
REGISTER_CPU(float);
REGISTER_CPU(int32);
// Undefine the helper so the macro does not leak out of this header.
#undef REGISTER_CPU
// Parameter bundle for an exponentiated-quadratic (RBF) kernel with
// per-dimension ARD length-scale parameters, sized at compile time by `dim`.
// Intended to be passed by reference into BuildCovarianceFunctor.
template<typename T, size_t dim>
struct ExpQuadParams {
T ardTheta[dim];  // one ARD parameter per input dimension
T theta;          // global kernel scale parameter
// NOTE(review): a const data member implicitly deletes copy-assignment;
// kept as-is to preserve layout/ABI, but `static constexpr` would avoid
// storing a value that always equals the template argument — confirm no
// caller depends on the per-instance member.
const size_t dimensionality = dim;
// Initialize theta and fill every ARD component with `ardThetaInit`.
EIGEN_DEVICE_FUNC
ExpQuadParams(T thetaInit, T ardThetaInit) {
for (size_t i = 0; i < dim; i++) {
ardTheta[i] = ardThetaInit;
}
theta = thetaInit;
}
// Default-construct with all parameters set to 1. Marked EIGEN_DEVICE_FUNC
// so the struct is default-constructible in device code too — the original
// omitted the annotation here even though the delegated-to constructor
// carries it.
EIGEN_DEVICE_FUNC
ExpQuadParams() : ExpQuadParams(static_cast<T>(1), static_cast<T>(1)) {
}
};
template<typename D, typename T>
struct BuildCovarianceFunctor {
void operator()(const D &d, const T *data1, const T *data2, size_t size1,
size_t size_2, size_t dim, ExpQuadParams ¶ms, T *out);
};
template<typename T>
struct BuildCovarianceFunctor<CPUDevice, T> {
void operator()(const CPUDevice &deviceType, const T *data1, const T *data2, size_t size1,
size_t size_2, size_t dim, ExpQuadParams ¶ms, T *out);
};
}
#endif
/*
 * (Trailing Q&A text from the page this file was extracted from, translated
 * from Chinese and wrapped in a comment so the header still compiles:)
 *
 * My implementation is in the .cpp file and omitted here. You will notice
 * that I declare a struct, ExpQuadParams, which is passed to my
 * BuildCovarianceFunctor functor. What I would like to do is expose this
 * struct to Python through TensorFlow, so that I can populate its data
 * members from the Python side and pass them to the C++ implementation.
 * Is this possible, and if so, how is it done with TensorFlow? Does
 * TensorFlow have built-in wrapping functionality for this?
 *
 * (No answers had been posted at the time of extraction.)
 */