I am trying to display images captured from my webcam and then feed them into my TensorFlow code. Yet even the first step — displaying the frames — already fails, for a reason I can't determine.
I get the following error messages when running my code:
Y: Tensor("layer2/Sigmoid:0", shape=(?, 84), dtype=float32)
2017-10-10 11:24:42.207249: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:857] ARM64 does not support NUMA - returning NUMA node zero
2017-10-10 11:24:42.207486: I tensorflow/core/common_runtime/gpu/gpu_device.cc:955] Found device 0 with properties:
name: GP10B
major: 6 minor: 2 memoryClockRate (GHz) 1.3005
pciBusID 0000:00:00.0
Total memory: 7.67GiB
Free memory: 3.22GiB
2017-10-10 11:24:42.207589: I tensorflow/core/common_runtime/gpu/gpu_device.cc:976] DMA: 0
2017-10-10 11:24:42.207663: I tensorflow/core/common_runtime/gpu/gpu_device.cc:986] 0: Y
2017-10-10 11:24:42.207736: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1045] Creating TensorFlow device (/gpu:0) -> (device: 0, name: GP10B, pci bus id: 0000:00:00.0)
HIGHGUI ERROR: V4L/V4L2: VIDIOC_S_CROP
opened capture
read cap
(lousyTestCam.py:6194): Gtk-WARNING **: gtk_disable_setlocale() must be called before gtk_init()
(lousyTestCam.py:6194): Gtk-CRITICAL **: IA__gtk_type_unique: assertion 'GTK_TYPE_IS_OBJECT (parent_type)' failed
(lousyTestCam.py:6194): Gtk-CRITICAL **: IA__gtk_type_new: assertion 'GTK_TYPE_IS_OBJECT (type)' failed
(lousyTestCam.py:6194): Gtk-CRITICAL **: IA__gtk_type_unique: assertion 'GTK_TYPE_IS_OBJECT (parent_type)' failed
Segmentation fault (core dumped)
This is a piece of my code:
import tensorflow as tf
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import cv2
# Autoencoder layer sizes: n_visible is the input/reconstruction width,
# n_hidden the width of the encoded (bottleneck) layer.
n_visible = 128
n_hidden = 84
def model(X, W, b, W_prime, b_prime):
    """Tied-weight autoencoder: encode X with (W, b), decode with (W_prime, b_prime).

    Returns the reconstruction tensor. W_prime is expected to be the
    transpose of W (tied weights), built by the caller.
    """
    with tf.name_scope("layer2"):
        encoded = tf.nn.sigmoid(tf.matmul(X, W) + b)
    with tf.name_scope("layer3"):
        decoded = tf.nn.sigmoid(tf.matmul(encoded, W_prime) + b_prime)
    # Debug aid: shows the symbolic encoder tensor (shape/dtype), not values.
    print("Y: " + str(encoded))
    return decoded
# ---- graph construction -------------------------------------------------
# Input batch: any number of rows, each of length n_visible.
X = tf.placeholder("float", [None, n_visible], name='X')

# Glorot/Xavier-style uniform initialization range for the encoder weights.
W_init_max = 4 * np.sqrt(6. / (n_visible + n_hidden))
W_init = tf.random_uniform(
    shape=[n_visible, n_hidden],
    minval=-W_init_max,
    maxval=W_init_max,
)
W = tf.Variable(W_init, name='W')
b = tf.Variable(tf.zeros([n_hidden]), name='b')

# Tied weights: the decoder reuses the transposed encoder matrix.
W_prime = tf.transpose(W)
b_prime = tf.Variable(tf.zeros([n_visible]), name='b_prime')

Z = model(X, W, b, W_prime, b_prime)

# Squared reconstruction error, minimized with plain gradient descent.
cost = tf.reduce_sum(tf.pow(X - Z, 2))
train_op = tf.train.GradientDescentOptimizer(0.02).minimize(cost)
# ---- training data ------------------------------------------------------
# Placeholder: replace with real samples, e.g. flattened image vectors of
# length n_visible. (The original snippet's "...." was a syntax error.)
trX = ...  # TODO: supply training data

resultPar = tf.placeholder(tf.float32)

# Cap the GPU memory TensorFlow grabs so it can coexist with other users
# of the device.
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)
with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
    tf.global_variables_initializer().run()

    cap = cv2.VideoCapture(0)
    if not cap.isOpened():
        raise RuntimeError("could not open webcam device 0")
    print("opened capture")

    # NOTE(review): a segfault inside cv2.imshow on this platform often
    # comes from OpenCV's GTK-based HighGUI clashing with GTK state that
    # matplotlib's GUI backend initializes at import time — try removing
    # the matplotlib import or forcing a non-GUI backend to confirm.
    try:
        while True:
            ret, frame = cap.read()
            print("read cap")
            # cap.read() reports failure via ret; frame can also be None.
            if not ret or frame is None:
                break
            cv2.imshow("input", frame)
            print("showing img")
            # waitKey pumps the HighGUI event loop; without it the window
            # never paints. Press 'q' to exit the loop cleanly — the
            # original `while True` never broke, so cleanup was unreachable.
            if cv2.waitKey(100) & 0xFF == ord('q'):
                break
            print("waiting")
    finally:
        # Always free the camera and windows, even on error; otherwise
        # V4L keeps the device busy for the next run.
        cap.release()
        cv2.destroyAllWindows()
Could someone explain what the problem is, and why my code runs into a segmentation fault every time instead of completing?
Thanks