![](/img/trans.png)
[英]ValueError: Error when checking input: expected conv2d_input to have 4 dimensions, but got array with shape
[英]ValueError: Error when checking input: expected conv2d_input to have 4 dimensions, but got array with shape (None, 1)
我完成了我的 model 的訓練,它由 20 個類組成,准確度達到 0.9993,目前正在進行測試。 我正在關注本教程,但我遇到了錯誤
prediction = model.predict(['test1.jpg'])
訓練數據定義為
for features, label in training_data:
x.append(features)
y.append(label)
x = np.array(x).reshape(-1, IMG_SIZE, IMG_SIZE,1)
這是我對cnn的定義
x = pickle.load(open("x.pickle", "rb" ))
y = pickle.load(open("y.pickle", "rb"))
x = x/255.0
model = Sequential()
model.add(Conv2D(64,(3,3), input_shape = x.shape[1:IMG_SIZE]))
model.add(Activation("relu"))
model.add(MaxPool2D(pool_size=(2,2)))
model.add(Conv2D(64,(3,3), input_shape = x.shape[1:IMG_SIZE]))
model.add(Activation("relu"))
model.add(MaxPool2D(pool_size=(2,2)))
model.add(Flatten())
model.add(Dense(64))
model.add(Dense(20))
model.add(Activation("sigmoid"))
這里也是我對 model 的總結
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d (Conv2D) (None, 222, 222, 64) 640
_________________________________________________________________
activation (Activation) (None, 222, 222, 64) 0
_________________________________________________________________
max_pooling2d (MaxPooling2D) (None, 111, 111, 64) 0
_________________________________________________________________
conv2d_1 (Conv2D) (None, 109, 109, 64) 36928
_________________________________________________________________
activation_1 (Activation) (None, 109, 109, 64) 0
_________________________________________________________________
max_pooling2d_1 (MaxPooling2 (None, 54, 54, 64) 0
_________________________________________________________________
flatten (Flatten) (None, 186624) 0
_________________________________________________________________
dense (Dense) (None, 64) 11944000
_________________________________________________________________
dense_1 (Dense) (None, 20) 1300
_________________________________________________________________
activation_2 (Activation) (None, 20) 0
=================================================================
Total params: 11,982,868
Trainable params: 11,982,868
Non-trainable params: 0
_________________________________________________________________
我得到的錯誤是
-------------------------------------------------- ------------------------- ValueError Traceback (最近一次調用最后一次) in ----> 1 prediction = model.predict(['test1.jpg'])
~\Anaconda3\envs\tensorflow_cpu\lib\site-packages\tensorflow_core\python\keras\engine\training.py in predict(self, x, batch_size, verbose, steps, callbacks, max_queue_size, workers, use_multiprocessing) 907 max_queue_size=max_queue_size , 908 工人=工人,--> 909 use_multiprocessing=use_multiprocessing) 910 911 def reset_metrics(self):
~\Anaconda3\envs\tensorflow_cpu\lib\site-packages\tensorflow_core\python\keras\engine\training_v2.py in predict(self, model, x, batch_size, verbose, steps, callbacks, **kwargs) 460 return self. _model_iteration(461 model,ModeKeys.PREDICT,x=x,batch_size=batch_size,詳細=詳細,--> 462 步=步驟,回調=回調,**kwargs)463 464
~\Anaconda3\envs\tensorflow_cpu\lib\site-packages\tensorflow_core\python\keras\engine\training_v2.py in _model_iteration(self, model, mode, x, y, batch_size, verbose, sample_weight, steps, callbacks, ** kwargs)442 mode=mode,443 training_context=training_context,--> 444 total_epochs=1)445 cbks.make_logs(模型,epoch_logs,結果,模式)446
~\Anaconda3\envs\tensorflow_cpu\lib\site-packages\tensorflow_core\python\keras\engine\training_v2.py in run_one_epoch(模型,迭代器,execution_function,dataset_size,batch_size,strategy,steps_per_epoch,num_samples,mode,training_context,total_epochs) 121 step=step, mode=mode, size=current_batch_size) as batch_logs: 122 try: --> 123 batch_outs = execution_function(iterator) 124 except (StopIteration, errors.OutOfRangeError): 125 # TODO(kaftan): File bug about tf function 和errors.OutOfRangeError?
~\Anaconda3\envs\tensorflow_cpu\lib\site-packages\tensorflow_core\python\keras\engine\training_v2_utils.py in execution_function(input_fn) 84 #
numpy
在 Eager 模式下將張量轉換為值。 85 返回nest.map_structure(_non_none_constant_value, ---> 86 分布式函數(input_fn)) 87 88 返回執行函數~\Anaconda3\envs\tensorflow_cpu\lib\site-packages\tensorflow_core\python\eager\def_function.py in call (self, *args, **kwds) 455 456 tracking_count = self._get_tracing_count() --> 457 結果 = self._call(*args, **kwds) 458 if tracking_count == self._get_tracing_count(): 459 self._call_counter.called_without_tracing()
~\Anaconda3\envs\tensorflow_cpu\lib\site-packages\tensorflow_core\python\eager\def_function.py in _call(self, *args, **kwds) 501 # 這是第一次調用call ,所以我們要初始化. 502 initializer_map = object_identity.ObjectIdentityDictionary() --> 503 self._initialize(args, kwds, add_initializers_to=initializer_map) 504 finally: 505 #此時我們知道初始化完成(或更少
~\Anaconda3\envs\tensorflow_cpu\lib\site-packages\tensorflow_core\python\eager\def_function.py in _initialize(self, args, kwds, add_initializers_to) 406 self._concrete_stateful_fn = (407 self._stateful_fn._get_concrete_function_internal_garbage_collected(
pylint:禁用=受保護的訪問
--> 408 *args, **kwds)) 409 410 def invalid_creator_scope(*unused_args, **unused_kwds):
~\Anaconda3\envs\tensorflow_cpu\lib\site-packages\tensorflow_core\python\eager\function.py in _get_concrete_function_internal_garbage_collected(self, *args, **kwargs) 1846 if self.input_signature: 1847 args, kwargs = None, None - > 1848 圖函數,_,_ = self._maybe_define_function(args,kwargs)1849 返回圖函數 1850
~\Anaconda3\envs\tensorflow_cpu\lib\site-packages\tensorflow_core\python\eager\function.py in _maybe_define_function(self, args, kwargs) 2148
graph_function = self._function_cache.primary.get(cache_key, None)
2149 如果 graph_function 為 None:-> 2150 graph_function = self._create_graph_function(args, kwargs) 2151 self._function_cache.primary[cache_key] = graph_function 2152 return graph_function, args, kwargs~\Anaconda3\envs\tensorflow_cpu\lib\site-packages\tensorflow_core\python\eager\function.py in _create_graph_function(self, args, kwargs, override_flat_arg_shapes) 2039 arg_names=arg_names,
2040 override_flat_arg_shapes=override_flat_arg_shapes, -> 2041 capture_by_value=self._capture_by_value), 2042 self._function_attributes, 2043 # 告訴 ConcreteFunction 一旦它退出就清理它的圖~\Anaconda3\envs\tensorflow_cpu\lib\site-packages\tensorflow_core\python\framework\func_graph.py in func_graph_from_py_func(name, python_func, args, kwargs, signature, func_graph, autograph, autograph_options, add_control_dependencies, arg_names, op_return_value, Z0B9ABFE67CC31FCF11ECD022EB195A, capture_by_value, override_flat_arg_shapes) 913 convert_func) 914 --> 915 func_outputs = python_func(*func_args, **func_kwargs) 916 917 # 不變:
func_outputs
只包含張量,復合張量,~\Anaconda3\envs\tensorflow_cpu\lib\site-packages\tensorflow_core\python\eager\def_function.py in Wrapped_fn(*args, **kwds) 356 # Wrapped允許 AutoGraph 交換轉換后的 function。 我們給 357# function 一個對自身的弱引用以避免引用循環。 --> 358 返回weak_wrapped_fn()。 包裝(*args,**kwds)359 weak_wrapped_fn = weakref.ref(wrapped_fn)360
~\Anaconda3\envs\tensorflow_cpu\lib\site-packages\tensorflow_core\python\keras\engine\training_v2_utils.py indistributed_function(input_iterator) 71 strategy = distribution_strategy_context.get_strategy() 72 outputs = strategy.experimental_run_v2( ---> 73 per_replica_function, args=(model, x, y, sample_weights)) 74 # PerReplica 輸出減少或選擇要返回的值。 75 all_outputs = dist_utils.unwrap_output_dict(
~\Anaconda3\envs\tensorflow_cpu\lib\site-packages\tensorflow_core\python\distribute\distribute_lib.py in experimental_run_v2(self,fn,args,kwargs)758 fn = autograph.tf_convert(fn,ag_ctx.control_status_ctx(),759 convert_by_default=False) --> 760 return self._extended.call_for_each_replica(fn, args=args, kwargs=kwargs) 761 762 def reduce(self, reduce_op, value, axis):
~\Anaconda3\envs\tensorflow_cpu\lib\site-packages\tensorflow_core\python\distribute\distribute_lib.py in call_for_each_replica(self, fn, args, kwargs) 1785 kwargs = {} 1786 with self._container_strategy().scope() : -> 1787 返回 self._call_for_each_replica(fn, args, kwargs) 1788 1789 def _call_for_each_replica(self, fn, args, kwargs):
~\Anaconda3\envs\tensorflow_cpu\lib\site-packages\tensorflow_core\python\distribute\distribute_lib.py in _call_for_each_replica(self, fn, args, kwargs) 2130
self._container_strategy(),2131
replica_id_in_sync_group=constant_op.constant(0, dtypes.int32)): -> 2132 return fn(*args, **kwargs) 2133 2134 def _reduce_to(self, reduce_op, value, destinations):~\Anaconda3\envs\tensorflow_cpu\lib\site-packages\tensorflow_core\python\autograph\impl\api.py in wrapper(*args, **kwargs) 290 def wrapper(*args, **kwargs): 291 with ag_ctx .ControlStatusCtx(status=ag_ctx.Status.DISABLED): --> 292 return func(*args, **kwargs) 293 294 if inspect.isfunction(func) 或 inspect.ismethod(func):
~\Anaconda3\envs\tensorflow_cpu\lib\site-packages\tensorflow_core\python\keras\engine\training_v2_utils.py in _predict_on_batch(解析 arguments 失敗)160 def _predict_on_batch(模型,x,y=None,sample_weights=None):16 del y, sample_weights --> 162 return predict_on_batch(model, x) 163 164 func = _predict_on_batch
~\Anaconda3\envs\tensorflow_cpu\lib\site-packages\tensorflow_core\python\keras\engine\training_v2_utils.py in predict_on_batch(model, x) 357 # 驗證和標准化用戶數據。 358 個輸入,_, _ = model._standardize_user_data( --> 359 x, extract_tensors_from_dataset=True) 360 361 # 如果
model._distribution_strategy
為 True,則我們在副本上下文中~\Anaconda3\envs\tensorflow_cpu\lib\site-packages\tensorflow_core\python\keras\engine\training.py in _standardize_user_data(self, x, y, sample_weight, class_weight, batch_size, check_steps, steps_name, steps, validation_split, shuffle, extract_tensors_from_dataset)2470 feed_input_shapes,
2471 check_batch_axis = False,#不強制批量大小。 -> 2472 exception_prefix='input') 2473 2474 # 獲取輸入數據的類型規范並在必要時對其進行清理。~\Anaconda3\envs\tensorflow_cpu\lib\site-packages\tensorflow_core\python\keras\engine\training_utils.py instandardize_input_data(數據,名稱,形狀,check_batch_axis,exception_prefix)
563 ': expected ' + names[i] + ' to have ' + 564 str(len(shape)) + ' dimensions, but got array '
--> 565 'with shape ' + str(data_shape)) 566 if not check_batch_axis: 567 data_shape = data_shape[1:]
ValueError:檢查輸入時出錯:預期 conv2d_input 有 4 個維度,但得到的數組具有形狀(無,1)
你試圖預測一些字符串['test1.jpg']
,為什么? 您需要將數據准備為與訓練所用相同的形狀和分布 - 例如 - 加載圖像,將其調整大小/裁剪為相關大小,將其規范化為[0,1]
范圍內(如果這是你在訓練期間做的)等等......
首先,第一個conv
層的 input_shape 似乎是錯誤的。
input_shape = (IMG_SIZE, IMG_SIZE,1)
model.add(Conv2D(64,(3,3), input_shape = (IMG_SIZE, IMG_SIZE,1)))
其次,無需為任何中間層指定input_shape
。
model.add(Conv2D(64,(3,3), input_shape = x.shape[1:IMG_SIZE]))
應該是
model.add(Conv2D(64,(3,3)))
聲明:本站的技術帖子網頁,遵循CC BY-SA 4.0協議,如果您需要轉載,請注明本站網址或者原文地址。任何問題請咨詢:yoyou2525@163.com.