简体   繁体   中英

Error : Value passed to parameter 'input' has DataType int64 not in list of allowed values: float16, bfloat16, float32, float64?

I have this code and there is an error when applying prediction?

import pandas as pd
import numpy as np
import sklearn

import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.layers import Dense, Flatten
from tensorflow.keras.layers import Conv2D,Conv1D, MaxPooling2D,MaxPooling1D
from tensorflow.keras.layers import Activation
from tensorflow.keras.layers import Dropout,BatchNormalization

dataset=pd.read_csv("C:/Users/User/Desktop/data.csv",encoding='cp1252')
dataset.shape
#output:(53480, 37)
array = dataset.values
X = array[:,0:36]
Y = array[:,36]
kf = KFold(n_splits=10)
kf.get_n_splits(X)
ACC_array = np.array([])
sensitivity_array = np.array([])
specificity_array = np.array([])
for trainindex, testindex in kf.split(X):
Xtrain, Xtest = X[trainindex], X[testindex]
Ytrain, Ytest = Y[trainindex], Y[testindex]
Xtrain = np.expand_dims(np.random.normal(size=(53480, 36)),axis=-1)

Ytrain = np.random.choice([0,1], size=(53480,10))
n_timesteps, n_features, n_outputs =Xtrain.shape[0], Xtrain.shape[1], Ytrain.shape[1]

model = Sequential()
model.add(Conv1D(filters=64, kernel_size=1, 
activation='relu',input_shape=(n_features,1)))

model.add(Conv1D(filters=64, kernel_size=1, activation='relu'))
model.add(Dropout(0.5))
model.add(MaxPooling1D(pool_size=2))
model.add(Flatten())
model.add(Dense(100, activation='relu'))
model.add(Dense(n_outputs, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
# fit network
model.fit(Xtrain, Ytrain, epochs=10, batch_size=128, verbose=1)
model.summary()


#output:
Model: "sequential"
.
.

Predictions = model.predict(Xtest,batch_size =1024)
rounded = [round(x[0]) for x in Predictions]
Y_predection = pd.DataFrame(rounded)
Y_predection = Y_predection.iloc[:, 0]

Error message:

TypeError                                 Traceback (most recent call last)
<ipython-input-16-67624699b454> in <module>
----> 1 Predictions = model.predict(Xtest,batch_size =1024)
  2 rounded = [round(x[0]) for x in Predictions]
  3 Y_predection = pd.DataFrame(rounded)
  4 Y_predection = Y_predection.iloc[:, 0]
~\.conda\envs\tensorflow\lib\site-packages\tensorflow_core\python\keras\engine\training.py in predict(self, x, batch_size, verbose, steps, callbacks, max_queue_size, workers, use_multiprocessing)
907         max_queue_size=max_queue_size,
908         workers=workers,
--> 909         use_multiprocessing=use_multiprocessing)
910 
911   def reset_metrics(self):
~\.conda\envs\tensorflow\lib\site-packages\tensorflow_core\python\keras\engine\training_v2.py in predict(self, model, x, batch_size, verbose, steps, callbacks, **kwargs)
460     return self._model_iteration(
461         model, ModeKeys.PREDICT, x=x, batch_size=batch_size, 
verbose=verbose,
--> 462         steps=steps, callbacks=callbacks, **kwargs)
463 
464 
~\.conda\envs\tensorflow\lib\site-packages\tensorflow_core\python\keras\engine\training_v2.py in _model_iteration(self, model, mode, x, y, batch_size, verbose, sample_weight, steps, callbacks, **kwargs)
442               mode=mode,
443               training_context=training_context,
--> 444               total_epochs=1)
445           cbks.make_logs(model, epoch_logs, result, mode)
446 
~\.conda\envs\tensorflow\lib\site-packages\tensorflow_core\python\keras\engine\training_v2.py in run_one_epoch(model, iterator, execution_function, dataset_size, batch_size, strategy, steps_per_epoch, num_samples, mode, training_context, total_epochs)
121         step=step, mode=mode, size=current_batch_size) as batch_logs:
122       try:
--> 123         batch_outs = execution_function(iterator)
124       except (StopIteration, errors.OutOfRangeError):
125         # TODO(kaftan): File bug about tf function and 
errors.OutOfRangeError?
~\.conda\envs\tensorflow\lib\site-packages\tensorflow_core\python\keras\engine\training_v2_utils.py in execution_function(input_fn)
 84     # `numpy` translates Tensors to values in Eager mode.
 85     return nest.map_structure(_non_none_constant_value,
---> 86                               distributed_function(input_fn))
 87 
 88   return execution_function
~\.conda\envs\tensorflow\lib\site-packages\tensorflow_core\python\eager\def_function.py in __call__(self, *args, **kwds)
455 
456     tracing_count = self._get_tracing_count()
--> 457     result = self._call(*args, **kwds)
458     if tracing_count == self._get_tracing_count():
459       self._call_counter.called_without_tracing()
~\.conda\envs\tensorflow\lib\site-packages\tensorflow_core\python\eager\def_function.py in _call(self, *args, **kwds)
501       # This is the first call of __call__, so we have to initialize.
502       initializer_map = object_identity.ObjectIdentityDictionary()
--> 503       self._initialize(args, kwds, add_initializers_to=initializer_map)
504     finally:
505       # At this point we know that the initialization is complete (or 
less

~\.conda\envs\tensorflow\lib\site-packages\tensorflow_core\python\eager\def_function.py in _initialize(self, args, kwds, add_initializers_to)
406     self._concrete_stateful_fn = (
407         
self._stateful_fn._get_concrete_function_internal_garbage_collected(  # pylint: disable=protected-access
--> 408             *args, **kwds))
409 
410     def invalid_creator_scope(*unused_args, **unused_kwds):

~\.conda\envs\tensorflow\lib\site-packages\tensorflow_core\python\eager\function.py in _get_concrete_function_internal_garbage_collected(self, *args, **kwargs)
1846     if self.input_signature:
1847       args, kwargs = None, None
-> 1848     graph_function, _, _ = self._maybe_define_function(args, kwargs)
1849     return graph_function
1850 
~\.conda\envs\tensorflow\lib\site- 
packages\tensorflow_core\python\eager\function.py in 
_maybe_define_function(self, args, kwargs)
2148         graph_function = self._function_cache.primary.get(cache_key, 
None)
2149         if graph_function is None:
-> 2150           graph_function = self._create_graph_function(args, kwargs)
2151           self._function_cache.primary[cache_key] = graph_function
2152         return graph_function, args, kwargs
~\.conda\envs\tensorflow\lib\site- 
packages\tensorflow_core\python\eager\function.py in 
_create_graph_function(self, args, kwargs, override_flat_arg_shapes)
2039             arg_names=arg_names,
2040             override_flat_arg_shapes=override_flat_arg_shapes,
-> 2041             capture_by_value=self._capture_by_value),
2042         self._function_attributes,
2043         # Tell the ConcreteFunction to clean up its graph once it goes 
out of

~\.conda\envs\tensorflow\lib\site-packages\tensorflow_core\python\framework\func_graph.py in func_graph_from_py_func(name, python_func, args, kwargs, signature, func_graph, autograph, autograph_options, add_control_dependencies, arg_names, op_return_value, collections, capture_by_value, override_flat_arg_shapes)
913                                           converted_func)
914 
--> 915       func_outputs = python_func(*func_args, **func_kwargs)
916 
917       # invariant: `func_outputs` contains only Tensors, 
 CompositeTensors,

~\.conda\envs\tensorflow\lib\site-packages\tensorflow_core\python\eager\def_function.py in wrapped_fn(*args, **kwds)
356         # __wrapped__ allows AutoGraph to swap in a converted function. We give
357         # the function a weak reference to itself to avoid a reference 
cycle.
--> 358         return weak_wrapped_fn().__wrapped__(*args, **kwds)
359     weak_wrapped_fn = weakref.ref(wrapped_fn)
360 

~\.conda\envs\tensorflow\lib\site-packages\tensorflow_core\python\keras\engine\training_v2_utils.py in distributed_function(input_iterator)
 71     strategy = distribution_strategy_context.get_strategy()
 72     outputs = strategy.experimental_run_v2(
---> 73         per_replica_function, args=(model, x, y, sample_weights))
 74     # Out of PerReplica outputs reduce or pick values to return.
 75     all_outputs = dist_utils.unwrap_output_dict(

~\.conda\envs\tensorflow\lib\site-packages\tensorflow_core\python\distribute\distribute_lib.py in experimental_run_v2(self, fn, args, kwargs)
758       fn = autograph.tf_convert(fn, ag_ctx.control_status_ctx(),
759                                 convert_by_default=False)
--> 760       return self._extended.call_for_each_replica(fn, args=args, 
kwargs=kwargs)
761 
762   def reduce(self, reduce_op, value, axis):
~\.conda\envs\tensorflow\lib\site- 
packages\tensorflow_core\python\distribute\distribute_lib.py in 
call_for_each_replica(self, fn, args, kwargs)
   1785       kwargs = {}
   1786     with self._container_strategy().scope():
 -> 1787       return self._call_for_each_replica(fn, args, kwargs)
   1788 
   1789   def _call_for_each_replica(self, fn, args, kwargs):

    ~\.conda\envs\tensorflow\lib\site-packages\tensorflow_core\python\distribute\distribute_lib.py in 
_call_for_each_replica(self, fn, args, kwargs)
2130         self._container_strategy(),
2131         replica_id_in_sync_group=constant_op.constant(0, 
dtypes.int32)):
-> 2132       return fn(*args, **kwargs)
   2133 
   2134   def _reduce_to(self, reduce_op, value, destinations):

~\.conda\envs\tensorflow\lib\site-packages\tensorflow_core\python\autograph\impl\api.py in wrapper(*args, **kwargs)
290   def wrapper(*args, **kwargs):
291     with ag_ctx.ControlStatusCtx(status=ag_ctx.Status.DISABLED):
--> 292       return func(*args, **kwargs)
293 
294   if inspect.isfunction(func) or inspect.ismethod(func):

~\.conda\envs\tensorflow\lib\site-packages\tensorflow_core\python\keras\engine\training_v2_utils.py in _predict_on_batch(***failed resolving arguments***)
160     def _predict_on_batch(model, x, y=None, sample_weights=None):
161       del y, sample_weights
--> 162       return predict_on_batch(model, x)
163 
164     func = _predict_on_batch

~\.conda\envs\tensorflow\lib\site-packages\tensorflow_core\python\keras\engine\training_v2_utils.py in predict_on_batch(model, x)
368 
369   with backend.eager_learning_phase_scope(0):
--> 370     return model(inputs)  # pylint: disable=not-callable

~\.conda\envs\tensorflow\lib\site-packages\tensorflow_core\python\keras\engine\base_layer.py in __call__(self, inputs, *args, **kwargs)
845                     outputs = base_layer_utils.mark_as_return(outputs, acd)
846                 else:
--> 847                   outputs = call_fn(cast_inputs, *args, **kwargs)
848 
849             except errors.OperatorNotAllowedInGraphError as e:

~\.conda\envs\tensorflow\lib\site-packages\tensorflow_core\python\keras\engine\sequential.py in call(self, inputs, training, mask)
254       if not self.built:
255         self._init_graph_network(self.inputs, self.outputs, name=self.name)
--> 256       return super(Sequential, self).call(inputs, training=training, mask=mask)
257 
258     outputs = inputs  # handle the corner case where self.layers is empty

~\.conda\envs\tensorflow\lib\site-packages\tensorflow_core\python\keras\engine\network.py in call(self, inputs, training, mask)
706     return self._run_internal_graph(
707         inputs, training=training, mask=mask,
--> 708         convert_kwargs_to_constants=base_layer_utils.call_context().saving)
709 
710   def compute_output_shape(self, input_shape):

~\.conda\envs\tensorflow\lib\site-packages\tensorflow_core\python\keras\engine\network.py in _run_internal_graph(self, inputs, training, mask, convert_kwargs_to_constants)
858 
859           # Compute outputs.
--> 860           output_tensors = layer(computed_tensors, **kwargs)
861 
862           # Update tensor_dict.

~\.conda\envs\tensorflow\lib\site-packages\tensorflow_core\python\keras\engine\base_layer.py in __call__(self, inputs, *args, **kwargs)
845                     outputs = base_layer_utils.mark_as_return(outputs, acd)
846                 else:
--> 847                   outputs = call_fn(cast_inputs, *args, **kwargs)
848 
849             except errors.OperatorNotAllowedInGraphError as e:

~\.conda\envs\tensorflow\lib\site-packages\tensorflow_core\python\keras\layers\convolutional.py in call(self, inputs)
385     if self.padding == 'causal':
386       inputs = array_ops.pad(inputs, self._compute_causal_padding())
--> 387     return super(Conv1D, self).call(inputs)
388 
389 

~\.conda\envs\tensorflow\lib\site-packages\tensorflow_core\python\keras\layers\convolutional.py in call(self, inputs)
195 
196   def call(self, inputs):
--> 197     outputs = self._convolution_op(inputs, self.kernel)
198 
199     if self.use_bias:
~\.conda\envs\tensorflow\lib\site- 
packages\tensorflow_core\python\ops\nn_ops.py in __call__(self, inp, 
filter)
1132           call_from_convolution=False)
1133     else:
-> 1134       return self.conv_op(inp, filter)
1135     # copybara:strip_end
1136     # copybara:insert return self.conv_op(inp, filter)
~\.conda\envs\tensorflow\lib\site-packages\tensorflow_core\python\ops\nn_ops.py in __call__(self, inp, filter)
637 
638   def __call__(self, inp, filter):  # pylint: disable=redefined- 
builtin
--> 639     return self.call(inp, filter)
640 
641  

~\.conda\envs\tensorflow\lib\site-packages\tensorflow_core\python\ops\nn_ops.py in __call__(self, inp, filter)
236         padding=self.padding,
237         data_format=self.data_format,
--> 238         name=self.name)
239 
240 

~\.conda\envs\tensorflow\lib\site-packages\tensorflow_core\python\ops\nn_ops.py in _conv1d(self, input, filter, strides, padding, data_format, name)
225         padding=padding,
226         data_format=data_format,
--> 227         name=name)
228 
229   # pylint: enable=redefined-builtin

~\.conda\envs\tensorflow\lib\site-packages\tensorflow_core\python\util\deprecation.py in new_func(*args, **kwargs)
572                   func.__module__, arg_name, arg_value, 'in a future version'
573                   if date is None else ('after %s' % date), instructions)
--> 574       return func(*args, **kwargs)
575 
576     doc = _add_deprecated_arg_value_notice_to_docstring(
~\.conda\envs\tensorflow\lib\site- 
packages\tensorflow_core\python\util\deprecation.py in new_func(*args, 
**kwargs)
572                   func.__module__, arg_name, arg_value, 'in a future 
version'
573                   if date is None else ('after %s' % date), 
instructions)
--> 574       return func(*args, **kwargs)
575 
576     doc = _add_deprecated_arg_value_notice_to_docstring(
~\.conda\envs\tensorflow\lib\site- 
packages\tensorflow_core\python\ops\nn_ops.py in conv1d(value, filters, 
stride, padding, use_cudnn_on_gpu, data_format, name, input, dilations)
1679         data_format=data_format,
1680         dilations=dilations,
-> 1681         name=name)
1682     return array_ops.squeeze(result, [spatial_start_dim])
1683 

~\.conda\envs\tensorflow\lib\site- 
packages\tensorflow_core\python\ops\gen_nn_ops.py in conv2d(input, filter, 
strides, padding, use_cudnn_on_gpu, explicit_paddings, data_format, 
dilations, name)
1068                   padding=padding, use_cudnn_on_gpu=use_cudnn_on_gpu,
1069                   explicit_paddings=explicit_paddings,
-> 1070                   data_format=data_format, dilations=dilations, 
name=name)
1071   _result = _op.outputs[:]
1072   _inputs_flat = _op.inputs

~\.conda\envs\tensorflow\lib\site- 
packages\tensorflow_core\python\framework\op_def_library.py in 
_apply_op_helper(self, op_type_name, name, **keywords)
629               _SatisfiesTypeConstraint(base_type,
630                                        _Attr(op_def, 
input_arg.type_attr),
--> 631                                        param_name=input_name)
632             attrs[input_arg.type_attr] = attr_value
633             inferred_from[input_arg.type_attr] = input_name

~\.conda\envs\tensorflow\lib\site- 
packages\tensorflow_core\python\framework\op_def_library.py in 
_SatisfiesTypeConstraint(dtype, attr_def, param_name)
 58           "allowed values: %s" %
 59           (param_name, dtypes.as_dtype(dtype).name,
---> 60            ", ".join(dtypes.as_dtype(x).name for x in allowed_list)))
 61 
 62 

TypeError: Value passed to parameter 'input' has DataType int64 not in list of allowed values: float16, bfloat16, float32, float64   

I want to apply 1D CNN on tabular data(numerical) , how to evaluate the model by Cross validation to calcluate (Accuracy, Sensitivity, Specificity) , how to fix the error above? or how to calculate confusion metrics by using 70% for training and 30% for testing ?

The error is occured at the line:

Predictions = model.predict(Xtest,batch_size =1024)

Because after you defined:

Xtrain, Xtest = X[trainindex], X[testindex]

You didn't do the dimension expanding on Xtest like at Xtrain 's randomized redifinition (which is I guess just about some test purpose):

Xtrain = np.expand_dims(np.random.normal(size=(213412, 36)),axis=-1)

So Xtrain has had the proper 3D shape of (?, 36, 1) , exactly as you defined the input sizes with input_shape=(n_features,1) in your code:

model.add(Conv1D(filters=64, kernel_size=1, 
activation='relu',input_shape=(n_features,1)))

while Xtest remained 2D data ie with the shape of (?, 36) or exactly (5348, 36 ). That is why your model complains about input shape.

Thus do a np.expand_dims() on Xtest too with:

Xtest = np.expand_dims(Xtest, axis=-1)

Update:

As I saw in a later comment of you, that you don't understand why " the accuracy gradually decreased, and the loss function is increasing vary significantly ". That is why you redifined your X_train as mentioned above with: np.random.normal(size=(53480, 36)) in the line:

Xtrain = np.expand_dims(np.random.normal(size=(53480, 36)),axis=-1)

So your model trying to fit to random data.

The technical post webpages of this site follow the CC BY-SA 4.0 protocol. If you need to reprint, please indicate the site URL or the original address.Any question please contact:yoyou2525@163.com.

 
粤ICP备18138465号  © 2020-2024 STACKOOM.COM