Problem with gridSearch

1. Problem with gridSearch

Gustavo Henrique Nunes
ghnunes

(uses Linux Mint)

Posted on 23/08/2022 - 11:22h


Hi everyone, I'm running some tests with GridSearchCV, but I'm getting this error. Can anyone tell me what's causing it?

in clone raise TypeError("Cannot clone object '%s' (type %s): "
TypeError: Cannot clone object '<keras.engine.functional.Functional object at 0x7f330fe610a0>' (type <class 'keras.engine.functional.Functional'>): it does not seem to be a scikit-learn estimator as it does not implement a 'get_params' method.
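
From what I understand, GridSearchCV clones its estimator internally with sklearn.base.clone, and clone only works with objects that follow the scikit-learn estimator API (get_params/set_params/fit), which a raw Keras Functional model does not implement. A minimal illustration of that requirement (LogisticRegression is only an example estimator here, it is not part of my code):

# clone() works for scikit-learn estimators but fails for a raw Keras Model
from sklearn.base import clone
from sklearn.linear_model import LogisticRegression

clone(LogisticRegression())  # fine: implements get_params() / set_params()
# clone(keras_model)         # raises the TypeError shown above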


Model:



from keras import backend as K, regularizers
from keras.engine.training import Model
from keras.layers import Conv2D, MaxPooling2D, Dropout, Flatten, Dense, \
    BatchNormalization, Activation, Input
import ModelLib


class Cifar100_Model(ModelLib.ModelLib):
    def build_classifier_model(self, dataset, n_classes=5,
                               activation='elu', dropout_1_rate=0.25,
                               dropout_2_rate=0.5,
                               reg_factor=50e-4, bias_reg_factor=None, batch_norm=False):

        n_classes = dataset.n_classes
        print(n_classes)
        print("----------------------------------------------------------------------------")
        l2_reg = regularizers.l2(reg_factor)  # K.variable(K.cast_to_floatx(reg_factor))
        l2_bias_reg = None
        if bias_reg_factor:
            l2_bias_reg = regularizers.l2(bias_reg_factor)  # K.variable(K.cast_to_floatx(bias_reg_factor))

        # input image dimensions
        h, w, d = 32, 32, 3

        if K.image_data_format() == 'channels_first':
            input_shape = (d, h, w)
        else:
            input_shape = (h, w, d)

        # input layer
        x = input_1 = Input(shape=input_shape)

        x = Conv2D(filters=32, kernel_size=(3, 3), padding='same', kernel_regularizer=l2_reg, bias_regularizer=l2_bias_reg)(x)
        if batch_norm:
            x = BatchNormalization()(x)
        x = Activation(activation=activation)(x)
        x = Conv2D(filters=32, kernel_size=(3, 3), padding='same', kernel_regularizer=l2_reg, bias_regularizer=l2_bias_reg)(x)
        if batch_norm:
            x = BatchNormalization()(x)
        x = Activation(activation=activation)(x)
        x = MaxPooling2D(pool_size=(2, 2))(x)
        x = Dropout(rate=dropout_1_rate)(x)

        x = Conv2D(filters=64, kernel_size=(3, 3), padding='same', kernel_regularizer=l2_reg, bias_regularizer=l2_bias_reg)(x)
        if batch_norm:
            x = BatchNormalization()(x)
        x = Activation(activation=activation)(x)
        x = Conv2D(filters=64, kernel_size=(3, 3), padding='same', kernel_regularizer=l2_reg, bias_regularizer=l2_bias_reg)(x)
        if batch_norm:
            x = BatchNormalization()(x)
        x = Activation(activation=activation)(x)
        x = MaxPooling2D(pool_size=(2, 2))(x)
        x = Dropout(rate=dropout_1_rate)(x)

        x = Conv2D(filters=128, kernel_size=(3, 3), padding='same', kernel_regularizer=l2_reg, bias_regularizer=l2_bias_reg)(x)
        if batch_norm:
            x = BatchNormalization()(x)
        x = Activation(activation=activation)(x)
        x = Conv2D(filters=128, kernel_size=(3, 3), padding='same', kernel_regularizer=l2_reg, bias_regularizer=l2_bias_reg)(x)
        if batch_norm:
            x = BatchNormalization()(x)
        x = Activation(activation=activation)(x)
        x = MaxPooling2D(pool_size=(2, 2))(x)
        x = Dropout(rate=dropout_1_rate)(x)

        x = Conv2D(filters=256, kernel_size=(2, 2), padding='same', kernel_regularizer=l2_reg, bias_regularizer=l2_bias_reg)(x)
        if batch_norm:
            x = BatchNormalization()(x)
        x = Activation(activation=activation)(x)
        x = Conv2D(filters=256, kernel_size=(2, 2), padding='same', kernel_regularizer=l2_reg, bias_regularizer=l2_bias_reg)(x)
        if batch_norm:
            x = BatchNormalization()(x)
        x = Activation(activation=activation)(x)
        x = MaxPooling2D(pool_size=(2, 2))(x)
        x = Dropout(rate=dropout_1_rate)(x)

        x = Flatten()(x)
        x = Dense(units=512, kernel_regularizer=l2_reg, bias_regularizer=l2_bias_reg)(x)
        if batch_norm:
            x = BatchNormalization()(x)
        x = Activation(activation=activation)(x)

        x = Dropout(rate=dropout_2_rate)(x)
        x = Dense(units=n_classes, kernel_regularizer=l2_reg, bias_regularizer=l2_bias_reg)(x)
        if batch_norm:
            x = BatchNormalization()(x)
        x = Activation(activation='softmax')(x)

        model = Model(inputs=[input_1], outputs=[x])
        return model
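
For context, build_classifier_model returns an uncompiled Model, so it still needs a compile step before training; a rough usage sketch of the builder (the Adam optimizer, loss, epochs and batch_size below are placeholder assumptions, not my exact settings):

# hypothetical usage of the model builder above
from keras.optimizers import Adam

model = Cifar100_Model().build_classifier_model(dataset)
model.compile(optimizer=Adam(learning_rate=0.01),
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.fit(dataset.x_train, dataset.y_train_labels, epochs=10, batch_size=32)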



Test code:



import models.cifar100_model

def load_model():
    return models.cifar100_model.Cifar100_Model()

def get_params(self, deep=True):
    return {"learning_rate": self.learning_rate}


model_lib = load_model()

model = model_lib.build_classifier_model(dataset)

from sklearn.model_selection import GridSearchCV
x_train = dataset.x_train
y_train = dataset.y_train_labels
learning_rate = [0.01, 0.1]
param_grid = dict(learning_rate=learning_rate)
grid = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=-1, cv=3, scoring='accuracy')
gridResult = grid.fit(x_train, y_train)
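
From what I've read, the usual workaround seems to be wrapping the model construction in a scikit-learn-compatible wrapper such as scikeras.wrappers.KerasClassifier, so that GridSearchCV receives an object that exposes get_params/set_params. A minimal sketch of what I understand that would look like (assuming the scikeras package is installed; the optimizer, loss, epochs and batch_size values are placeholders):

# sketch: wrap model construction in a scikit-learn-compatible estimator
from scikeras.wrappers import KerasClassifier
from keras.optimizers import Adam
from sklearn.model_selection import GridSearchCV

def build_model(learning_rate=0.01):
    # build and compile the Keras model for a given learning rate
    m = model_lib.build_classifier_model(dataset)
    m.compile(optimizer=Adam(learning_rate=learning_rate),
              loss='categorical_crossentropy',
              metrics=['accuracy'])
    return m

clf = KerasClassifier(model=build_model, epochs=10, batch_size=32, verbose=0)
# the "model__" prefix routes learning_rate to build_model during the search
param_grid = {'model__learning_rate': [0.01, 0.1]}
grid = GridSearchCV(estimator=clf, param_grid=param_grid, n_jobs=-1, cv=3, scoring='accuracy')
gridResult = grid.fit(x_train, y_train)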






  





