Wandb sweep is running the same hyperparameter combination multiple times

Hi, I am working on private data with Keras, trying different activation functions and optimization algorithms. I'm running a wandb sweep, but it trains some combinations more than once. For example, relu / lr=0.001 / adam / batch_size=4 has been trained more than 6 times. What is the reason for this? I'm also not sure my code is correct.

my code :

import tensorflow as tf
import numpy as np
# Root directory of the image dataset (one sub-directory per class; 4 classes
# are assumed by the final Dense(4) layer in the model below).
base_dir = "/content/f1"

# Augmentation pipeline for training; 10% of the images are held out for
# validation via validation_split.
train_augmenter = tf.keras.preprocessing.image.ImageDataGenerator(
    rescale=1. / 255,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    validation_split=0.1,
)

# Validation images are only rescaled — no augmentation — but must use the
# same validation_split so the 'validation' subset lines up.
test_augmenter = tf.keras.preprocessing.image.ImageDataGenerator(
    rescale=1. / 255,
    validation_split=0.1,
)

# Keep the public names `train_datagen` / `test_datagen` bound to the
# directory iterators, since model.fit() below consumes them by these names.
# (The original code rebound the same variable from an ImageDataGenerator to
# a DirectoryIterator, which obscured what each name actually held.)
train_datagen = train_augmenter.flow_from_directory(
    base_dir,
    target_size=(500, 500),
    subset='training',
    batch_size=2,
)

test_datagen = test_augmenter.flow_from_directory(
    base_dir,
    target_size=(500, 500),
    subset='validation',
    batch_size=2,
)
wandb.login()

# Sweep definition.
#
# FIX for the duplicate runs: the original sweep used method='random', which
# samples hyperparameter combinations WITH replacement, so the same
# combination (e.g. relu / lr=0.001 / adam) can be drawn many times.  Since
# every parameter here is a small discrete list, method='grid' enumerates
# each combination exactly once (2 * 2 * 2 * 3 = 24 runs total).
# Alternatively, keep 'random' and cap the number of runs with
# `wandb.agent(sweep_id, function=train, count=N)`.
sweep_config = {
    'method': 'grid',
    'metric': {
        'name': 'accuracy',
        'goal': 'maximize',
    },
    'parameters': {
        'epochs': {'values': [2, 4]},
        'learning_rate': {'values': [0.01, 0.001]},
        'optimizer': {'values': ['adam', 'rmsprop']},
        'activation': {'values': ['relu', 'elu', 'selu']},
    },
}

sweep_id = wandb.sweep(sweep_config, entity="sdad", project="func")
def train():
    """Run one sweep trial: build, compile and fit the CNN from wandb.config.

    Invoked by the wandb agent once per hyperparameter combination.  Reads
    the module-level `train_datagen` / `test_datagen` directory iterators.
    Logs metrics (and sample images) to wandb via WandbCallback.
    """
    # Fallbacks used when the script is run outside a sweep; the sweep
    # controller overrides these through wandb.init(config=...).
    config_defaults = {
        'epochs': 2,
        'batch_size': 2,
        'learning_rate': 0.001,
        'activation': 'relu',
        'optimizer': 'adam',
        'seed': 42,
    }

    wandb.init(config=config_defaults)
    config = wandb.config

    model = Sequential()
    model.add(layers.Conv2D(filters=4, activation=config.activation,
                            kernel_size=(5, 5), input_shape=(500, 500, 3)))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(filters=8, activation=config.activation,
                            kernel_size=(3, 3)))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(filters=16, activation=config.activation,
                            kernel_size=(2, 2)))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(filters=32, activation=config.activation,
                            kernel_size=(2, 2)))

    model.add(layers.Flatten())

    model.add(Dense(50, activation=config.activation))
    model.add(Dense(100, activation=config.activation))
    model.add(Dense(100, activation=config.activation))
    model.add(Dense(50, activation=config.activation))
    model.add(Dense(4, activation="softmax"))  # 4 output classes

    # BUG FIX: the original passed the optimizer as a plain string, so the
    # swept `learning_rate` was never applied — Keras silently used the
    # optimizer's default learning rate for every run.  Instantiate the
    # optimizer explicitly with the sweep's learning rate.
    if config.optimizer == 'adam':
        optimizer = tf.keras.optimizers.Adam(learning_rate=config.learning_rate)
    else:  # 'rmsprop' (the only other value in the sweep)
        optimizer = tf.keras.optimizers.RMSprop(learning_rate=config.learning_rate)

    model.compile(loss="categorical_crossentropy", optimizer=optimizer,
                  metrics=['accuracy'])

    # BUG FIX: `batch_size` must not be passed to fit() when the input is a
    # generator (TF2 raises a ValueError); the batch size is already fixed by
    # flow_from_directory(batch_size=2) above.
    model.fit(train_datagen,
              epochs=config.epochs,
              validation_data=test_datagen,
              callbacks=[WandbCallback(data_type="image",
                                       validation_data=test_datagen)])

Hey Yasar, I believe the issue is that you are using random search with a list of constant parameters. Random search will retry parameter combinations after a while. You can pass a count argument to the wandb agent call to specify the number of experiments to run to avoid this.

Thank you my friend, the problem has been solved thanks to you. I am thankful to you

Glad to hear it, Yasar