diff --git a/dockerstandalone/update.go b/dockerstandalone/update.go
index ebe69d7..4a09825 100644
--- a/dockerstandalone/update.go
+++ b/dockerstandalone/update.go
@@ -7,6 +7,7 @@ import (
 	"encoding/json"
 	"fmt"
 	"io"
+	"math/rand"
 	"os"
 	"strings"
 	"time"
@@ -29,7 +30,7 @@ func Update(ctx context.Context, dockerCli *client.Client, oldContainerId string
 		Str("image", imageName).
 		Msg("Starting update process")
 
-		// We look for the existing container to copy its configuration
+	// We look for the existing container to copy its configuration
 	log.Debug().
 		Str("containerId", oldContainerId).
 		Msg("Looking for container")
@@ -67,18 +68,28 @@
 	}
 
 	oldContainerName := strings.TrimPrefix(oldContainer.Name, "/")
+	log.Info().Str("old_container_name", oldContainerName).Msg("retrieving old container name")
 
 	// We create the new container
 	tempContainerName := buildContainerName(oldContainerName)
+	log.Info().Str("temporary_container_name", tempContainerName).Msg("creating new container")
 
 	newContainerID, err := createContainer(ctx, dockerCli, imageName, tempContainerName, oldContainer, updateConfig)
 	if err != nil {
 		log.Err(err).
+			Str("image_name", imageName).
+			Str("temporary_container_name", tempContainerName).
+			Str("old_container", oldContainerId).
+			Str("new_container_id", newContainerID).
+			Str("context", "UpdaterCreatesNewAgentContainer").
 			Msg("Unable to create container")
 
 		return cleanupContainerAndError(ctx, dockerCli, oldContainerId, newContainerID)
 	}
 
+	log.Info().Str("old_container_id", oldContainer.ID).
+		Str("new_container_id", newContainerID).
+		Msg("will start container")
 	err = startContainer(ctx, dockerCli, oldContainer.ID, newContainerID)
 	if err != nil {
 		log.Err(err).
 
+	log.Info().
+		Str("new_container_id", newContainerID).
+		Str("old_container_id", oldContainerId).
+		Msg("will monitor container health")
 	healthy, err := monitorHealth(ctx, dockerCli, newContainerID)
 	if err != nil {
 		log.Err(err).
+			Str("new_container_id", newContainerID).
+			Str("old_container_id", oldContainerId).
+			Str("context", "UpdateInProgress").
 			Msg("Unable to monitor container health")
 		return cleanupContainerAndError(ctx, dockerCli, oldContainerId, newContainerID)
 	}
 
 	if !healthy {
+		log.Debug().
+			Str("new_container_id", newContainerID).
+			Str("old_container_id", oldContainerId).
+			Str("context", "UpdateInProgress").
+			Msg("The container is not healthy. Return")
 		return cleanupContainerAndError(ctx, dockerCli, oldContainerId, newContainerID)
 	}
 
 	log.Info().
-		Msg("New container is healthy. The old  will be removed.")
+		Msg("New container is healthy. The old will be removed.")
 
 	tryRemoveOldContainer(ctx, dockerCli, oldContainer.ID)
 
+	log.Debug().
+		Str("old_container_id", oldContainer.ID).
+		Msg("Old container is removed")
+
 	// rename new container to old container name
 	err = dockerCli.ContainerRename(ctx, newContainerID, oldContainerName)
 	if err != nil {
@@ -124,6 +151,7 @@
 	log.Debug().
 		Msg("An error occurred during the update process - removing newly created container")
 
+	log.Debug().Msg("restarting old container")
 	// should restart old container
 	err := dockerCli.ContainerStart(ctx, oldContainerId, container.StartOptions{})
 	if err != nil {
@@ -135,8 +163,8 @@
 	if newContainerID != "" {
 		printLogsToStdout(ctx, dockerCli, newContainerID)
 
-		err = dockerCli.ContainerRemove(ctx, newContainerID, container.RemoveOptions{Force: true})
-		if err != nil {
+		log.Debug().Msg("removing the new container")
+		if err := dockerCli.ContainerRemove(ctx, newContainerID, container.RemoveOptions{Force: true}); err != nil {
 			log.Err(err).
 				Msg("Unable to remove temporary container, please remove it manually")
 		}
@@ -155,6 +183,7 @@ func buildContainerName(containerName string) string {
 
 func pullImage(ctx context.Context, dockerCli *client.Client, imageName string) (bool, error) {
 	if os.Getenv("SKIP_PULL") != "" {
+		simPullImageSleep()
 		return false, nil
 	}
 
@@ -272,6 +301,10 @@ func monitorHealth(ctx context.Context, dockerCli *client.Client, containerId st
 
 	if container.State.Health == nil {
 		if container.State.Status == "exited" {
+			log.Info().
+				Str("containerId", containerId).
+				Str("status", container.State.Status).
+				Msg("monitorHealth found container exited unexpectedly")
 			return false, errors.New("Container exited unexpectedly")
 		}
 
@@ -285,6 +318,10 @@ func monitorHealth(ctx context.Context, dockerCli *client.Client, containerId st
 
 	tries := 5
 	for i := 0; i < tries; i++ {
+		log.Debug().Int("retry_time", i+1).
+			Str("container_state_health_status", container.State.Health.Status).
+			Str("context", "MonitorHealthStatus").
+			Msg("Updater monitors health status")
 
 		if container.State.Health.Status == "healthy" {
 			return true, nil
@@ -349,6 +386,12 @@ func createContainer(ctx context.Context, dockerCli *client.Client, imageName, t
 
 	updateConfig(containerConfigCopy)
 
+	log.Debug().
+		Str("update_id", containerConfigCopy.Labels["io.portainer.update.scheduleId"]).
+		Strs("cmds", containerConfigCopy.Cmd).
+		Str("temporary_container_name", tempContainerName).
+		Msg("Copied container config")
+
 	newContainer, err := dockerCli.ContainerCreate(ctx,
 		containerConfigCopy,
 		oldContainer.HostConfig,
@@ -387,3 +430,21 @@
 	}
 
 }
+
+// simPullImageSleep blocks for a random duration between 2m50s and 4m,
+// simulating the time a real image pull takes when SKIP_PULL is set.
+func simPullImageSleep() {
+	// rand.Seed is deprecated since Go 1.20; use a locally seeded generator.
+	rng := rand.New(rand.NewSource(time.Now().UnixNano()))
+
+	// Bounds of the simulated pull, in seconds.
+	minDuration := 2*60 + 50 // 2 minutes 50 seconds
+	maxDuration := 4 * 60    // 4 minutes
+
+	randomDuration := time.Duration(rng.Intn(maxDuration-minDuration+1)+minDuration) * time.Second
+
+	log.Info().Dur("simulate_pull_image_sleep", randomDuration).
+		Msg("simulate sleep")
+
+	time.Sleep(randomDuration)
+}
diff --git a/log/log.go b/log/log.go
index 6c3bf13..a83fd0e 100644
--- a/log/log.go
+++ b/log/log.go
@@ -24,19 +24,24 @@
 )
 
 // ConfigureLogger configures the logger
-func ConfigureLogger(pretty bool) {
+// to write structured (or, when pretty, console-formatted) logs to file.
+func ConfigureLogger(pretty bool, file *os.File) {
 	zerolog.ErrorStackFieldName = "stack_trace"
 	zerolog.ErrorStackMarshaler = pkgerrors.MarshalStack
 	zerolog.TimeFieldFormat = zerolog.TimeFormatUnix
 
 	stdlog.SetFlags(0)
-	stdlog.SetOutput(log.Logger)
 
-	log.Logger = log.Logger.With().Caller().Stack().Logger()
+	// Write all zerolog output to the caller-supplied file instead of stderr.
+	log.Logger = zerolog.New(file).With().Caller().Stack().Logger()
 
 	if pretty {
-		log.Logger = log.Logger.Output(zerolog.ConsoleWriter{Out: os.Stderr})
+		log.Logger = log.Logger.Output(zerolog.ConsoleWriter{Out: file})
 	}
+
+	// Redirect the stdlib logger only after log.Logger is fully configured:
+	// SetOutput captures the logger value, so ordering matters here.
+	stdlog.SetOutput(log.Logger)
 }
 
 // SetLoggingLevel sets the logging level
diff --git a/main.go b/main.go
index 9e65cc7..ebbac05 100644
--- a/main.go
+++ b/main.go
@@ -19,11 +19,24 @@
 			Summary: true,
 		}))
 
-	log.ConfigureLogger(cli.CLI.PrettyLog)
+	// Persist updater logs to a file so they survive the agent container
+	// being replaced during the update.
+	file, err := os.OpenFile("/tmp/updater.log",
+		os.O_APPEND|os.O_CREATE|os.O_WRONLY,
+		0664,
+	)
+	if err != nil {
+		// A broken log file must not abort the update; fall back to stderr.
+		file = os.Stderr
+	} else {
+		// NOTE: os.Exit below skips deferred calls; the OS closes the fd.
+		defer file.Close()
+	}
+
+	log.ConfigureLogger(cli.CLI.PrettyLog, file)
 	log.SetLoggingLevel(log.Level(cli.CLI.LogLevel))
 
-	err := cliCtx.Run()
-	if err != nil {
+	if err := cliCtx.Run(); err != nil {
 		cliCtx.FatalIfErrorf(err)
 	}
 	os.Exit(0)