docker ps -a
docker images
docker volume ls
docker network ls
docker volume prune
docker network prune
docker rm -f $(docker ps -aq)
docker rm -vf $(docker ps -aq)
docker rmi $(docker images -q)
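Prune everything unused in one command (stopped containers, unused networks and images, build cache; --volumes adds volumes):
docker system prune -a --volumes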
docker logs container_id
docker logs -f container_id
docker logs -f --since 10m container_id
docker exec -it container_id bash
docker container run --publish 80:80 nginx
docker container run --publish 80:80 --detach nginx
docker container run --publish 80:80 --detach --name webhost nginx
docker run --publish 80:80 nginx
docker run --publish 80:80 --detach nginx
docker run --publish 80:80 --detach --name webhost nginx
docker container ls
docker ps
docker container stop CONTAINER_ID
docker stop CONTAINER_ID
docker container logs CONTAINER_ID
docker logs CONTAINER_ID
docker container top CONTAINER_ID
docker top CONTAINER_ID
docker start -ai ubuntu
-a : attach
-i : interactive
docker network ls
docker network inspect NETWORK_NAME
docker network create --driver DRIVER NETWORK_NAME
docker network connect NETWORK_NAME CONTAINER_ID
docker network disconnect NETWORK_NAME CONTAINER_ID
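Example: create a user-defined bridge network and attach a running container (names here are illustrative):
docker network create --driver bridge my_net
docker network connect my_net webhost
docker network inspect my_net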
Show an image's layers: docker history IMAGE:TAG
Size reduction tool: https://github.com/wagoodman/dive#dive
Distroless images: https://github.com/GoogleContainerTools/distroless
Build without cache: docker build --no-cache --pull ...
Note: newer Docker releases ship Compose as a built-in plugin, so use the subcommand docker compose
instead of the standalone docker-compose.
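Example with the built-in plugin:
docker compose -f docker-compose-file.yaml up -d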
docker-compose -f docker-compose-file.yaml up
docker-compose -f docker-compose-file.yaml down
docker-compose -f docker-compose-file.yaml stop
docker-compose -f docker-compose-file.yaml start
docker-compose -f docker-compose-file.yaml up -d
docker-compose -f docker-compose-file.yaml logs
docker-compose -f docker-compose-file.yaml logs -f
docker-compose -f docker-compose-file.yaml down -v
ARG
The ARG instruction defines a variable that users can pass at build time to the builder with the docker build command, using the --build-arg <varname>=<value> flag.
ARG user1
ARG buildno
With a default: ARG buildno=1
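Passing a value at build time (the value is illustrative):
docker build --build-arg buildno=2 -t myimage .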
Using an argument:
ARG SETTINGS
RUN ./run/setup $SETTINGS
ARG ALPINE_VER
FROM alpine:${ALPINE_VER} as peer-base
FROM
A build stage can be named (FROM <image> AS <name>); the name can be used in subsequent FROM and COPY --from=<name|index> instructions to refer to the image built in that stage (see the full multi-stage example below).
RUN
RUN <command>
or
RUN ["executable", "param1", "param2"]
WORKDIR
ENV DIRPATH /path
WORKDIR $DIRPATH/$DIRNAME
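A minimal sketch of the result, assuming DIRNAME is also set:
ENV DIRPATH /path
ENV DIRNAME subdir
WORKDIR $DIRPATH/$DIRNAME
RUN pwd    # prints /path/subdir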
ADD
The ADD instruction copies new files, directories, or remote file URLs from <src> and adds them to the filesystem of the image at the path <dest>.
ADD [--chown=<user>:<group>] <src>... <dest>
ADD [--chown=<user>:<group>] ["<src>",... "<dest>"]
As it appears in docker history output:
ADD file:0c4555f363c2672e350001f1293e689875a3760afe7b3f9146886afe67121cba in /
COPY
COPY --from=peer /go/src/github.com/hyperledger/fabric/build/bin /usr/local/bin
CMD
CMD ["peer","node","start"]
EXPOSE
EXPOSE 80/udp
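EXPOSE only documents a port (tcp by default); publishing still happens at run time with -p:
docker run -p 80:80 IMAGE
A complete multi-stage Dockerfile (Hyperledger Fabric peer) combining these instructions: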
# Copyright IBM Corp. All Rights Reserved.
#
# SPDX-License-Identifier: Apache-2.0
ARG GO_VER
ARG ALPINE_VER
FROM alpine:${ALPINE_VER} as peer-base
RUN apk add --no-cache tzdata
FROM golang:${GO_VER}-alpine${ALPINE_VER} as golang
RUN apk add --no-cache \
bash \
gcc \
git \
make \
musl-dev
ADD . $GOPATH/src/github.com/hyperledger/fabric
WORKDIR $GOPATH/src/github.com/hyperledger/fabric
FROM golang as peer
ARG GO_TAGS
RUN make peer GO_TAGS=${GO_TAGS}
FROM peer-base
ENV FABRIC_CFG_PATH /etc/hyperledger/fabric
VOLUME /etc/hyperledger/fabric
VOLUME /var/hyperledger
COPY --from=peer /go/src/github.com/hyperledger/fabric/build/bin /usr/local/bin
COPY --from=peer /go/src/github.com/hyperledger/fabric/sampleconfig/msp ${FABRIC_CFG_PATH}/msp
COPY --from=peer /go/src/github.com/hyperledger/fabric/sampleconfig/core.yaml ${FABRIC_CFG_PATH}
EXPOSE 7051
CMD ["peer","node","start"]
docker container run -d -p 5000:5000 --name registry -v $(pwd)/registry-data/:/var/lib/registry registry
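Tag a local image with the registry address before pushing:
docker tag hello-world 127.0.0.1:5000/hello-world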
docker push 127.0.0.1:5000/hello-world
Secrets are mounted in the container at /run/secrets/SECRET_NAME
Creating a secret:
docker secret create db_secret db_secret.txt
echo "password" | docker secret create db_secret -
Service with secrets:
docker service create --name psql --secret psql_user --secret psql_pass -e POSTGRES_PASSWORD_FILE=/run/secrets/psql_pass -e POSTGRES_USER_FILE=/run/secrets/psql_user postgres
version: "3.1"
services:
psql:
image: postgres
secrets:
- psql_user
- psql_password
environment:
POSTGRES_PASSWORD_FILE: /run/secrets/psql_password
POSTGRES_USER_FILE: /run/secrets/psql_user
secrets:
psql_user:
file: ./psql_user.txt
psql_password:
file: ./psql_password.txt
docker stack deploy -c docker-compose.yml db_stack_name
docker stack deploy -c stack_example.yml STACK_NAME
docker stack rm STACK_NAME
docker stack ps STACK_NAME
docker stack ls
docker swarm init
docker node ls
docker service create alpine ping 8.8.8.8
docker service ps SERVICE_NAME
docker service update SERVICE_NAME --replicas 3
docker service rm SERVICE_NAME
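Equivalent shorthand for changing the replica count:
docker service scale SERVICE_NAME=3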
docker swarm init
docker swarm join --token SWMTKN-1-25v0dnvieneukkkgl8wd39ohlrbbs6z7k20r1se77y5q2vbs2e-3yhoecp5c8m8z2m0z3uhus4ia 10.128.0.2:2377
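Reprint the join command (including the token) for additional nodes:
docker swarm join-token worker
docker swarm join-token manager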
docker node update --role manager node2
docker node update --role manager node3
docker service create --replicas 3 alpine ping 8.8.8.8
Create an overlay network for swarm services: docker network create --driver overlay NETWORK_NAME
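Attach a service to the overlay network (names are illustrative):
docker service create --network my_overlay --name web nginx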
buildx
The buildx plugin uses BuildKit to run build stages concurrently (useful in multi-stage builds).
install/docker/buildx-install.sh
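Example multi-platform build (tag and platforms are illustrative; add --push to publish the result):
docker buildx build --platform linux/amd64,linux/arm64 -t user/app:latest .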
version: "3"
services:
redis:
image: redis:alpine
ports:
- "6379"
networks:
- frontend
deploy:
replicas: 2
update_config:
parallelism: 2
delay: 10s
restart_policy:
condition: on-failure
db:
image: postgres:9.4
volumes:
- db-data:/var/lib/postgresql/data
networks:
- backend
deploy:
placement:
constraints: [node.role == manager]
vote:
image: dockersamples/examplevotingapp_vote:before
ports:
- 5000:80
networks:
- frontend
depends_on:
- redis
deploy:
replicas: 2
update_config:
parallelism: 2
restart_policy:
condition: on-failure
result:
image: dockersamples/examplevotingapp_result:before
ports:
- 5001:80
networks:
- backend
depends_on:
- db
deploy:
replicas: 1
update_config:
parallelism: 2
delay: 10s
restart_policy:
condition: on-failure
worker:
image: dockersamples/examplevotingapp_worker
networks:
- frontend
- backend
deploy:
mode: replicated
replicas: 1
labels: [APP=VOTING]
restart_policy:
condition: on-failure
delay: 10s
max_attempts: 3
window: 120s
placement:
constraints: [node.role == manager]
networks:
frontend:
backend:
volumes:
db-data: