Run du -sh .[^.]* in your home directory and look for any large dot directories. Common culprits are a large .cache directory and a large .conda directory.
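For a quick overview, a one-liner like the following sorts the dot directories by size so the largest appear last:

du -sh ~/.[^.]* 2>/dev/null | sort -h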
Alternatively, you can browse disk usage interactively with ncdu via Shifter:

shifterimg pull bytesco/ncdu
shifter --entrypoint --image=bytesco/ncdu

This page describes setup for running calculations at NERSC's Perlmutter HPC.
alias perlmutter="ssh <your_username>@perlmutter.nersc.gov"

scp user_name@dtn01.nersc.gov:/remote/path/myfile.txt /local/path
scp /local/path/myfile.txt user_name@dtn01.nersc.gov:/remote/path

#!/bin/bash -l
#SBATCH -N 2 #Use 2 nodes
#SBATCH -t 00:30:00 #Set 30 minute time limit
#SBATCH -q regular #Submit to the regular QOS
#SBATCH -L scratch #Job requires $SCRATCH file system
#SBATCH -C cpu #Use cpu nodes
srun -n 32 -c 4 ./my_executable
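Assuming the script above is saved as, say, my_cpu_job.sh (the filename is just an example), submit it with sbatch:

sbatch my_cpu_job.sh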
sqs -u username (Lists jobs for your account)
scancel job_id (Cancels a job from the queue)

#!/bin/bash -l
#SBATCH -N 2 #Use 2 nodes
#SBATCH -t 00:30:00 #Set 30 minute time limit
#SBATCH -q regular #Submit to the regular QOS
#SBATCH -C gpu #Use gpu nodes
srun -n 8 -c 32 --cpu-bind=cores -G 8 ./my_executable

*/120 * * * * /bin/bash -l PATH_TO_SCRIPT.sh >> PATH_TO_LOGFILE

rlaunch -c /your/config multi N

rocket_launch: rlaunch -c /your/config multi 10 --nlaunches 0 --timeout 169200
nodes: 20
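The rocket_launch and nodes lines above are entries in the FireWorks queue adapter (my_qadapter.yaml). A minimal sketch, assuming the standard FireWorks CommonAdapter SLURM template; everything besides the two lines above is a placeholder:

_fw_name: CommonAdapter
_fw_q_type: SLURM
rocket_launch: rlaunch -c /your/config multi 10 --nlaunches 0 --timeout 169200
nodes: 20
walltime: '12:00:00'
account: your_account
job_name: fw_production
pre_rocket: source activate YOUR_PRODUCTION_CONDA_ENVIRONMENT
# on Perlmutter you will likely also need qos and constraint entries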
source activate YOUR_PRODUCTION_CONDA_ENVIRONMENT
export FW_CONFIG_FILE=PATH_TO_CONFIG_DIR/FW_config.yaml
cd PATH_TO_YOUR_PRODUCTION_FOLDER
qlaunch --fill_mode rapidfire -m 1000 --nlaunches 1
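For reference, the FW_config.yaml referenced above can be as small as a pointer to the directory holding my_launchpad.yaml, my_fworker.yaml, and my_qadapter.yaml; a minimal sketch (the path is a placeholder):

CONFIG_FILE_DIR: PATH_TO_CONFIG_DIR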
$ module load python
$ cd /global/common/software/matgen # or "cd /global/common/software/jcesr"
$ mkdir <username> # change <username> to your NERSC user name
$ chmod g-w <username> # remove group write access to avoid others changing it
$ cd <username>
$ mkdir conda && mkdir conda/envs
$ conda config --prepend envs_dirs /global/common/software/matgen/<username>/conda/envs
$ conda config --show
envs_dirs:
- /global/common/software/matgen/<username>/conda/envs
- /global/homes/m/<username>/.conda/envs
- /global/common/software/nersc/pm-2021q4/sw/python/3.9-anaconda-2021.11/envs
$ mkdir /global/common/software/matgen/<username>/conda/pkgs
$ conda config --prepend pkgs_dirs /global/common/software/matgen/<username>/conda/pkgs
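As a quick check that new environments now land on /global/common/software (the environment name is just an example):

$ conda create -n test_env python=3.10
$ conda env list # the new environment should appear under /global/common/software/matgen/<username>/conda/envs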
This page describes how to get set up to run calculations at BRC on the Savio cluster. Adding the snippet below to your ~/.ssh/config enables SSH connection sharing for BRC hosts (create the ~/.ssh/sockets directory first):

Host *.brc.berkeley.edu
ControlMaster auto
ControlPath ~/.ssh/sockets/%r@%h-%p
ControlPersist 600

ssh your_username@hpc.brc.berkeley.edu
alias savio="ssh your_username@hpc.brc.berkeley.edu"

module load <module_name>
module avail
module list
module unload <module_name>
module purge

export MODULEPATH=${MODULEPATH}:/global/home/groups/co_lsdi/sl7/modfiles

#!/bin/bash -l
#SBATCH --nodes=1 #Use 1 node
#SBATCH --ntasks=64 #Use 64 tasks for the job
#SBATCH --qos=lsdi_knl2_normal #Set job to normal qos
#SBATCH --time=01:00:00 #Set 1 hour time limit for job
#SBATCH --partition=savio2_knl #Submit to the KNL nodes owned by the Persson Group
#SBATCH --account=co_lsdi #Charge to co_lsdi account
#SBATCH --job-name=savio2_job #Name for the job
mpirun --bind-to core <executable>

#!/bin/bash -l
#SBATCH --nodes=1 #Use 1 node
#SBATCH --ntasks-per-core=1 #Use 1 task per core on the node
#SBATCH --qos=savio_lowprio #Set job to low priority qos
#SBATCH --time=01:00:00 #Set 1 hour time limit for job
#SBATCH --partition=savio2 #Submit to the Haswell nodes
#SBATCH --account=co_lsdi #Charge to co_lsdi account
#SBATCH --job-name=savio2_job #Name for the job
mpirun --bind-to core <executable>

This page describes how to get set up to run calculations at LRC on the Lawrencium cluster.
ssh your_username@lrc-login.lbl.gov
alias lawrencium="ssh <your_username>@lrc-login.lbl.gov"

module load <module_name>
module avail
module list
module unload <module_name>
module purge

#!/bin/bash
# Job name:
#SBATCH --job-name=<job_name>
#
# Partition:
#SBATCH --partition=cf1
#
# QoS:
#SBATCH --qos=condo_mp_cf1
#
# Account:
#SBATCH --account=lr_mp
#
# Nodes (IF YOU CHANGE THIS YOU MUST CHANGE ntasks too!!!):
#SBATCH --nodes=1
#
# Processors (MUST BE 64xNUM_NODES ALWAYS!!!):
#SBATCH --ntasks=64
#
# Wall clock limit:
#SBATCH --time=24:00:00
## Run command
module load vasp/6.prerelease-vdw
export OMP_PROC_BIND=true
export OMP_PLACES=threads
export OMP_NUM_THREADS=1 # NEVER CHANGE THIS!!
mpirun --bind-to core <executable>

#!/bin/bash
# Job name:
#SBATCH --job-name=<job_name>
#
# Partition:
#SBATCH --partition=es1
#
# QoS:
#SBATCH --qos=condo_mp_es1
#
# Account:
#SBATCH --account=lr_mp
#
# GPUs:
#SBATCH --gres=gpu:2
#
# CPU cores:
#SBATCH --cpus-per-task=8
#
# Constraints:
#SBATCH --constraint=es1_v100
#
# Wall clock limit:
#SBATCH --time=24:00:00
export CUDA_VISIBLE_DEVICES=0,1
module load cuda/10.0

#!/bin/bash
# Job name:
#SBATCH --job-name=<job_name>
#
# Partition:
#SBATCH --partition=es1
#
# QoS:
#SBATCH --qos=es_lowprio
#
# Account:
#SBATCH --account=lr_mp
#
# GPUs:
#SBATCH --gres=gpu:2
#
# CPU cores:
#SBATCH --cpus-per-task=8
#
# Constraints:
#SBATCH --constraint=es1_v100
#
# Wall clock limit:
#SBATCH --time=24:00:00
export CUDA_VISIBLE_DEVICES=0,1
module load cuda/10.0

This page describes setup for running calculations at NREL's Kestrel HPC.

ssh johndoe@kestrel.nrel.gov
ssh johndoe@hpcsh.nrel.gov

cd ~/ # ensures you start off in your home dir
mkdir -p ~/miniconda3
wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -O ~/miniconda3/miniconda.sh
bash ~/miniconda3/miniconda.sh -b -u -p ~/miniconda3
rm ~/miniconda3/miniconda.sh
~/miniconda3/bin/conda init bash
~/miniconda3/bin/conda init zsh
source ~/.bashrc

conda create -n cms python=3.9 pandas seaborn numpy scipy matplotlib
conda activate cms
pip install git+https://github.com/materialsproject/atomate2.git # replace with your preferred way of installing atomate2
pip install fireworks
pip install pydantic==2.4.2 # to avoid some pydantic bugs with fireworks...
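As a quick sanity check of the installation (a minimal example, not part of the original recipe):

python -c "import atomate2, fireworks, pydantic; print(pydantic.__version__)"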
VASP_CMD: srun -n 104 -c 1 vasp_std
VASP_GAMMA_CMD: srun -n 104 -c 1 vasp_gam

VASP_CMD: srun -n 104 -c 1 --cpu-bind=cores --gpu-bind=single:1 -G 4 vasp_std
VASP_GAMMA_CMD: srun -n 104 -c 1 --cpu-bind=cores --gpu-bind=single:1 -G 4 vasp_std

VASP_CMD: srun -n 104 -c 2 vasp_std
VASP_GAMMA_CMD: srun -n 104 -c 2 vasp_gam
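These VASP_CMD / VASP_GAMMA_CMD pairs are alternative settings for atomate2; pick the pair that matches the node type you run on. A sketch of adding one pair to the atomate2 settings file, assuming the default ~/.atomate2.yaml location:

cat >> ~/.atomate2.yaml << 'EOF'
VASP_CMD: srun -n 104 -c 1 vasp_std
VASP_GAMMA_CMD: srun -n 104 -c 1 vasp_gam
EOF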
The JOB_STORE configuration below goes in your jobflow settings file (typically ~/.jobflow.yaml):

JOB_STORE:
  docs_store:
    type: MongoStore
    database: johndoe_general
    host: mongodb03.nersc.gov
    port: 27017
    username: johndoe_general_admin
    password: ***PASSWORD***
    collection_name: kestrel_outputs
  additional_stores:
    data:
      type: GridFSStore
      database: johndoe_general
      host: mongodb03.nersc.gov
      port: 27017
      username: johndoe_general_admin
      password: ***PASSWORD***
      collection_name: kestrel_outputs_blobs
Alternatively, with an extra additional store for trajectory data:

JOB_STORE:
  docs_store:
    type: MongoStore
    database: johndoe_general
    host: mongodb03.nersc.gov
    port: 27017
    username: johndoe_general_admin
    password: ***PASSWORD***
    collection_name: kestrel_outputs
  additional_stores:
    data:
      type: GridFSStore
      database: johndoe_general
      host: mongodb03.nersc.gov
      port: 27017
      username: johndoe_general_admin
      password: ***PASSWORD***
      collection_name: kestrel_outputs_blobs
    trajectory:
      type: GridFSStore
      database: johndoe_general
      host: mongodb03.nersc.gov
      port: 27017
      username: johndoe_general_admin
      password: ***PASSWORD***
      collection_name: kestrel_trajectory_blobs
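Finally, a sketch of pointing atomate2 and jobflow at the settings files above via environment variables (the paths are placeholders; the variable names are the standard atomate2/jobflow ones, to the best of my knowledge):

export ATOMATE2_CONFIG_FILE=/path/to/atomate2.yaml # defaults to ~/.atomate2.yaml if unset
export JOBFLOW_CONFIG_FILE=/path/to/jobflow.yaml # defaults to ~/.jobflow.yaml if unset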