Tutorial: Phonons, EELS and magnons for HPC and GPUs
Phonon modes of CnSnI3 at Gamma
pwscf simulation, step 1
Files needed:
#!/bin/bash
#SBATCH --nodes 1
#SBATCH --time=00:10:00
#SBATCH --partition=gpu
#SBATCH --ntasks-per-node=4
#SBATCH --ntasks-per-socket=2
#SBATCH --cpus-per-task=32
#SBATCH --gres=gpu:4
#SBATCH --job-name=phstep1
#SBATCH --error=err.job-%j
#SBATCH --output=out.job-%j
#SBATCH --hint=nomultithread
#SBATCH --reservation=maxgpu

export WORK=/ceph/hpc/data/d2021-135-users

module purge
module use ${WORK}/modules

module load QuantumESPRESSO/DEV-NVHPC-21.2

export ESPRESSO_PSEUDO=${PWD}/../../../pseudo
export OMP_NUM_THREADS=1

mpiopt="-mca pml ucx -mca btl ^uct,tcp,openib,vader --map-by socket:PE=32 --rank-by core"

mpirun $mpiopt -np npw pw.x -ni 1 -nk 1 -i pw.CnSnI3.in > pw.CnSnI3.out
&CONTROL
calculation = ''
outdir = './out'
/
&SYSTEM
ecutwfc = 80
ecutrho = 320
occupations = 'fixed'
ntyp = 3
nat = 5
ibrav = 0
/
&ELECTRONS
conv_thr = 1e-14
/
&IONS
/
&CELL
press = 0
press_conv_thr = 0.05
/
ATOMIC_SPECIES
Cs 132.90545196 Cs-nc-pbesol.upf
Sn 118.71 Sn-nc-pbesol.upf
I 126.90447 I-nc-pbesol.upf
K_POINTS automatic
8 8 8 1 1 1
CELL_PARAMETERS angstrom
6.1821206415142775 0.0000000000000000 0.0000000000000000
0.0000000000000000 6.1821206415142775 0.0000000000000000
0.0000000000000000 0.0000000000000000 6.1821206415142775
ATOMIC_POSITIONS angstrom
Cs 3.0910603207571383 3.0910603207571383 3.0910603207571383
Sn 0.0000000000000000 0.0000000000000000 0.0000000000000000
I 3.0910603207571383 0.0000000000000000 0.0000000000000000
I 0.0000000000000000 0.0000000000000000 3.0910603207571383
I 0.0000000000000000 3.0910603207571383 0.0000000000000000
Perform a vc-relax calculation for CnSnI3 using the pw.x program.

- Copy ../inputs/pw.CnSnI3.in into the current folder and modify the &CONTROL namelist to do a vc-relax calculation: calculation = 'vc-relax'
- Open submit.slurm and modify npw to use R&G parallelism on 4 MPI tasks, one GPU per task.
- Submit the job file: sbatch submit.slurm
- Check whether convergence has been achieved (a quick check is sketched below).
- Copy the output directory (out/) into the folder of step 2: cp -r ./out ../step2/
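A minimal convergence check, assuming the output is written to pw.CnSnI3.out as in the submit script (the strings searched for are standard pw.x messages, not files of this tutorial):

# a completed vc-relax reports the end of the geometry optimization and a final enthalpy
grep "End of BFGS Geometry Optimization" pw.CnSnI3.out
grep "Final enthalpy" pw.CnSnI3.out
# the run should also terminate cleanly
grep "JOB DONE" pw.CnSnI3.out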
Solution
#!/bin/bash
#SBATCH --nodes 1
#SBATCH --time=00:10:00
#SBATCH --partition=gpu
#SBATCH --ntasks-per-node=4
#SBATCH --ntasks-per-socket=2
#SBATCH --cpus-per-task=32
#SBATCH --gres=gpu:4
#SBATCH --job-name=phstep1
#SBATCH --error=err.job-%j
#SBATCH --output=out.job-%j
#SBATCH --hint=nomultithread

export WORK=/ceph/hpc/data/d2021-135-users

module purge
module use ${WORK}/modules

module load QuantumESPRESSO/DEV-NVHPC-21.2

export ESPRESSO_PSEUDO=${PWD}/../../../pseudo
export OMP_NUM_THREADS=1

mpiopt="-mca pml ucx -mca btl ^uct,tcp,openib,vader --map-by socket:PE=32 --rank-by core"
mpirun $mpiopt -np 4 pw.x -ni 1 -nk 1 -i pw.CnSnI3.in > pw.CnSnI3.out
&CONTROL
calculation = 'vc-relax'
outdir = './out'
/
&SYSTEM
ecutwfc = 80
ecutrho = 320
occupations = 'fixed'
ntyp = 3
nat = 5
ibrav = 0
/
&ELECTRONS
conv_thr = 1e-14
/
&IONS
/
&CELL
press = 0
press_conv_thr = 0.05
/
ATOMIC_SPECIES
Cs 132.90545196 Cs-nc-pbesol.upf
Sn 118.71 Sn-nc-pbesol.upf
I 126.90447 I-nc-pbesol.upf
K_POINTS automatic
8 8 8 1 1 1
CELL_PARAMETERS angstrom
6.1821206415142775 0.0000000000000000 0.0000000000000000
0.0000000000000000 6.1821206415142775 0.0000000000000000
0.0000000000000000 0.0000000000000000 6.1821206415142775
ATOMIC_POSITIONS angstrom
Cs 3.0910603207571383 3.0910603207571383 3.0910603207571383
Sn 0.0000000000000000 0.0000000000000000 0.0000000000000000
I 3.0910603207571383 0.0000000000000000 0.0000000000000000
I 0.0000000000000000 0.0000000000000000 3.0910603207571383
I 0.0000000000000000 3.0910603207571383 0.0000000000000000
Phonon calculation, step 2
Files needed:
#!/bin/bash
#SBATCH --nodes 1
#SBATCH --time=00:20:00
#SBATCH --partition=gpu
#SBATCH --ntasks-per-node=1
#SBATCH --ntasks-per-socket=1
#SBATCH --cpus-per-task=32
#SBATCH --gres=gpu:1
#SBATCH --job-name=phstep2
#SBATCH --error=err.job-%j
#SBATCH --output=out.job-%j
#SBATCH --hint=nomultithread
#SBATCH --reservation=maxgpu

export WORK=/ceph/hpc/data/d2021-135-users

module purge
module use ${WORK}/modules
module load QuantumESPRESSO/DEV-NVHPC-21.2

export ESPRESSO_PSEUDO=${PWD}/../../../pseudo
export OMP_NUM_THREADS=1

mpiopt="-mca pml ucx -mca btl ^uct,tcp,openib,vader --map-by socket:PE=32 --rank-by core"
mpirun $mpiopt -np 1 ph.x -ni 1 -nk 1 -i ph.CnSnI3.in > ph.CnSnI3.out
&inputph
prefix = ''
fildyn = 'harmdyn_support'
amass(1) =
amass(2) =
amass(3) =
tr2_ph = 1d-16
outdir = './out'
/
Perform a phonon calculation at Gamma for CnSnI3 using the ph.x program.

- Copy ../inputs/ph.CnSnI3.in into the current folder and complete the &inputph namelist; add the coordinates of the Gamma point after the namelist:
  &inputph
    prefix = ''
    amass(1) =
    amass(2) =
    amass(3) =
  /
  X Y Z
- Submit the job file to run ph.x on 1 MPI task (1 GPU).
- Check the number of k points: awk '/number of k/' ph.CnSnI3.out
- Check the number of irreducible representations: awk '/irreducible/' ph.CnSnI3.out
- Check the dynamical matrix written to harmdyn_support (a few more quick checks are sketched below): tail -n 97 harmdyn_support
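A few additional sanity checks on the ph.x run, assuming the output file is ph.CnSnI3.out (the patterns are standard ph.x messages):

# the run terminated cleanly
grep "JOB DONE" ph.CnSnI3.out
# phonon frequencies at Gamma, before the acoustic sum rule is applied
grep "freq (" ph.CnSnI3.out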
Solution
#!/bin/bash
#SBATCH --nodes 1
#SBATCH --time=00:20:00
#SBATCH --partition=gpu
#SBATCH --ntasks-per-node=1
#SBATCH --ntasks-per-socket=1
#SBATCH --cpus-per-task=32
#SBATCH --gres=gpu:1
#SBATCH --job-name=phstep2
#SBATCH --error=err.job-%j
#SBATCH --output=out.job-%j
#SBATCH --hint=nomultithread

export WORK=/ceph/hpc/data/d2021-135-users

module purge
module use ${WORK}/modules
module load QuantumESPRESSO/DEV-NVHPC-21.2

export ESPRESSO_PSEUDO=${PWD}/../../../pseudo
export OMP_NUM_THREADS=1

mpiopt="-mca pml ucx -mca btl ^uct,tcp,openib,vader --map-by socket:PE=32 --rank-by core"
mpirun $mpiopt -np 1 ph.x -ni 1 -nk 1 -i ph.CnSnI3.in > ph.CnSnI3.out
&inputph
prefix = 'pwscf'
fildyn = 'harmdyn_support'
tr2_ph = 1d-16
outdir = './out'
/
0.0 0.0 0.0
ASR rule application, step 3
Files needed:
#!/bin/bash
#SBATCH --nodes 1
#SBATCH --time=00:10:00
#SBATCH --partition=gpu
#SBATCH --ntasks-per-node=1
#SBATCH --ntasks-per-socket=1
#SBATCH --cpus-per-task=32
#SBATCH --gres=gpu:1
#SBATCH --job-name=phstep3
#SBATCH --error=err.job-%j
#SBATCH --output=out.job-%j
#SBATCH --hint=nomultithread
#SBATCH --reservation=maxgpu

export WORK=/ceph/hpc/data/d2021-135-users

module purge
module use ${WORK}/modules
module load QuantumESPRESSO/DEV-NVHPC-21.2

export ESPRESSO_PSEUDO=${PWD}/../../../pseudo
export OMP_NUM_THREADS=1

mpiopt="-mca pml ucx -mca btl ^uct,tcp,openib,vader --map-by socket:PE=32 --rank-by core"
mpirun $mpiopt -np 1 dynmat.x -ni 1 -nk 1 -i dyn.CnSnI3.in > dyn.CnSnI3.out
&input
fildyn = 'harmdyn_support',
asr = ''
/
Apply the Acoustic Sum Rule (ASR) with dynmat.x.

- Copy ../inputs/dyn.CnSnI3.in and set the 'crystal' ASR in the &input namelist: asr = 'crystal'
- Copy ../step2/harmdyn_support into the current folder.
- Submit the job.
- Check the phonon frequencies with the ASR applied in dyn.CnSnI3.out (a quick extraction is sketched below).
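A quick way to extract the frequency table, assuming dynmat.x wrote its log to dyn.CnSnI3.out (the "# mode" header is standard dynmat.x output, as shown in the solution below):

# print from the mode header onward: 15 modes for the 5-atom cell, plus the header line
awk '/# mode/{p=1} p' dyn.CnSnI3.out | head -n 16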
Solution
&input
fildyn = 'harmdyn_support',
asr = 'crystal'
/
Program DYNMAT v.7.1 starts on 4Nov2022 at 23: 2:22
Git branch: develop_bands
Last git commit: 816aee40db40e6b706df606a62b57acfcd26b4df
Last git commit date: Fri Oct 21 11:20:44 2022 +0200
Last git commit subject: improvement for magnons
This program is part of the open-source Quantum ESPRESSO suite
for quantum simulation of materials; please cite
"P. Giannozzi et al., J. Phys.:Condens. Matter 21 395502 (2009);
"P. Giannozzi et al., J. Phys.:Condens. Matter 29 465901 (2017);
"P. Giannozzi et al., J. Chem. Phys. 152 154105 (2020);
URL http://www.quantum-espresso.org",
in publications or presentations arising from this work. More details at
http://www.quantum-espresso.org/quote
Parallel version (MPI & OpenMP), running on 1 processor cores
Number of MPI processes: 1
Threads/MPI process: 1
MPI processes distributed on 1 nodes
488507 MiB available memory on the printing compute node when the environment starts
Reading Dynamical Matrix from file harmdyn_support
...Force constants read
...epsilon and Z* not read (not found on file)
Acoustic Sum Rule: || Z*(ASR) - Z*(orig)|| = 0.000000E+00
Acoustic Sum Rule: ||dyn(ASR) - dyn(orig)||= 9.509352E-04
A direction for q was not specified:TO-LO splitting will be absent
Polarizability (A^3 units)
multiply by 1.000000 for Clausius-Mossotti correction
0.000000 0.000000 0.000000
0.000000 0.000000 0.000000
0.000000 0.000000 0.000000
IR activities are in (D/A)^2/amu units
# mode [cm-1] [THz] IR
1 -0.00 -0.0000 0.0000
2 0.00 0.0000 0.0000
3 0.00 0.0000 0.0000
4 8.07 0.2419 0.0000
5 8.07 0.2419 0.0000
6 8.07 0.2419 0.0000
7 23.16 0.6943 0.0000
8 23.16 0.6943 0.0000
9 23.16 0.6943 0.0000
10 38.68 1.1597 0.0000
11 38.68 1.1597 0.0000
12 38.68 1.1597 0.0000
13 78.20 2.3443 0.0000
14 78.20 2.3443 0.0000
15 78.20 2.3443 0.0000
DYNMAT : 0.00s CPU 0.06s WALL
This run was terminated on: 23: 2:22 4Nov2022
=------------------------------------------------------------------------------=
JOB DONE.
=------------------------------------------------------------------------------=
Multi GPU offload with pools, step 4
Files needed:
#!/bin/bash
#SBATCH --nodes 1
#SBATCH --time=00:20:00
#SBATCH --partition=gpu
#SBATCH --ntasks-per-node=2
#SBATCH --ntasks-per-socket=1
#SBATCH --cpus-per-task=32
#SBATCH --gres=gpu:2
#SBATCH --job-name=phstep4
#SBATCH --error=err.job-%j
#SBATCH --output=out.job-%j
#SBATCH --hint=nomultithread
#SBATCH --reservation=maxgpu

export WORK=/ceph/hpc/data/d2021-135-users

module purge
module use ${WORK}/modules
module load QuantumESPRESSO/DEV-NVHPC-21.2

export ESPRESSO_PSEUDO=${PWD}/../../../pseudo
export OMP_NUM_THREADS=1

mpiopt="-mca pml ucx -mca btl ^uct,tcp,openib,vader --map-by socket:PE=32 --rank-by core"

mpirun $mpiopt -np 2 ph.x -ni 1 -nk npools -i ph.CnSnI3.in > ph.CnSnI3.out
Perform a phonon calculation at Gamma on 2 GPUs for CnSnI3 using the ph.x program.

- Copy the input of step 2 (../step2/ph.CnSnI3.in) into the current folder.
- Copy the ../step1/out directory into the current folder.
- Modify npools in submit.slurm to distribute the calculation over 2 MPI tasks, one GPU each, with pool parallelization.
- Submit the job file: sbatch submit.slurm
- Check the wall time of the parallel execution (see the comparison sketched below): tail ph.CnSnI3.out
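To quantify the speed-up, the wall time can be compared with the single-GPU run of step 2. A sketch, assuming both outputs end with the standard "PHONON ... CPU ... WALL" timing line:

# final timing line of the 1-GPU (step 2) and 2-GPU (step 4) runs
grep "PHONON" ../step2/ph.CnSnI3.out ph.CnSnI3.out | grep "WALL"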
Solution
#!/bin/bash
#SBATCH --nodes 1
#SBATCH --time=00:20:00
#SBATCH --partition=gpu
#SBATCH --ntasks-per-node=2
#SBATCH --ntasks-per-socket=1
#SBATCH --cpus-per-task=32
#SBATCH --gres=gpu:2
#SBATCH --job-name=phstep4
#SBATCH --error=err.job-%j
#SBATCH --output=out.job-%j
#SBATCH --hint=nomultithread

export WORK=/ceph/hpc/data/d2021-135-users

module purge
module use ${WORK}/modules
module load QuantumESPRESSO/DEV-NVHPC-21.2

export ESPRESSO_PSEUDO=${PWD}/../../../pseudo
export OMP_NUM_THREADS=1

mpiopt="-mca pml ucx -mca btl ^uct,tcp,openib,vader --map-by socket:PE=32 --rank-by core"

mpirun $mpiopt -np 2 ph.x -ni 1 -nk 2 -i ph.CnSnI3.in > ph.CnSnI3.out
&inputph
prefix = ''
fildyn = 'harmdyn_support'
amass(1) =
amass(2) =
amass(3) =
tr2_ph = 1d-16
outdir = './out'
/
0.0 0.0 0.0
Multi GPU offload with images, step 5
Files needed:
#!/bin/bash
#SBATCH --nodes 1
#SBATCH --time=00:30:00
#SBATCH --partition=gpu
#SBATCH --ntasks-per-node=4
#SBATCH --ntasks-per-socket=2
#SBATCH --cpus-per-task=32
#SBATCH --gres=gpu:4
#SBATCH --job-name=phstep5
#SBATCH --error=err.job-%j
#SBATCH --output=out.job-%j
#SBATCH --hint=nomultithread
#SBATCH --reservation=maxgpu

export WORK=/ceph/hpc/data/d2021-135-users

module purge
module use ${WORK}/modules
module load QuantumESPRESSO/DEV-NVHPC-21.2

export ESPRESSO_PSEUDO=${PWD}/../../../pseudo
export OMP_NUM_THREADS=1

mpiopt="-mca pml ucx -mca btl ^uct,tcp,openib,vader --map-by socket:PE=32 --rank-by core"

mpirun $mpiopt -np 4 ph.x -ni nimages -nk 1 -i ph.CnSnI3.in > out.0_0
mpirun $mpiopt -np 1 ph.x -ni 1 -nk 1 -i ph.CnSnI3.recover.in > ph.CnSnI3.recover.out
&input
fildyn = 'harmdyn_support',
asr = ''
/
Perform a phonon calculation at Gamma on 4 GPUs for CnSnI3 using the ph.x program.

- Copy the input of step 2 (../step2/ph.CnSnI3.in) into the current folder.
- Copy ph.CnSnI3.in to ph.CnSnI3.recover.in and add recover = .true. to the &inputph namelist.
- Copy the ../step1/out directory into the current folder.
- Modify nimages in submit.slurm to distribute the calculation over 4 MPI tasks, one GPU each, with image parallelization.
- Submit the job file: sbatch submit.slurm
- With image parallelism there is one output file per image, named out.*_0, where * is the image rank. Check the workload of each image:
  awk '/I am image/ {x=NR+3} (NR<=x) {print $0}' out.*_0
- Compare the wall times. Which image takes the longest? Why? (A sketch for extracting the timings is given below.)
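A minimal sketch for collecting the per-image timings, assuming each out.*_0 file ends with the standard "PHONON ... CPU ... WALL" summary line:

for f in out.*_0; do
    echo "== ${f}"
    grep "PHONON" "${f}" | grep "WALL"
done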
Solution
#!/bin/bash
#SBATCH --nodes 1
#SBATCH --time=00:30:00
#SBATCH --partition=gpu
#SBATCH --ntasks-per-node=4
#SBATCH --ntasks-per-socket=2
#SBATCH --cpus-per-task=32
#SBATCH --gres=gpu:4
#SBATCH --job-name=phstep5
#SBATCH --error=err.job-%j
#SBATCH --output=out.job-%j
#SBATCH --hint=nomultithread

export WORK=/ceph/hpc/data/d2021-135-users

module purge
module use ${WORK}/modules
module load QuantumESPRESSO/DEV-NVHPC-21.2

export ESPRESSO_PSEUDO=${PWD}/../../../pseudo
export OMP_NUM_THREADS=1

mpiopt="-mca pml ucx -mca btl ^uct,tcp,openib,vader --map-by socket:PE=32 --rank-by core"

mpirun $mpiopt -np 4 ph.x -ni 4 -nk 1 -i ph.CnSnI3.in > out.0_0
mpirun $mpiopt -np 1 ph.x -ni 1 -nk 1 -i ph.CnSnI3.recover.in > ph.CnSnI3.recover.out
EELS in bulk Silicon
Calculation of the electron energy loss spectra (EELS) of bulk silicon.
Submit files needed:
#!/bin/bash
#SBATCH --job-name=pwSi
#SBATCH -N 1
#SBATCH --ntasks=64
#SBATCH --time=00:30:00
#SBATCH --partition=cpu
#SBATCH --ntasks-per-node=64
#SBATCH --cpus-per-task=1
#SBATCH --exclusive
#SBATCH --reservation=maxcpu

module purge
module load QuantumESPRESSO/7.1-foss-2022a

export OMP_NUM_THREADS=1

mpirun -np 64 pw.x -nk 16 -i pw.Si.scf.in > pw.Si.scf.out
#!/bin/bash
#SBATCH --job-name=eelsSi
#SBATCH -N 1
#SBATCH --ntasks=64
#SBATCH --time=00:30:00
#SBATCH --partition=cpu
#SBATCH --ntasks-per-node=64
#SBATCH --cpus-per-task=1
#SBATCH --exclusive
#SBATCH --reservation=maxcpu

module purge
module load QuantumESPRESSO/7.1-foss-2022a

export OMP_NUM_THREADS=1

mpirun -np 64 turbo_eels.x -nk 16 -i turbo_eels.Si.tddfpt.in > turbo_eels.Si.tddfpt.out
#!/bin/bash
#SBATCH --job-name=specSi
#SBATCH -N 1
#SBATCH --ntasks=1
#SBATCH --time=00:30:00
#SBATCH --partition=cpu
#SBATCH --ntasks-per-node=1
#SBATCH --cpus-per-task=1
#SBATCH --reservation=maxcpu

module purge
module load QuantumESPRESSO/7.1-foss-2022a

export OMP_NUM_THREADS=1

mpirun -np 1 turbo_spectrum.x -i turbo_spectrum.Si.pp.in > turbo_spectrum.Si.pp.out
Input files needed:
&control
calculation='scf'
restart_mode='from_scratch',
prefix='Sieels'
pseudo_dir = '../../pseudo'
outdir='./tempdir'
/
&system
ibrav = 2,
celldm(1) = 10.26,
nat = 2,
ntyp = 1,
ecutwfc = 20.0
/
&electrons
conv_thr = 1.0d-10
/
ATOMIC_SPECIES
Si 28.08 Si.upf
ATOMIC_POSITIONS {alat}
Si 0.00 0.00 0.00
Si 0.25 0.25 0.25
K_POINTS {automatic}
12 12 12 1 1 1
&lr_input
prefix = 'Sieels',
outdir = './tempdir',
restart_step = 200,
restart = .false.
/
&lr_control
calculator = 'lanczos',
itermax = 2000,
q1 = 0.866,
q2 = 0.000,
q3 = 0.000
/
&lr_input
prefix = 'Sieels',
outdir = './tempdir',
eels = .true.
itermax0 = 2000,
itermax = 10000,
extrapolation = "osc",
epsil = 0.035,
units = 1,
start = 0.0,
end = 50.0,
increment = 0.01
/
A step-by-step guide for running this tutorial can be found in the slides linked at the top of this page.
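The three runs must be executed in order: the ground state with pw.x, the Lanczos chain with turbo_eels.x, and the post-processing with turbo_spectrum.x. One possible way to chain the submissions with SLURM job dependencies is sketched below; the submit-script names are placeholders, since this page does not fix them:

# hypothetical script names: submit_pw.slurm, submit_eels.slurm, submit_spectrum.slurm
jid_scf=$(sbatch --parsable submit_pw.slurm)
jid_eels=$(sbatch --parsable --dependency=afterok:${jid_scf} submit_eels.slurm)
sbatch --dependency=afterok:${jid_eels} submit_spectrum.slurm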
Calculation of the magnon spectra of bulk iron
Submit files needed:
#!/bin/bash
#SBATCH --job-name=pwFe
#SBATCH -N 1
#SBATCH --ntasks=64
#SBATCH --time=00:30:00
#SBATCH --partition=cpu
#SBATCH --ntasks-per-node=64
#SBATCH --cpus-per-task=1
#SBATCH --exclusive
#SBATCH --reservation=maxcpu

module purge
module load QuantumESPRESSO/7.1-foss-2022a

export OMP_NUM_THREADS=1

mpirun -np 64 pw.x -nk 16 -i pw.Fe.scf.in > pw.Fe.scf.out
#!/bin/bash
#SBATCH --job-name=magnonFe
#SBATCH -N 1
#SBATCH --ntasks=64
#SBATCH --time=00:30:00
#SBATCH --partition=cpu
#SBATCH --ntasks-per-node=64
#SBATCH --cpus-per-task=1
#SBATCH --reservation=maxcpu

module purge
module load QuantumESPRESSO/7.1-foss-2022a

export OMP_NUM_THREADS=1

mpirun -np 64 turbo_magnon.x -nk 16 -i turbo_magnon.Fe.tddfpt.in > turbo_magnon.Fe.tddfpt.out
#!/bin/bash
#SBATCH --job-name=specFe
#SBATCH -N 1
#SBATCH --ntasks=1
#SBATCH --time=00:30:00
#SBATCH --partition=cpu
#SBATCH --ntasks-per-node=1
#SBATCH --cpus-per-task=1
#SBATCH --reservation=maxcpu

module purge
module load QuantumESPRESSO/7.1-foss-2022a

export OMP_NUM_THREADS=1

mpirun -np 1 turbo_spectrum.x -i turbo_spectrum.Fe.pp.in > turbo_spectrum.Fe.pp.out
Input files needed:
&control
calculation='scf'
restart_mode='from_scratch',
outdir='./tempdir',
prefix='Femag'
pseudo_dir="../../pseudo"
verbosity='high'
/
&system
nosym = .true.
noinv = .true.
noncolin = .true.
lspinorb = .false.
ibrav = 3
celldm(1) = 5.406
nat = 1
ntyp = 1
ecutwfc = 40
occupations = 'smearing'
smearing = 'gaussian'
degauss = 0.01
starting_magnetization(1) = 0.15
/
&electrons
mixing_beta = 0.3
conv_thr = 1.d-9
/
ATOMIC_SPECIES
Fe 55.85 Fe.pz-n-nc.UPF
ATOMIC_POSITIONS alat
Fe 0.00000000 0.00000000 0.00000000
K_POINTS automatic
4 4 4 0 0 0
&lr_input
prefix = 'Femag',
outdir = './tempdir',
restart_step = 200,
restart = .false.
/
&lr_control
itermax = 5000,
q1 = 0.1d0,
q2 = 0.1d0,
q3 = 0.0d0,
pseudo_hermitian = .true.
ipol = 2
/
&lr_input
prefix = 'Femag',
outdir = './tempdir',
magnons = .true.,
units = 3,
itermax0 = 5000,
itermax = 15000,
extrapolation='osc',
epsil = 0.5,
ipol = 2,
start = 0.0d0,
increment = 0.1d0,
end = 28.0d0
/
A step-by-step guide for running this tutorial can be found in the slides linked at the top of this page.
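Before launching turbo_magnon.x it is worth checking that the scf run converged to a magnetic solution. A minimal check, assuming the scf output is written to pw.Fe.scf.out as in the submit script (the magnetization lines are standard pw.x output for magnetic runs):

# converged total and absolute magnetization (Bohr magnetons per cell); the last occurrence is the converged value
grep "total magnetization" pw.Fe.scf.out | tail -n 1
grep "absolute magnetization" pw.Fe.scf.out | tail -n 1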