Commit a4b05839 authored by psd

Exercise-13

parent 44cbb98f
@@ -180,7 +180,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.7.1"
+   "version": "3.7.0"
   }
  },
  "nbformat": 4,
...
&control
verbosity='high'
calculation='scf'
restart_mode='from_scratch'
prefix='SI'
pseudo_dir = './',
outdir='./'
wf_collect=.true.
/
&system
ibrav=1
celldm(1)=10.26121694631474912930
nat=8,
ntyp=1,
ecutwfc = 30,
occupations='smearing',
degauss=0.001
/
&electrons
conv_thr = 1.0e-8
mixing_beta = 0.5
/
&ions
/
ATOMIC_SPECIES
Si 28.086 Si.pbe-mt_fhi.UPF
ATOMIC_POSITIONS crystal
Si 0.000000 0.000000 0.000000
Si 0.000000 0.500000 0.500000
Si 0.500000 0.000000 0.500000
Si 0.500000 0.500000 0.000000
Si 0.750000 0.250000 0.750000
Si 0.250000 0.250000 0.250000
Si 0.250000 0.750000 0.750000
Si 0.750000 0.750000 0.250000
K_POINTS automatic
1 1 1 0 0 0
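The long hard-coded celldm(1) above is nothing mysterious: it is the conventional silicon lattice constant a = 5.43 Å converted to Bohr, the same number the batch scripts below recompute on the fly with bc. A quick Python check of the conversion:

# celldm(1) is the Si lattice constant a = 5.43 Angstrom expressed in Bohr;
# the batch scripts below compute the same value with bc.
a_ang = 5.43
ang_per_bohr = 0.529177        # Bohr radius in Angstrom, as used in the scripts
print(a_ang / ang_per_bohr)    # 10.2612169463..., matching celldm(1) above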
#!/bin/bash --login
#SBATCH --job-name="TASK_0"
#SBATCH --nodes=1 # the number of nodes (total)
#SBATCH --ntasks-per-node=12 # the number of ranks per node
##SBATCH --ntasks-per-core=1 # enable this if you want hyperthreading
#SBATCH --cpus-per-task=1 # use this for threaded applications
#SBATCH --time=00:10:00
#SBATCH --constraint=gpu
#SBATCH --account=YOUR_ACCOUNT
##SBATCH --partition=normal
#SBATCH --output="c1"
module load daint-gpu
export CRAY_CUDA_MPS=1
module load QuantumESPRESSO/6.3-CrayIntel-18.08
#======START=====
a=5.43
alat=`echo "${a} / 0.529177" | bc -l`
cat >pw.in << EOF
&control
verbosity='high'
calculation='scf'
restart_mode='from_scratch'
prefix='SI'
pseudo_dir = './',
outdir='./'
wf_collect=.true.
/
&system
ibrav=1
celldm(1)=${alat}
nat=8,
ntyp=1,
ecutwfc = 30,
occupations='smearing',
degauss=0.001
/
&electrons
conv_thr = 1.0e-8
mixing_beta = 0.5
/
&ions
/
ATOMIC_SPECIES
Si 28.086 Si.pbe-mt_fhi.UPF
ATOMIC_POSITIONS crystal
Si 0.000000 0.000000 0.000000
Si 0.000000 0.500000 0.500000
Si 0.500000 0.000000 0.500000
Si 0.500000 0.500000 0.000000
Si 0.750000 0.250000 0.750000
Si 0.250000 0.250000 0.250000
Si 0.250000 0.750000 0.750000
Si 0.750000 0.750000 0.250000
K_POINTS automatic
1 1 1 0 0 0
EOF
srun -n $SLURM_NTASKS --ntasks-per-node=$SLURM_NTASKS_PER_NODE -c $SLURM_CPUS_PER_TASK --cpu_bind=rank --hint=nomultithread pw.x <pw.in > si.out
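pw.x marks the converged SCF total energy in its output with a leading '!'; once the job has finished, a minimal Python sketch like the following can pull that number out of si.out:

# Extract the converged SCF total energy (in Ry) from a pw.x output file.
def scf_total_energy(path):
    with open(path) as f:
        for line in f:
            # pw.x flags the converged energy with a leading '!'
            if line.startswith('!') and 'total energy' in line:
                return float(line.split('=')[1].split()[0])
    return None  # not converged / not found

print(scf_total_energy('si.out'))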
&control
verbosity='high'
calculation='scf'
restart_mode='from_scratch'
prefix='SI'
pseudo_dir = './',
outdir='./'
wf_collect=.true.
/
&system
ibrav=1
celldm(1)=10.26121694631474912930
nat=8,
ntyp=1,
ecutwfc = 30,
occupations='smearing',
degauss=0.001
/
&electrons
conv_thr = 1.0e-8
mixing_beta = 0.5
/
&ions
/
ATOMIC_SPECIES
Si 28.086 Si.pbe-mt_fhi.UPF
ATOMIC_POSITIONS crystal
Si 0.000000 0.000000 0.000000
Si 0.000000 0.500000 0.500000
Si 0.500000 0.000000 0.500000
Si 0.500000 0.500000 0.000000
Si 0.750000 0.250000 0.750000
Si 0.250000 0.250000 0.250000
Si 0.250000 0.750000 0.750000
Si 0.750000 0.750000 0.250000
K_POINTS automatic
2 2 2 0 0 0
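Relative to TASK_0, the input above changes only the K_POINTS mesh, from a single Γ point to an unshifted 2 2 2 grid; the accompanying batch script is otherwise identical.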
#!/bin/bash --login
#SBATCH --job-name="TASK_0b"
#SBATCH --nodes=1 # the number of nodes (total)
#SBATCH --ntasks-per-node=12 # the number of ranks per node
##SBATCH --ntasks-per-core=1 # enable this if you want hyperthreading
#SBATCH --cpus-per-task=1 # use this for threaded applications
#SBATCH --time=00:10:00
#SBATCH --constraint=gpu
#SBATCH --account=YOUR_ACCOUNT
##SBATCH --partition=normal
#SBATCH --output="c1"
module load daint-gpu
export CRAY_CUDA_MPS=1
module load QuantumESPRESSO/6.3-CrayIntel-18.08
a=5.43
alat=`echo "${a} / 0.529177" | bc -l`
cat >pw.in << EOF
&control
verbosity='high'
calculation='scf'
restart_mode='from_scratch'
prefix='SI'
pseudo_dir = './',
outdir='./'
wf_collect=.true.
/
&system
ibrav=1
celldm(1)=${alat}
nat=8,
ntyp=1,
ecutwfc = 30,
occupations='smearing',
degauss=0.001
/
&electrons
conv_thr = 1.0e-8
mixing_beta = 0.5
/
&ions
/
ATOMIC_SPECIES
Si 28.086 Si.pbe-mt_fhi.UPF
ATOMIC_POSITIONS crystal
Si 0.000000 0.000000 0.000000
Si 0.000000 0.500000 0.500000
Si 0.500000 0.000000 0.500000
Si 0.500000 0.500000 0.000000
Si 0.750000 0.250000 0.750000
Si 0.250000 0.250000 0.250000
Si 0.250000 0.750000 0.750000
Si 0.750000 0.750000 0.250000
K_POINTS automatic
2 2 2 0 0 0
EOF
srun -n $SLURM_NTASKS --ntasks-per-node=$SLURM_NTASKS_PER_NODE -c $SLURM_CPUS_PER_TASK --cpu_bind=rank --hint=nomultithread pw.x <pw.in > si.out
&control
verbosity='high'
calculation='scf'
restart_mode='from_scratch'
prefix='SI'
pseudo_dir = './',
outdir='./'
wf_collect=.true.
/
&system
ibrav=1
celldm(1)=10.26121694631474912930
nat=8,
ntyp=1,
ecutwfc = 30,
occupations='smearing',
degauss=0.001
/
&electrons
conv_thr = 1.0e-8
mixing_beta = 0.5
/
&ions
/
ATOMIC_SPECIES
Si 28.086 Si.pbe-mt_fhi.UPF
ATOMIC_POSITIONS crystal
Si 0.000000 0.000000 0.000000
Si 0.000000 0.500000 0.500000
Si 0.500000 0.000000 0.500000
Si 0.500000 0.500000 0.000000
Si 0.750000 0.250000 0.750000
Si 0.250000 0.250000 0.250000
Si 0.250000 0.750000 0.750000
Si 0.750000 0.750000 0.250000
K_POINTS automatic
2 2 2 1 1 1
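This variant keeps the 2 2 2 mesh but adds offsets 1 1 1, which in QE's K_POINTS automatic convention displaces the grid by half a grid spacing along each direction, so the mesh no longer contains Γ. A small sketch of the generated fractional k-points under that convention (before folding to the first Brillouin zone and symmetry reduction):

import itertools

# Fractional coordinates of an n1 x n2 x n3 grid; an offset of 1 shifts the
# points by half a grid spacing (QE's K_POINTS automatic convention).
def mp_grid(n, offsets):
    return [tuple((i + off / 2.0) / ni for i, ni, off in zip(idx, n, offsets))
            for idx in itertools.product(*(range(ni) for ni in n))]

print(mp_grid((2, 2, 2), (0, 0, 0)))  # unshifted: includes (0, 0, 0)
print(mp_grid((2, 2, 2), (1, 1, 1)))  # shifted: starts at (0.25, 0.25, 0.25)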
#!/bin/bash --login
#SBATCH --job-name="TASK_0c"
#SBATCH --nodes=1 # the number of nodes (total)
#SBATCH --ntasks-per-node=12 # the number of ranks per node
##SBATCH --ntasks-per-core=1 # enable this if you want hyperthreading
#SBATCH --cpus-per-task=1 # use this for threaded applications
#SBATCH --time=00:10:00
#SBATCH --constraint=gpu
#SBATCH --account=YOUR_ACCOUNT
##SBATCH --partition=normal
#SBATCH --output="c1"
module load daint-gpu
export CRAY_CUDA_MPS=1
module load QuantumESPRESSO/6.3-CrayIntel-18.08
a=5.43
alat=`echo "${a} / 0.529177" | bc -l`
cat >pw.in << EOF
&control
verbosity='high'
calculation='scf'
restart_mode='from_scratch'
prefix='SI'
pseudo_dir = './',
outdir='./'
wf_collect=.true.
/
&system
ibrav=1
celldm(1)=${alat}
nat=8,
ntyp=1,
ecutwfc = 30,
occupations='smearing',
degauss=0.001
/
&electrons
conv_thr = 1.0e-8
mixing_beta = 0.5
/
&ions
/
ATOMIC_SPECIES
Si 28.086 Si.pbe-mt_fhi.UPF
ATOMIC_POSITIONS crystal
Si 0.000000 0.000000 0.000000
Si 0.000000 0.500000 0.500000
Si 0.500000 0.000000 0.500000
Si 0.500000 0.500000 0.000000
Si 0.750000 0.250000 0.750000
Si 0.250000 0.250000 0.250000
Si 0.250000 0.750000 0.750000
Si 0.750000 0.750000 0.250000
K_POINTS automatic
2 2 2 1 1 1
EOF
srun -n $SLURM_NTASKS --ntasks-per-node=$SLURM_NTASKS_PER_NODE -c $SLURM_CPUS_PER_TASK --cpu_bind=rank --hint=nomultithread pw.x <pw.in > si.out
&control
verbosity='high'
calculation='scf'
restart_mode='from_scratch'
prefix='SI'
pseudo_dir = './',
outdir='./'
wf_collect=.true.
/
&system
ibrav=1
celldm(1)=10.26121694631474912930
nat=8,
ntyp=1,
ecutwfc = 30,
occupations='smearing',
degauss=0.001
/
&electrons
conv_thr = 1.0e-8
mixing_beta = 0.5
/
&ions
/
ATOMIC_SPECIES
Si 28.086 Si.pbe-mt_fhi.UPF
ATOMIC_POSITIONS crystal
Si 0.000000 0.000000 0.000000
Si 0.000000 0.500000 0.500000
Si 0.500000 0.000000 0.500000
Si 0.500000 0.500000 0.000000
Si 0.750000 0.250000 0.750000
Si 0.250000 0.250000 0.250000
Si 0.250000 0.750000 0.750000
Si 0.750000 0.750000 0.250000
K_POINTS automatic
3 3 3 0 0 0
#!/bin/bash --login
#SBATCH --job-name="TASK_1"
#SBATCH --nodes=1 # the number of nodes (total)
#SBATCH --ntasks-per-node=12 # the number of ranks per node
##SBATCH --ntasks-per-core=1 # enable this if you want hyperthreading
#SBATCH --cpus-per-task=1 # use this for threaded applications
#SBATCH --time=00:10:00
#SBATCH --constraint=gpu
#SBATCH --account=YOUR_ACCOUNT
##SBATCH --partition=normal
#SBATCH --output="c1"
module load daint-gpu
export CRAY_CUDA_MPS=1
module load QuantumESPRESSO/6.3-CrayIntel-18.08
a=5.43
alat=`echo "${a} / 0.529177" | bc -l`
cat >pw.in << EOF
&control
verbosity='high'
calculation='scf'
restart_mode='from_scratch'
prefix='SI'
pseudo_dir = './',
outdir='./'
wf_collect=.true.
/
&system
ibrav=1
celldm(1)=${alat}
nat=8,
ntyp=1,
ecutwfc = 30,
occupations='smearing',
degauss=0.001
/
&electrons
conv_thr = 1.0e-8
mixing_beta = 0.5
/
&ions
/
ATOMIC_SPECIES
Si 28.086 Si.pbe-mt_fhi.UPF
ATOMIC_POSITIONS crystal
Si 0.000000 0.000000 0.000000
Si 0.000000 0.500000 0.500000
Si 0.500000 0.000000 0.500000
Si 0.500000 0.500000 0.000000
Si 0.750000 0.250000 0.750000
Si 0.250000 0.250000 0.250000
Si 0.250000 0.750000 0.750000
Si 0.750000 0.750000 0.250000
K_POINTS automatic
3 3 3 0 0 0
EOF
srun -n $SLURM_NTASKS --ntasks-per-node=$SLURM_NTASKS_PER_NODE -c $SLURM_CPUS_PER_TASK --cpu_bind=rank --hint=nomultithread pw.x <pw.in > si.out
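Once all four jobs have finished, the total energies can be tabulated against the k-mesh to judge convergence. A sketch, assuming each job ran in its own directory (the paths below are hypothetical; adjust them to wherever each si.out actually lives):

# Tabulate the converged SCF total energy for each k-mesh.
runs = {'1x1x1 (TASK_0)': 'task_0/si.out',
        '2x2x2 (TASK_0b)': 'task_0b/si.out',
        '2x2x2 shifted (TASK_0c)': 'task_0c/si.out',
        '3x3x3 (TASK_1)': 'task_1/si.out'}
for mesh, path in runs.items():
    with open(path) as f:
        # pw.x flags the converged SCF total energy with a leading '!'
        energies = [float(line.split('=')[1].split()[0])
                    for line in f if line.startswith('!') and 'total energy' in line]
    print(f'{mesh:>24}: {energies[-1]:.8f} Ry')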
#!/users/keimre/soft/miniconda3/bin/python
import numpy as np
import xml.etree.ElementTree as et
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import argparse
ang_2_bohr = 1.0/0.52917721067
hart_2_ev = 27.21138602
parser = argparse.ArgumentParser(
    description='Plots QE bands.')
parser.add_argument(
    'qe_xml',
    metavar='FILENAME',
    help='QE xml file.')
args = parser.parse_args()
def read_band_data_new_xml(xml_file):
    """
    Reads data from QE bands calculations (new XML)
    Returns:
      - kpts[i_kpt] = [kx, ky, kz] in [2*pi/a]
      - eigvals[i_kpt, i_band] in [eV]
      - fermi_en in [eV]
    """
    data_file_xml = et.parse(xml_file)
    data_file_root = data_file_xml.getroot()
    output_node = data_file_root.find('output')
    # Find the Fermi energy (stored in Hartree, converted to eV)
    band_node = output_node.find('band_structure')
    fermi_en = float(band_node.find('fermi_energy').text) * hart_2_ev
    spinorbit = band_node.find('spinorbit').text  # read but currently unused
    kpts = []
    eigvals = []
    for kpt in band_node.findall("ks_energies"):
        k_coords = np.array(kpt.find('k_point').text.split(), dtype=float)
        kpts.append(k_coords)
        eig_vals = np.array(kpt.find('eigenvalues').text.split(), dtype=float)
        eigvals.append(eig_vals * hart_2_ev)
    kpts = np.array(kpts)
    eigvals = np.array(eigvals)
    return kpts, eigvals, fermi_en
kpts, eigvals, fermi_en = read_band_data_new_xml(args.qe_xml)

# Cumulative distance along the k-path, used as the x axis
k_arr = [0.0]
cum_sum = 0.0
for i_k in range(1, len(kpts)):
    k0 = kpts[i_k - 1]
    k1 = kpts[i_k]
    dk = k1 - k0
    cum_sum += np.linalg.norm(dk)
    k_arr.append(cum_sum)

# Label the high-symmetry points of the fcc Brillouin zone
x_ticks = {}
for i_k, kpt in enumerate(kpts):
    if np.allclose(kpt, np.array([0.0, 0.0, 0.0])):
        x_ticks['G'] = k_arr[i_k]
    if np.allclose(kpt, np.array([-0.5, 0.5, 0.5])):
        x_ticks['L'] = k_arr[i_k]
    if np.allclose(kpt, np.array([0.5, 0.5, 0.5])):
        x_ticks['L'] = k_arr[i_k]
    if np.allclose(kpt, np.array([-1.0, 0.0, 0.0])):
        x_ticks['X'] = k_arr[i_k]
    if np.allclose(kpt, np.array([0.5, 0.0, 0.5])):
        x_ticks['X'] = k_arr[i_k]

plt.figure(figsize=(6, 10))
plt.plot(k_arr, eigvals, 'b', lw=2.0)
plt.xlim([0.0, np.max(k_arr)])
plt.ylim([np.min(eigvals), np.max(eigvals)])
plt.xticks(list(x_ticks.values()), list(x_ticks.keys()))
for xtick in list(x_ticks.values()):
    plt.axvline(xtick, color='gray')
plt.ylabel("energy [eV]")
plt.savefig("./bands.png", dpi=200)
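The script is pointed at the XML file written by pw.x, e.g. python plot_bands.py SI.xml (the script name here is arbitrary; with prefix='SI' the file may be SI.xml in outdir or data-file-schema.xml inside SI.save/, depending on the QE version), and saves the band structure as bands.png in the working directory.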