Permalink
Cannot retrieve contributors at this time
Name already in use
A tag already exists with the provided branch name. Many Git commands accept both tag and branch names, so creating this branch may cause unexpected behavior. Are you sure you want to create this branch?
nmf/nmf/nmf.py
Go to fileThis commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
231 lines (212 sloc)
9.75 KB
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
__author__ = "Kaustubh Sawant" | |
import sympy as sp | |
import pandas as pd | |
from itertools import combinations | |
from nmf.util import alphabet,divide_adsorbate,cluster_draw | |
from scipy.optimize import fsolve | |
class cluster:
    """Cluster model for adsorption on a site graph.

    On construction it builds the symbolic adsorption energies (y),
    adsorbate-adsorbate interaction tables (h), end-atom energies (a),
    the partition function, and the coverage-consistency equations.
    """

    def __init__(self, graph, num_ads, ads_names=None, central_site=None):
        # The site graph is expected to be a networkx-style Graph.
        assert type(graph).__name__ == 'Graph', "Not a Graph"
        self.graph = graph
        # Every site starts unoccupied ('-').
        for node in self.graph:
            self.graph.nodes[node]['occ'] = '-'
        self.num_ads = num_ads
        # Adsorbate labels: caller-supplied, or letters from the shared helper.
        self.names = ads_names if ads_names else [alphabet[i] for i in range(self.num_ads)]
        # Distinct end-site types, excluding the central site ("c" by default).
        self.end_sites = {data['typ'] for _, data in self.graph.nodes(data=True)}
        self.end_sites.remove(central_site if central_site else "c")
        # Distinct bond weights present in the graph.
        self.bond_types = {data['weight'] for _, _, data in self.graph.edges(data=True)}
        self.params = {}
        self.make_y()      # Energy for adsorption
        self.make_h()      # Adsorbate-Adsorbate interaction
        self.make_a()      # Energy of end atoms
        self.pprint()      # Print details of class
        self.make_partition_function()
        self.make_consistency_equations()

    def __repr__(self):
        return 'Cluster'
def make_y(self): | |
#Return list of symbol y with length of list equal to number of adsorbates. | |
self.y=[] | |
for i in range(0,self.num_ads): | |
y_symbol = sp.symbols(f"y_{self.names[i]}") | |
self.y.append(y_symbol) | |
self.params[f"{y_symbol}"]= y_symbol | |
return | |
def make_h(self): | |
''' | |
Return list of pandas dataframse. | |
Make dataframe for each type of bond. | |
Each dataframe is (number of adsorbates x number of adsorbates). | |
''' | |
self.h={} | |
index = [self.names[i] for i in range(self.num_ads)] | |
for bond in self.bond_types: | |
df = pd.DataFrame(index=index,columns=index) | |
for i in range(self.num_ads): | |
h_single = sp.symbols(f"h_{bond}_{self.names[i]}") | |
df[self.names[i]][self.names[i]]=h_single | |
self.params[f"{h_single}"]= h_single | |
if self.num_ads>1: | |
for sets in combinations(index,2): | |
h_dual = sp.symbols(f"h_{bond}_{sets[0]}_{sets[1]}") | |
df[sets[0]][sets[1]]=h_dual | |
df[sets[1]][sets[0]]=h_dual | |
self.params[f"{h_dual}"]= h_dual | |
self.h[f'{bond}']=df | |
return | |
def make_a(self): | |
''' | |
Return list of dictionaries with length of list equal to number of adsorbates. | |
Each dictionary consists of symbol for each end node type. | |
''' | |
self.a=[] | |
self.unknown_list=[] | |
for i in range(0,self.num_ads): | |
a_i = {} | |
for j,site in enumerate(self.end_sites): | |
a_i_symbol = sp.symbols(f"a_{i+1}_{j+1}", real=True) | |
a_i[f'{site}'] = a_i_symbol | |
self.unknown_list.append(a_i_symbol) | |
self.params[f"{a_i_symbol}"]= a_i_symbol | |
self.a.append(a_i) | |
return | |
def get_edge_value(self,u,v,w,G): | |
node1 = G.nodes[u] | |
node2 = G.nodes[v] | |
if node1['occ'] != '-' and node2['occ'] != '-': | |
if w['weight'] == 0: | |
return 0 | |
i = 1 | |
for key, value in self.h.items(): | |
if w['weight'] == int(key): | |
return value[node1['occ']][node2['occ']] | |
i+=1 | |
else: | |
return 1 | |
def make_partition_function(self):
    """Build the cluster partition function by explicit enumeration.

    Loops over every number of occupied sites, every choice of those
    sites, and every way of distributing them among the adsorbates
    (via the project helper ``divide_adsorbate``). Each configuration
    contributes a product of y-, a-, and edge factors to
    ``self.total_expr``, and each (graph snapshot, term) pair is
    recorded in ``self.complete_site_list`` for the consistency
    equations.

    Returns:
        The accumulated sympy expression ``self.total_expr``.
    """
    print("\nCalculating partition function")
    self.complete_site_list = []
    # Work on a copy so occupation labels can be mutated freely.
    G = self.graph.copy()
    self.total_expr = 1  # initialize the expression (1 = bare cluster)
    for occ in range(1, len(G) + 1):  # loop over number of sites to be filled
        for sets in combinations(G, occ):  # loop over site choices for this filling
            all_lists = divide_adsorbate(self.num_ads, sets)  # divide sites among adsorbates
            for lists in all_lists:  # loop over all the possibilities
                th = 1  # initialize this configuration's term
                for i in range(self.num_ads):  # loop over adsorbates
                    for j in lists[i]:  # loop over the sites this adsorbate occupies
                        G.nodes[j]["occ"] = self.names[i]
                        th *= self.y[i]  # adsorption-energy factor
                        for k in self.end_sites:  # end-atom factor for end-type sites
                            if G.nodes[j]["typ"] == k:
                                th *= self.a[i][k]
                # Multiply in the interaction factor of every edge of the
                # now-occupied graph.
                for u, v, w in G.edges(data=True):
                    th *= self.get_edge_value(u, v, w, G)
                self.total_expr += th
                # Snapshot the occupied graph with its term for later use.
                self.complete_site_list.append({'graph': G.copy(), 'expr': th, })
                # Reset all occupations before the next configuration.
                for i in G.nodes():
                    G.nodes[i]["occ"] = '-'
    #print(f"z={self.total_expr}")
    return self.total_expr
def make_consistency_equations(self):
    """Derive the coverage-consistency equations from the partition terms.

    For every node, accumulate the partition-function terms in which
    that node is occupied by each adsorbate (``self.node_eqns``).
    Equivalent sites must have equal coverage, so for each
    (adsorbate, end-site type) pair one equation
    ``node(i) - node(0) == 0`` is generated using the first node of
    that type. Also fills ``self.theta_eqns`` (coverage expressions:
    node sum divided by the total partition function) and creates an
    empty ``self.theta`` table for numeric results.
    """
    self.complete_df = pd.DataFrame(self.complete_site_list)
    # Make an empty term-accumulator dict (one per adsorbate) for each node.
    # NOTE(review): self.node_eqns is indexed directly by node label below,
    # so node labels are assumed to be 0..N-1 integers — confirm.
    self.node_eqns = []
    for node in self.graph.nodes():
        node_terms = {f"{self.names[ads]}": 0 for ads in range(self.num_ads)}
        self.node_eqns.append(node_terms)
    # Fill the accumulators with the partition-function terms.
    for _, row in self.complete_df.iterrows():
        sub_G = row['graph']
        for node, data in sub_G.nodes(data=True):
            for ads in range(self.num_ads):
                if data['occ'] == self.names[ads]:
                    self.node_eqns[node][f"{self.names[ads]}"] += row['expr']
    # Make the eqns of the form node(i) - node(0) == 0.
    self.const_eqns = []
    self.const_eqns_expr = []
    index = [self.names[i] for i in range(self.num_ads)]
    self.theta_eqns = pd.DataFrame(index=index, columns=self.end_sites)
    print("\nCalculating Consistency Equations")
    idx = 1
    for ads in range(self.num_ads):
        for site in self.end_sites:
            for node, data in self.graph.nodes(data=True):
                if data['typ'] == site:
                    print(f"Eqn{idx}: node {node} - node 0 == 0 (Site : {site}, Ads : {self.names[ads]})")
                    diff = self.node_eqns[node][self.names[ads]] - self.node_eqns[0][self.names[ads]]
                    self.const_eqns.append(sp.Eq(diff, 0))
                    self.const_eqns_expr.append(diff)
                    # .at replaces deprecated chained-indexing assignment
                    # (df[col][row] = ...), broken under pandas copy-on-write.
                    self.theta_eqns.at[f"{self.names[ads]}", f"{site}"] = self.node_eqns[node][f"{self.names[ads]}"] / self.total_expr
                    idx += 1
                    # Only the first node of this site type is needed.
                    break
    self.theta = pd.DataFrame(index=index, columns=self.end_sites)
    # ln flag: 0 = unknowns solved directly; set to 1 by convert_ln().
    self.ln = 0
    return
def get_theta(self,params, initial_guess=None, maxsteps=50, check_params=True, verify=False): | |
if check_params: | |
key_list = list(params.keys()) | |
for i in self.y: | |
assert str(i) in key_list, "Missing Input Values" | |
#sub_eqns = [None] * len(self.const_eqns) | |
sub_eqns_expr = [None] * len(self.const_eqns) | |
initial_guess1 = [0.5] * len(self.unknown_list) | |
if initial_guess: | |
initial_guess1 = initial_guess | |
for idx,eqn in enumerate(self.const_eqns): | |
#sub_eqns[idx] = eqn.subs(params) | |
sub_eqns_expr[idx] = self.const_eqns_expr[idx].subs(params) | |
#self.roots = sp.nsolve(sub_eqns,self.unknown_list,initial_guess1,maxsteps=maxsteps, verify=verify) | |
self.f = sp.lambdify([self.unknown_list],sub_eqns_expr,'numpy') | |
self.roots = fsolve(self.f,initial_guess1) | |
for index, row in self.theta_eqns.iterrows(): | |
for key in row.keys(): | |
self.theta[key][row.name] = row[key].subs(params) | |
for idx,a in enumerate(self.unknown_list): | |
if self.ln: | |
self.theta[key][row.name] = self.theta[key][row.name].subs(a,sp.exp(self.roots[idx])) | |
else: | |
self.theta[key][row.name] = self.theta[key][row.name].subs(a,self.roots[idx]) | |
return self.theta | |
def convert_ln(self): | |
for idx,eqn in enumerate(self.const_eqns): | |
for a in self.unknown_list: | |
self.const_eqns_expr[idx] = self.const_eqns_expr[idx].subs(a,sp.exp(a)) | |
self.ln = 1 | |
return | |
def add_unknown(self,symbol,eqn): | |
if symbol not in self.unknown_list: | |
self.unknown_list.append(symbol) | |
self.const_eqns.append(eqn) | |
self.const_eqns_expr.append(eqn.lhs) | |
return | |
def pprint(self): | |
#Print all the symbols used. | |
print("Known Terms") | |
print("Ads\tSymbol") | |
for idx,i in enumerate(self.y): | |
print(f"{self.names[idx]}\t{i}") | |
print("\nAds\tAds\tBond\tInteraction-Symbol") | |
for bond in self.h: | |
if bond!='0': | |
for ads1 in self.h[bond]: | |
for ads2, h_symbol in self.h[bond][ads1].iteritems(): | |
print(f"{ads1}\t{ads2}\t{bond}\t{h_symbol}") | |
if h_symbol in self.unknown_list: | |
print(f"{h_symbol} is unknown") | |
print("\nUnknown Terms") | |
print("Ads\tSite\tEnergy-Symbol") | |
for idx,ads1 in enumerate(self.a): | |
for key,value in ads1.items(): | |
print(f"{self.names[idx]}\t{key}\t{value}") | |
return | |
def draw(self):
    # Delegate visualization of the site graph to the shared util helper.
    return cluster_draw(self.graph)