2024-07-08
# AI Summer Camp # Datawhale # Summer Camp
Besides the cross-validation used in the original baseline, there is another key optimization technique: feature engineering.

How we engineer the features bears directly on how much we can improve the model's prediction accuracy. Feature engineering is often the part where people with deep domain knowledge of the problem do best, because it requires thinking about which transformations of the raw data actually carry information.

Beyond the time features, there are many other features from which valuable information can be extracted. For example, an InChI string is made up of a series of layers and provides detailed information about the molecular structure, e.g. the starting prefix, the molecular formula, the connection table, hydrogen atom counts, the rotatable bond count, stereochemical information, isomer information, mixture or tautomer information, and charge and spin multiplicity information.
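To get a feel for these layers, you can simply split an InChI string on `/`. A minimal sketch (the string below is a standard textbook example for ethanol, not a molecule from the competition data):

```python
# Each '/'-separated segment after "InChI=1S" is one layer:
# the formula first, then prefixed layers such as c (connections) and h (hydrogens).
inchi = "InChI=1S/C2H6O/c1-2-3/h3H,2H2,1H3"  # illustrative example: ethanol
layers = inchi.split('/')
print(layers)  # ['InChI=1S', 'C2H6O', 'c1-2-3', 'h3H,2H2,1H3']
```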
In addition, if you want to improve the model's accuracy further, changing the model itself is also a good idea.
From the InChI string we can read the molecular formula directly from the /C47H61N7O6S layer. It tells us that the molecule consists of 47 carbon atoms, 61 hydrogen atoms, 7 nitrogen atoms, 6 oxygen atoms, and 1 sulfur atom.
The molecular weight can then be obtained by multiplying the atomic mass of each element by its atom count and summing the products.

For example:

- The atomic mass of carbon (C) is about 12.01 g/mol
- The atomic mass of hydrogen (H) is about 1.008 g/mol
- The atomic mass of nitrogen (N) is about 14.01 g/mol
- The atomic mass of oxygen (O) is about 16.00 g/mol
- The atomic mass of sulfur (S) is about 32.07 g/mol

Multiplying each mass by its count and adding everything up gives the molecular weight.
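Plugging in the numbers for C47H61N7O6S gives a quick sanity check; a minimal sketch in plain Python, using the same atomic masses as the code below:

```python
# 47*12.01 + 61*1.008 + 7*14.01 + 6*16.00 + 1*32.07 ≈ 852.10 g/mol
counts = {'C': 47, 'H': 61, 'N': 7, 'O': 6, 'S': 1}
masses = {'C': 12.01, 'H': 1.008, 'N': 14.01, 'O': 16.00, 'S': 32.07}
mw = sum(masses[el] * n for el, n in counts.items())
print(round(mw, 2))  # 852.1
```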
Next, we count the number of each kind of atom directly and expand those counts into separate feature columns.
```python
import pandas as pd
import re

atomic_masses = {
    'H': 1.008, 'He': 4.002602, 'Li': 6.94, 'Be': 9.0122, 'B': 10.81, 'C': 12.01,
    'N': 14.01, 'O': 16.00, 'F': 19.00, 'Ne': 20.180, 'Na': 22.990, 'Mg': 24.305,
    'Al': 26.982, 'Si': 28.085, 'P': 30.97, 'S': 32.07, 'Cl': 35.45, 'Ar': 39.95,
    'K': 39.10, 'Ca': 40.08, 'Sc': 44.956, 'Ti': 47.867, 'V': 50.942, 'Cr': 52.00,
    'Mn': 54.938, 'Fe': 55.845, 'Co': 58.933, 'Ni': 58.69, 'Cu': 63.55, 'Zn': 65.38
}

# Parse a single InChI string into formula, molecular weight, and atom counts
def parse_inchi(row):
    inchi_str = row['InChI']
    formula = ''
    molecular_weight = 0
    element_counts = {}

    # Extract the molecular formula (the layer between "InChI=1S/" and "/c")
    formula_match = re.search(r"InChI=1S/([^/]+)/c", inchi_str)
    if formula_match:
        formula = formula_match.group(1)

    # Compute the molecular weight and per-element atom counts
    for element, count in re.findall(r"([A-Z][a-z]*)([0-9]*)", formula):
        count = int(count) if count else 1
        element_mass = atomic_masses.get(element, 0)
        molecular_weight += element_mass * count  # accumulate, don't overwrite
        element_counts[element] = count

    return pd.Series({
        'Formula': formula,
        'MolecularWeight': molecular_weight,
        'ElementCounts': element_counts
    })

# Apply the function to every row of the DataFrame
train[['Formula', 'MolecularWeight', 'ElementCounts']] = train.apply(parse_inchi, axis=1)

# Define the element keys to expand into columns
keys = ['H', 'He', 'Li', 'Be', 'B', 'C', 'N', 'O', 'F', 'Ne', 'Na', 'Mg', 'Al', 'Si', 'P', 'S', 'Cl', 'Ar', 'K', 'Ca', 'Sc', 'Ti', 'V', 'Cr', 'Mn', 'Fe', 'Co', 'Ni', 'Cu', 'Zn']

# Create an empty DataFrame whose columns are the element keys
df_expanded = pd.DataFrame({key: pd.Series(dtype=float) for key in keys})

# Iterate over the parsed counts and fill the DataFrame
for index, item in enumerate(train['ElementCounts'].values):
    for key in keys:
        # Fill each column with the count from the dict (0 if the element is absent)
        df_expanded.at[index, key] = item.get(key, 0)
```
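To actually feed these counts to a model, the expanded columns need to be joined back onto the training features. A minimal sketch, assuming `train` and `df_expanded` share the same row order (as produced above), and that the same expansion is applied to the test set before prediction:

```python
# Concatenate the per-element count columns onto the training DataFrame
train = pd.concat([train, df_expanded], axis=1)
```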
Finally, as last time, we use the catboost model; we did not try lightgbm and xgboost. You can run all three models in turn and then average the predictions of the three models as a fusion (this is also an area that can be improved).
```python
import numpy as np
from sklearn.model_selection import KFold
from sklearn.metrics import f1_score

def cv_model(clf, train_x, train_y, test_x, clf_name, seed=2023):
    folds = 5
    kf = KFold(n_splits=folds, shuffle=True, random_state=seed)
    oof = np.zeros(train_x.shape[0])
    test_predict = np.zeros(test_x.shape[0])
    cv_scores = []

    for i, (train_index, valid_index) in enumerate(kf.split(train_x, train_y)):
        print('************************************ {} ************************************'.format(str(i + 1)))
        trn_x, trn_y, val_x, val_y = train_x.iloc[train_index], train_y[train_index], train_x.iloc[valid_index], train_y[valid_index]

        if clf_name == "lgb":
            train_matrix = clf.Dataset(trn_x, label=trn_y)
            valid_matrix = clf.Dataset(val_x, label=val_y)
            params = {
                'boosting_type': 'gbdt',
                'objective': 'binary',
                'min_child_weight': 6,
                'num_leaves': 2 ** 6,
                'lambda_l2': 10,
                'feature_fraction': 0.8,
                'bagging_fraction': 0.8,
                'bagging_freq': 4,
                'learning_rate': 0.35,
                'seed': 2024,
                'nthread': 16,
                'verbose': -1,
            }
            # note: verbose_eval/early_stopping_rounds is the pre-4.0 LightGBM API
            model = clf.train(params, train_matrix, 2000, valid_sets=[train_matrix, valid_matrix],
                              categorical_feature=[], verbose_eval=1000, early_stopping_rounds=100)
            val_pred = model.predict(val_x, num_iteration=model.best_iteration)
            test_pred = model.predict(test_x, num_iteration=model.best_iteration)

        if clf_name == "xgb":
            xgb_params = {
                'booster': 'gbtree',
                'objective': 'binary:logistic',
                'max_depth': 5,
                'lambda': 10,
                'subsample': 0.7,
                'colsample_bytree': 0.7,
                'colsample_bylevel': 0.7,
                'eta': 0.35,
                'tree_method': 'hist',
                'seed': 520,
                'nthread': 16
            }
            train_matrix = clf.DMatrix(trn_x, label=trn_y)
            valid_matrix = clf.DMatrix(val_x, label=val_y)
            test_matrix = clf.DMatrix(test_x)
            watchlist = [(train_matrix, 'train'), (valid_matrix, 'eval')]
            model = clf.train(xgb_params, train_matrix, num_boost_round=2000, evals=watchlist,
                              verbose_eval=1000, early_stopping_rounds=100)
            val_pred = model.predict(valid_matrix)
            test_pred = model.predict(test_matrix)

        if clf_name == "cat":
            params = {'learning_rate': 0.35, 'depth': 5, 'bootstrap_type': 'Bernoulli',
                      'od_type': 'Iter', 'od_wait': 100, 'random_seed': 11, 'allow_writing_files': False}
            model = clf(iterations=2000, **params)
            model.fit(trn_x, trn_y, eval_set=(val_x, val_y),
                      metric_period=1000,
                      use_best_model=True,
                      cat_features=[],
                      verbose=1)
            # predict_proba returns probabilities for both classes; keep the positive class
            val_pred = model.predict_proba(val_x)[:, 1]
            test_pred = model.predict_proba(test_x)[:, 1]

        oof[valid_index] = val_pred
        test_predict += test_pred / kf.n_splits  # accumulate the average over folds

        F1_score = f1_score(val_y, np.where(val_pred > 0.5, 1, 0))
        cv_scores.append(F1_score)
        print(cv_scores)

    return oof, test_predict
```
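With `cv_model` in place, the three-model fusion mentioned above could look like the sketch below. It assumes lightgbm, xgboost, and catboost are installed, that `train_x`, `train_y`, and `test_x` are the feature matrices built earlier, and that `cv_model` returns `(oof, test_predict)` as written; the equal-weight mean is only one choice, and weighted or stacked blends are exactly the improvement area noted earlier.

```python
import lightgbm as lgb
import xgboost as xgb
from catboost import CatBoostClassifier

# Run the same CV routine with each of the three models
lgb_oof, lgb_test = cv_model(lgb, train_x, train_y, test_x, 'lgb')
xgb_oof, xgb_test = cv_model(xgb, train_x, train_y, test_x, 'xgb')
cat_oof, cat_test = cv_model(CatBoostClassifier, train_x, train_y, test_x, 'cat')

# Equal-weight average of the three test predictions
final_test = (lgb_test + xgb_test + cat_test) / 3
```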