# File: //opt/cloudlinux/venv/lib64/python3.11/site-packages/lveapi.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright © Cloud Linux GmbH & Cloud Linux Software, Inc 2010-2019 All Rights Reserved
#
# Licensed under CLOUD LINUX LICENSE AGREEMENT
# http://cloudlinux.com/docs/LICENSE.TXT
import os
import syslog
import pwd
from typing import Optional # NOQA
from clcommon.clproc import ProcLve
from clcommon import cpapi, ClPwd
from clcommon.cpapi.cpapiexceptions import NotSupported
from clveconfig import ve_config
from lve_utils.pylve_wrapper import PyLveError, PyLve # noqa: F401 — re-exported for callers
from websiteisolation import config as _ws_config
from websiteisolation import id_registry as _ws_id_registry
from websiteisolation.exceptions import LvdError
# Tag name of the ve.cfg XML elements that carry the reseller name/id mapping
# (see NameMap._try_get_xml_node below).
LVP_XML_TAG_NAME = "reseller"
# Bit flags (1 << n) — presumably mirror liblve's LVE_NO_* setup flags;
# NOTE(review): not used in this chunk, confirm meaning against liblve headers.
LVE_NO_UBC = 1 << 1
LVE_NO_MAXENTER = 1 << 2
class NameMapError(Exception):
    """Base class for all NameMap-related errors."""
    pass
class NameMapConfigError(NameMapError):
    """Raised when reseller data cannot be loaded from ve.cfg
    (wraps ve_config.BadVeConfigException)."""
    pass
class NameMapNotInitialized(NameMapError):
    """Raised when a NameMap is queried before link_xml_node() was called."""
    pass
class NameMap:
    """
    Container for the resellers_name <=> resellers_id map.
    Backend storage is the ve.cfg file.
    Usage:
    >>> name_map = NameMap()
    >>> name_map.link_xml_node()
    >>> name_map.id_list()
    [1001]
    """

    def __init__(self, xml_tag_name=LVP_XML_TAG_NAME):
        # XML tag whose elements carry the mapping ('reseller' by default)
        self._xml_tag_name = xml_tag_name
        # Deprecated: raw XML node kept when link_xml_node(xml_node=...) is used
        self._xml_node = None
        # Reseller mapping as a list of (id, name) tuples; None = not initialized
        self._reseller_id_name_map = None

    def get_id(self, name):
        """Return the id mapped to reseller *name*, or None when unknown."""
        for name_, id_ in self.load_from_node():
            if name_ == name:
                return id_
        return None

    def get_name(self, id_):
        """Return the reseller name mapped to *id_*, or None when unknown."""
        for name_, _id in self.load_from_node():
            if id_ == _id:
                return name_
        return None

    def id_list(self):
        """Return a list of all known reseller ids."""
        return [id_ for _, id_ in self.load_from_node()]

    def link_xml_node(self, xml_node=None, use_cache=True):
        """
        Initialize NameMap. If xml_node is None,
        config will be loaded automatically from ve.cfg.
        :param use_cache: Bool whether bypass ve.cfg xml cache
        :param xml_node: !! DEPRECATED PARAM !!
            this param is left only for
            compatibility with our old code
        """
        if xml_node is None:
            # New mode: load (reseller_id, reseller_name) pairs from ve.cfg
            self._xml_node = None
            self._load_resellers_map_from_ve_cfg(use_cache)
        else:
            # For compatibility with our old code
            self._xml_node = xml_node
            self._reseller_id_name_map = None

    def _load_resellers_map_from_ve_cfg(self, use_cache):
        """
        Fill self._reseller_id_name_map from the ve.cfg file.
        :raises NameMapConfigError: when ve.cfg cannot be parsed
        """
        self._reseller_id_name_map = []
        # BUG FIX: the previous duplicate check tested
        # `id_ not in self._reseller_id_name_map`, i.e. a bare int against a
        # list of (id, name) tuples — it never matched, so duplicate ids were
        # not filtered out. Track seen ids in a separate set instead.
        seen_ids = set()
        ve_cfg, xml_node = self._try_get_xml_node(use_cache=use_cache)
        for el_ in xml_node:
            name = el_.getAttribute('user')
            # NOTE(review): a missing/non-numeric 'id' attribute raises
            # ValueError here, same as the original behavior.
            id_ = int(el_.getAttribute('id'))
            if name and id_ and id_ not in seen_ids:
                seen_ids.add(id_)
                self._reseller_id_name_map.append((id_, name))
        # Force delete XML object to avoid high memory load
        del xml_node
        del ve_cfg

    def _try_get_xml_node(self, use_cache=True):
        """Return (ve_cfg, reseller elements) from ve.cfg.
        :raises NameMapConfigError: when ve.cfg is broken
        """
        try:
            ve_cfg, xml_node = ve_config.get_xml_config(use_cache=use_cache)
        except ve_config.BadVeConfigException as e:
            # Reset the map so later queries raise NameMapNotInitialized
            self._reseller_id_name_map = None
            raise NameMapConfigError("Error happened while loading data from ve.cfg") from e
        return ve_cfg, xml_node.getElementsByTagName(self._xml_tag_name)

    def load_from_node(self):
        """
        Obtain data as a (name, id_) generator.
        :raises NameMapNotInitialized: when link_xml_node() was never called
        """
        if self._xml_node is None and self._reseller_id_name_map is None:
            raise NameMapNotInitialized('Name map is not initialized. '
                                        'Use obj.link_xml_node() to get data from config')
        if self._xml_node:
            # For compatibility with our old code
            for el_ in self._xml_node.getElementsByTagName(self._xml_tag_name):
                name = el_.getAttribute('user')
                id_ = int(el_.getAttribute('id'))
                if name and id_:
                    yield name, id_
        if self._reseller_id_name_map:
            # New mode, use resellers map
            for id_, name in self._reseller_id_name_map:
                yield name, id_
class LvpMap:
    """
    Container for storing information about lve:lvp mapping
    (in which reseller container an lve is stored).
    """

    def __init__(self):
        self.name_map = NameMap()
        # In-memory caches, filled lazily via _add_map()
        self._id_name_map = {}
        self._name_id_map = {}
        # Lazily fetched reseller name -> id pairs from the control panel
        self._reseller_id_map_panel = None
        self._pwd = ClPwd()

    def _add_map(self, name, id_):
        """Cache a name<->id pair in both lookup directions."""
        self._id_name_map[id_] = name
        self._name_id_map[name] = id_

    def pw_uid(self, name, default=None):
        """Return uid of *name* from the passwd database, or *default*
        when the user is absent."""
        try:
            return self._pwd.get_pw_by_name(name).pw_uid
        except ClPwd.NoSuchUserException:
            return default

    def _get_panel_reseller_id(self, reseller):
        # type: (str) -> Optional[int]
        """Resolve reseller id: passwd file first, control panel second.
        :raises NotSupported: when the panel API lacks reseller id pairs
        """
        uid = self.pw_uid(reseller)
        if uid is not None:
            return uid
        # in case when we cannot find reseller in passwd file
        # let's ask control panel for reseller's id
        if self._reseller_id_map_panel is None:
            self._reseller_id_map_panel = cpapi.get_reseller_id_pairs()
        return self._reseller_id_map_panel.get(reseller)

    def get_reseller_id(self, name):
        # type: (str) -> Optional[int]
        """
        Convert reseller name to an LVE id.
        It supports resellers without a system account (for Plesk compatibility).
        """
        uid = self.name_map.get_id(name) or self._name_id_map.get(name)
        if uid is not None:
            return uid
        try:
            uid = self._get_panel_reseller_id(name)
        except NotSupported:
            uid = None
        if uid is not None:
            self._add_map(name, uid)
        return uid

    def get_reseller_name(self, id_):
        # type: (int) -> Optional[str]
        """
        Convert reseller id to reseller name.
        It supports resellers without a system account (for Plesk compatibility).
        """
        # check ve.cfg mapping and the in-memory cache first
        name = self.name_map.get_name(id_) or self._id_name_map.get(id_)
        if name is not None:
            return name
        try:
            name = pwd.getpwuid(id_).pw_name
            if cpapi.is_reseller(name):
                self._add_map(name, id_)
            else:
                name = None
        except KeyError:
            name = None
        return name

    def lve_lvp_pairs(self):
        """
        This method loops over all user:reseller pairs in control panel
        and returns appropriate lve_id:lvp_id pairs.
        THIS METHOD WON'T CHECK IF 'RESELLER LIMITS' IS ENABLED IN ve.cfg
        """
        resellers = set(cpapi.resellers())
        reseller_uids = {}
        for reseller in resellers:
            try:
                reseller_uids[reseller] = self.get_reseller_id(reseller)
            except NotSupported:
                syslog.syslog(
                    syslog.LOG_WARNING, f"Reseller {reseller} still exists in control panel, "
                    "but absent in /etc/passwd file")
        for cplogin, reseller in cpapi.cpinfo(keyls=('cplogin', 'reseller')):
            lve_id = self.pw_uid(cplogin)
            # for some reasons (process of destroying user died
            # or admin called 'pure' userdel), user may still exist in control panel
            # but absent in /etc/passwd file; we can do nothing with that,
            # so just skip and write a warning to syslog
            if lve_id is None:
                syslog.syslog(
                    syslog.LOG_WARNING, f"user {cplogin} still exists in control panel, "
                    "but absent in /etc/passwd file")
                continue
            # BUG FIX: get_reseller_id() may return None, which was stored in
            # reseller_uids and then yielded as lvp_id (the `.get(reseller, 0)`
            # default does not apply to a stored None). Normalize to 0 (root
            # LVP) so callers always receive an int.
            lvp_id = reseller_uids.get(reseller) or 0
            yield lve_id, lvp_id

    @staticmethod
    def resellers():
        """Yield reseller names reported by the control panel."""
        yield from cpapi.resellers()

    @staticmethod
    def reseller_uids(name):
        """
        Obtain from control panel the uids of reseller *name*'s users.
        Users missing from /etc/passwd are skipped with a syslog warning.
        """
        uids = []
        reseller_users = cpapi.reseller_users(name)
        for user in reseller_users:
            try:
                id_ = pwd.getpwnam(user).pw_uid
                uids.append(id_)
            except KeyError:
                syslog.syslog(
                    syslog.LOG_WARNING, f"user {user} still exists in control panel, "
                    "but absent in /etc/passwd file")
        return uids

    def lvp_lve_id_list(self, lvp_id):
        """Return uids of all users that belong to the reseller *lvp_id*."""
        reseller_name = self.get_reseller_name(lvp_id)
        return self.reseller_uids(reseller_name)
class Lve:
    """High-level LVE management facade: combines /proc/lve state (`proc`),
    the liblve wrapper (`py`) and reseller name/id mapping (`map`)."""

    def __init__(self, proc=None, py=None, map=None):
        # NOTE: `map` shadows the builtin; parameter name kept for callers.
        self.proc = proc or ProcLve()
        self.py = py or PyLve()
        self.map = map or LvpMap()

    def lve_id_lvp_id_pairs(self):
        """
        Obtain {lve id}:{lvp id} pairs iterator based on ve.cfg config
        (detect enabled resellers containers)
        This method (unlike LvpMap.lve_lvp_pairs) will check
        if reseller is enabled in ve.cfg and return lvp_id=0
        for users of reseller with disabled reseller limits
        """
        enabled_lvp_id = set(self.map.name_map.id_list())
        for lve_id, lvp_id in self.map.lve_lvp_pairs():
            if lvp_id in enabled_lvp_id:  # load map for enabled resellers only
                yield lve_id, lvp_id
            else:
                # reseller limits disabled -> user lives under the root LVP
                yield lve_id, 0

    def lve2lvp(self, lve_id):
        """
        Obtain lvp id based on ve.cfg config (detect enabled resellers containers)
        Returns 0 (root LVP) when lve_id is not found in any enabled reseller.
        """
        for lve_id_, lvp_id_ in self.lve_id_lvp_id_pairs():
            if lve_id == lve_id_:
                return lvp_id_
        return 0

    def lve_destroy(self, lve_id, *args, **kwargs):
        """
        safe destroy lve container with preserving lvp mapping
        """
        # Remember the current lve->lvp mapping (only if /proc/lve/map exists)
        if os.path.exists(self.proc.proc_lve_map()):
            lvp_id = self.proc.map().get(lve_id, 0)
        else:
            lvp_id = 0
        self.py.lve_destroy(lve_id, *args, **kwargs)
        if lvp_id != 0:
            try:
                # Restore the mapping only when the user still exists in passwd
                pwd.getpwuid(lve_id)
                self.py.lve_lvp_map(lvp_id, lve_id)
            except KeyError:
                pass

    def _map_domain_lves(self, lve_id, proc_map):
        """Place every domain LVE that belongs to *lve_id* under its user LVP.
        Uses lve_lvp_move for domain LVEs that already exist in the kernel,
        and falls back to lve_lvp_map for those that do not yet exist (so
        the kernel will place them under the correct LVP when lve_setup
        creates them later).
        *proc_map* is an lve_id→lvp_id dict (from /proc/lve/map) used to
        skip mappings that are already correct. Updated in-place so callers
        see the new state.
        """
        config = _ws_config.load_config(lve_id)
        for d in config.domains:
            if not d.name:
                continue
            try:
                docroot = _ws_config.resolve_docroot(d.name)
            except LvdError as e:
                # Best-effort: skip this domain, log and continue with the rest
                syslog.syslog(syslog.LOG_WARNING,
                              f"failed to resolve docroot for domain '{d.name}': {e}")
                continue
            domain_id = _ws_id_registry.assign_domain_id(lve_id, docroot)
            if proc_map.get(domain_id) == lve_id:
                # Already mapped to this user's LVP — nothing to do
                continue
            if domain_id in proc_map:
                # Domain LVE exists in the kernel — move it.
                self.py.lve_lvp_move(lve_id, domain_id)
            else:
                # Domain LVE does not exist yet — pre-register mapping.
                self.py.lve_lvp_map(lve_id, domain_id)
            proc_map[domain_id] = lve_id

    def apply_domain_lve_limits(self, lve_id):
        """Apply per-domain limits from domains.json to each domain LVE.
        Must be called after lve_set_default() resets all LVEs inside the
        user LVP to the user's defaults. Each domain LVE is set to its
        stored limits, or all-zeros (unlimited) when unconfigured.
        """
        config = _ws_config.load_config(lve_id)
        for d in config.domains:
            if not d.name:
                continue
            try:
                docroot = _ws_config.resolve_docroot(d.name)
            except LvdError as e:
                # Best-effort: skip this domain, log and continue with the rest
                syslog.syslog(syslog.LOG_WARNING,
                              f"failed to resolve docroot for domain '{d.name}': {e}")
                continue
            domain_id = _ws_id_registry.assign_domain_id(lve_id, docroot)
            # Fresh settings struct per domain; unset fields stay at their
            # liblve defaults (zeros — unlimited)
            settings = self.py.liblve_settings()
            limits = d.limits.to_dict()
            if 'cpu' in limits:
                settings.ls_cpu = int(limits['cpu'])
            if 'pmem' in limits:
                pmem_bytes = int(limits['pmem'])
                # bytes -> pages; assumes a 4096-byte page size — TODO confirm
                # against liblve's expected ls_memory_phy unit
                settings.ls_memory_phy = pmem_bytes // 4096 if pmem_bytes else 0
            if 'io' in limits:
                settings.ls_io = int(limits['io'])
            if 'nproc' in limits:
                settings.ls_nproc = int(limits['nproc'])
            if 'iops' in limits:
                settings.ls_iops = int(limits['iops'])
            self.py.lve_setup(
                domain_id, settings,
                err_msg=f'Can`t setup domain LVE {domain_id}; error code {{code}}',
            )

    def _sync_map(self):
        """
        Load lve_id:lvp_id map to kmod-lve.
        For users with per-domain isolation configured, builds a nested LVP
        hierarchy instead of a flat reseller mapping:
        lvp<0>
            [lvp<reseller_id>]   ← reseller LVP (if enabled)
                lvp<user_id>     ← user-level LVP with isolation
                    lve<user_id>       ← user LVE
                    lve<domain_id> ... ← domain LVEs
        lvp<0>
            [lvp<reseller_id>]   ← reseller LVP (without isolation)
                lve<user_id>     ← user LVE (flat, existing behaviour)
        :raises NotSupported: propagated from cpapi calls (see sync_map)
        """
        # load mapping information from kernel (/proc/lve/map)
        proc_map_dict = self.proc.map()
        # loop over user_id:reseller_id pairs
        # lve_id_lvp_id_pairs includes all control panel users
        # and checks for enabled resellers in ve.cfg
        # so user of reseller without reseller limits
        # will be listed in response like 'tuple(user_id, 0)'
        if self.py.domains_supported():
            isolated_users = set(_ws_config.find_all_lve_ids_with_config())
        else:
            isolated_users = set()
        for lve_id, lvp_id in self.lve_id_lvp_id_pairs():
            if lve_id in isolated_users:
                # User has per-domain isolation: build nested LVP hierarchy.
                # Ensure the parent (reseller) LVP exists first.
                if lvp_id != 0 and not self.proc.exist_lvp(lvp_id=lvp_id):
                    self.py.lve_lvp_create(lvp_id)
                # Create user LVP nested under the reseller (or at root
                # when there is no reseller). lve_apply_all() already
                # creates the nested LVP for reseller users; this handles
                # the non-reseller case and the standalone sync-map call.
                if not self.proc.exist_lvp(lvp_id=lve_id):
                    if lvp_id:
                        self.py.lve_lvp_create2(lve_id, lvp_id)
                    else:
                        self.py.lve_lvp_create(lve_id)
                # Move the user LVE under the user LVP (both share lve_id).
                if proc_map_dict.get(lve_id, 0) != lve_id:
                    self.py.lve_lvp_move(lve_id, lve_id)
                    proc_map_dict[lve_id] = lve_id
                self._map_domain_lves(lve_id, proc_map=proc_map_dict)
            else:
                # Standard behaviour: move user LVE under reseller/root LVP.
                if proc_map_dict.get(lve_id, 0) != lvp_id:  # change map if needed only
                    if not self.proc.exist_lvp(lvp_id=lvp_id):
                        self.py.lve_lvp_create(lvp_id)
                    self.py.lve_lvp_move(lvp_id, lve_id)
                    proc_map_dict[lve_id] = lvp_id

    def sync_map(self):
        """
        wrapped _sync_map function for prevent error if some cpapi not supported
        """
        try:
            self._sync_map()
        except NotSupported:
            # Panel without the needed cpapi capability: silently skip sync
            pass

    def is_panel_supported(self):
        """
        Check if current panel supported for reseller's limits;
        :rtype: bool
        """
        try:
            return cpapi.is_reseller_limits_supported()
        except NotSupported:
            return False

    def reseller_limit_supported(self):
        """
        Check present all needed (kmod-lve, liblve, /proc/lve, panel) for manipulate resellers limits
        :rtype: bool
        """
        return all((self.py.resellers_supported(),
                    self.proc.resellers_supported(),
                    self.is_panel_supported()))

    def is_lve10(self):
        """
        Check present all needed (kmod-lve, liblve, /proc/lve) for manipulate resellers limits
        :rtype: bool
        """
        return all((self.py.resellers_supported(), self.proc.resellers_supported()))