body stringlengths 26 98.2k | body_hash int64 -9,222,864,604,528,158,000 9,221,803,474B | docstring stringlengths 1 16.8k | path stringlengths 5 230 | name stringlengths 1 96 | repository_name stringlengths 7 89 | lang stringclasses 1
value | body_without_docstring stringlengths 20 98.2k |
|---|---|---|---|---|---|---|---|
def get_triples(self, graph_type: str) -> str:
'Retrieves the contents of the specified graph as triples encoded in turtle format\n\n Parameters\n ----------\n graph_type : str\n Graph type, either "instance" or "schema".\n\n Raises\n ------\n InterfaceError\n ... | -9,054,580,359,083,694,000 | Retrieves the contents of the specified graph as triples encoded in turtle format
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
Raises
------
InterfaceError
if the client does not connect to a database
Returns
-------
str | terminusdb_client/woqlclient/woqlClient.py | get_triples | terminusdb/woql-client-p | python | def get_triples(self, graph_type: str) -> str:
'Retrieves the contents of the specified graph as triples encoded in turtle format\n\n Parameters\n ----------\n graph_type : str\n Graph type, either "instance" or "schema".\n\n Raises\n ------\n InterfaceError\n ... |
def update_triples(self, graph_type: str, turtle, commit_msg: str) -> None:
'Updates the contents of the specified graph with the triples encoded in turtle format Replaces the entire graph contents\n\n Parameters\n ----------\n graph_type : str\n Graph type, either "instance" or "sch... | -9,104,398,038,183,055,000 | Updates the contents of the specified graph with the triples encoded in turtle format Replaces the entire graph contents
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
turtle
Valid set of triples in Turtle format.
commit_msg : str
Commit message.
Raises
------
InterfaceE... | terminusdb_client/woqlclient/woqlClient.py | update_triples | terminusdb/woql-client-p | python | def update_triples(self, graph_type: str, turtle, commit_msg: str) -> None:
'Updates the contents of the specified graph with the triples encoded in turtle format Replaces the entire graph contents\n\n Parameters\n ----------\n graph_type : str\n Graph type, either "instance" or "sch... |
def insert_triples(self, graph_type: str, turtle, commit_msg: Optional[str]=None) -> None:
'Inserts into the specified graph with the triples encoded in turtle format.\n\n Parameters\n ----------\n graph_type : str\n Graph type, either "instance" or "schema".\n turtle\n ... | -8,400,336,033,088,321,000 | Inserts into the specified graph with the triples encoded in turtle format.
Parameters
----------
graph_type : str
Graph type, either "instance" or "schema".
turtle
Valid set of triples in Turtle format.
commit_msg : str
Commit message.
Raises
------
InterfaceError
if the client does not connect to a ... | terminusdb_client/woqlclient/woqlClient.py | insert_triples | terminusdb/woql-client-p | python | def insert_triples(self, graph_type: str, turtle, commit_msg: Optional[str]=None) -> None:
'Inserts into the specified graph with the triples encoded in turtle format.\n\n Parameters\n ----------\n graph_type : str\n Graph type, either "instance" or "schema".\n turtle\n ... |
def query_document(self, document_template: dict, graph_type: str='instance', skip: int=0, count: Optional[int]=None, as_list: bool=False, get_data_version: bool=False, **kwargs) -> Union[(Iterable, list)]:
'Retrieves all documents that match a given document template\n\n Parameters\n ----------\n ... | -103,947,400,020,023,520 | Retrieves all documents that match a given document template
Parameters
----------
document_template : dict
Template for the document that is being retrived
graph_type : str, optional
Graph type, either "instance" or "schema".
as_list: bool
If the result returned as list rather than an iterator.
get_data_v... | terminusdb_client/woqlclient/woqlClient.py | query_document | terminusdb/woql-client-p | python | def query_document(self, document_template: dict, graph_type: str='instance', skip: int=0, count: Optional[int]=None, as_list: bool=False, get_data_version: bool=False, **kwargs) -> Union[(Iterable, list)]:
'Retrieves all documents that match a given document template\n\n Parameters\n ----------\n ... |
def get_document(self, iri_id: str, graph_type: str='instance', get_data_version: bool=False, **kwargs) -> dict:
'Retrieves the document of the iri_id\n\n Parameters\n ----------\n iri_id : str\n Iri id for the docuemnt that is retriving\n graph_type : str, optional\n ... | -6,171,475,609,696,768,000 | Retrieves the document of the iri_id
Parameters
----------
iri_id : str
Iri id for the docuemnt that is retriving
graph_type : str, optional
Graph type, either "instance" or "schema".
get_data_version: bool
If the data version of the document(s) should be obtained. If True, the method return the result and... | terminusdb_client/woqlclient/woqlClient.py | get_document | terminusdb/woql-client-p | python | def get_document(self, iri_id: str, graph_type: str='instance', get_data_version: bool=False, **kwargs) -> dict:
'Retrieves the document of the iri_id\n\n Parameters\n ----------\n iri_id : str\n Iri id for the docuemnt that is retriving\n graph_type : str, optional\n ... |
def get_documents_by_type(self, doc_type: str, graph_type: str='instance', skip: int=0, count: Optional[int]=None, as_list: bool=False, get_data_version=False, **kwargs) -> Union[(Iterable, list)]:
'Retrieves the documents by type\n\n Parameters\n ----------\n doc_type : str\n Specif... | 1,406,150,879,367,926,300 | Retrieves the documents by type
Parameters
----------
doc_type : str
Specific type for the docuemnts that is retriving
graph_type : str, optional
Graph type, either "instance" or "schema".
skip: int
The starting posiion of the returning results, default to be 0
count: int or None
The maximum number of ... | terminusdb_client/woqlclient/woqlClient.py | get_documents_by_type | terminusdb/woql-client-p | python | def get_documents_by_type(self, doc_type: str, graph_type: str='instance', skip: int=0, count: Optional[int]=None, as_list: bool=False, get_data_version=False, **kwargs) -> Union[(Iterable, list)]:
'Retrieves the documents by type\n\n Parameters\n ----------\n doc_type : str\n Specif... |
def get_all_documents(self, graph_type: str='instance', skip: int=0, count: Optional[int]=None, as_list: bool=False, get_data_version: bool=False, **kwargs) -> Union[(Iterable, list, tuple)]:
'Retrieves all avalibale the documents\n\n Parameters\n ----------\n graph_type : str, optional\n ... | 949,907,830,305,204,700 | Retrieves all avalibale the documents
Parameters
----------
graph_type : str, optional
Graph type, either "instance" or "schema".
skip: int
The starting posiion of the returning results, default to be 0
count: int or None
The maximum number of returned result, if None (default) it will return all of the av... | terminusdb_client/woqlclient/woqlClient.py | get_all_documents | terminusdb/woql-client-p | python | def get_all_documents(self, graph_type: str='instance', skip: int=0, count: Optional[int]=None, as_list: bool=False, get_data_version: bool=False, **kwargs) -> Union[(Iterable, list, tuple)]:
'Retrieves all avalibale the documents\n\n Parameters\n ----------\n graph_type : str, optional\n ... |
def get_existing_classes(self):
'Get all the existing classes (only ids) in a database.'
all_existing_obj = self.get_all_documents(graph_type='schema')
all_existing_class = {}
for item in all_existing_obj:
if item.get('@id'):
all_existing_class[item['@id']] = item
return all_exis... | 2,001,867,462,390,230,800 | Get all the existing classes (only ids) in a database. | terminusdb_client/woqlclient/woqlClient.py | get_existing_classes | terminusdb/woql-client-p | python | def get_existing_classes(self):
all_existing_obj = self.get_all_documents(graph_type='schema')
all_existing_class = {}
for item in all_existing_obj:
if item.get('@id'):
all_existing_class[item['@id']] = item
return all_existing_class |
def insert_document(self, document: Union[(dict, List[dict], 'WOQLSchema', 'DocumentTemplate', List['DocumentTemplate'])], graph_type: str='instance', full_replace: bool=False, commit_msg: Optional[str]=None, last_data_version: Optional[str]=None, compress: Union[(str, int)]=1024) -> None:
'Inserts the specified do... | 4,374,205,126,363,328,000 | Inserts the specified document(s)
Parameters
----------
document: dict or list of dict
Document(s) to be inserted.
graph_type : str
Graph type, either "inference", "instance" or "schema".
full_replace:: bool
If True then the whole graph will be replaced. WARNING: you should also supply the context object a... | terminusdb_client/woqlclient/woqlClient.py | insert_document | terminusdb/woql-client-p | python | def insert_document(self, document: Union[(dict, List[dict], 'WOQLSchema', 'DocumentTemplate', List['DocumentTemplate'])], graph_type: str='instance', full_replace: bool=False, commit_msg: Optional[str]=None, last_data_version: Optional[str]=None, compress: Union[(str, int)]=1024) -> None:
'Inserts the specified do... |
def replace_document(self, document: Union[(dict, List[dict], 'WOQLSchema', 'DocumentTemplate', List['DocumentTemplate'])], graph_type: str='instance', commit_msg: Optional[str]=None, last_data_version: Optional[str]=None, compress: Union[(str, int)]=1024, create: bool=False) -> None:
'Updates the specified documen... | -4,453,975,394,859,476,500 | Updates the specified document(s)
Parameters
----------
document: dict or list of dict
Document(s) to be updated.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used to check if the document has been c... | terminusdb_client/woqlclient/woqlClient.py | replace_document | terminusdb/woql-client-p | python | def replace_document(self, document: Union[(dict, List[dict], 'WOQLSchema', 'DocumentTemplate', List['DocumentTemplate'])], graph_type: str='instance', commit_msg: Optional[str]=None, last_data_version: Optional[str]=None, compress: Union[(str, int)]=1024, create: bool=False) -> None:
'Updates the specified documen... |
def update_document(self, document: Union[(dict, List[dict], 'WOQLSchema', 'DocumentTemplate', List['DocumentTemplate'])], graph_type: str='instance', commit_msg: Optional[str]=None, last_data_version: Optional[str]=None, compress: Union[(str, int)]=1024) -> None:
'Updates the specified document(s). Add the documen... | 4,663,193,108,761,359,000 | Updates the specified document(s). Add the document if not existed.
Parameters
----------
document: dict or list of dict
Document(s) to be updated.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last version before the update, used t... | terminusdb_client/woqlclient/woqlClient.py | update_document | terminusdb/woql-client-p | python | def update_document(self, document: Union[(dict, List[dict], 'WOQLSchema', 'DocumentTemplate', List['DocumentTemplate'])], graph_type: str='instance', commit_msg: Optional[str]=None, last_data_version: Optional[str]=None, compress: Union[(str, int)]=1024) -> None:
'Updates the specified document(s). Add the documen... |
def delete_document(self, document: Union[(str, list, dict, Iterable)], graph_type: str='instance', commit_msg: Optional[str]=None, last_data_version: Optional[str]=None) -> None:
'Delete the specified document(s)\n\n Parameters\n ----------\n document: str or list of str\n Document(... | -5,628,106,040,500,408,000 | Delete the specified document(s)
Parameters
----------
document: str or list of str
Document(s) (as dictionary or DocumentTemplate objects) or id(s) of document(s) to be updated.
graph_type : str
Graph type, either "instance" or "schema".
commit_msg : str
Commit message.
last_data_version : str
Last ve... | terminusdb_client/woqlclient/woqlClient.py | delete_document | terminusdb/woql-client-p | python | def delete_document(self, document: Union[(str, list, dict, Iterable)], graph_type: str='instance', commit_msg: Optional[str]=None, last_data_version: Optional[str]=None) -> None:
'Delete the specified document(s)\n\n Parameters\n ----------\n document: str or list of str\n Document(... |
def has_doc(self, doc_id: str, graph_type: str='instance') -> bool:
'Check if a certain document exist in a database\n\n Parameters\n ----------\n doc_id: str\n Id of document to be checked.\n graph_type : str\n Graph type, either "instance" or "schema".\n\n ... | -5,075,576,481,415,718,000 | Check if a certain document exist in a database
Parameters
----------
doc_id: str
Id of document to be checked.
graph_type : str
Graph type, either "instance" or "schema".
returns
-------
Bool
if the document exist | terminusdb_client/woqlclient/woqlClient.py | has_doc | terminusdb/woql-client-p | python | def has_doc(self, doc_id: str, graph_type: str='instance') -> bool:
'Check if a certain document exist in a database\n\n Parameters\n ----------\n doc_id: str\n Id of document to be checked.\n graph_type : str\n Graph type, either "instance" or "schema".\n\n ... |
def get_class_frame(self, class_name):
'Get the frame of the class of class_name. Provide information about all the avaliable properties of that class.\n\n Parameters\n ----------\n class_name: str\n Name of the class\n\n returns\n -------\n dict\n Dic... | -2,056,334,198,750,349,600 | Get the frame of the class of class_name. Provide information about all the avaliable properties of that class.
Parameters
----------
class_name: str
Name of the class
returns
-------
dict
Dictionary containing information | terminusdb_client/woqlclient/woqlClient.py | get_class_frame | terminusdb/woql-client-p | python | def get_class_frame(self, class_name):
'Get the frame of the class of class_name. Provide information about all the avaliable properties of that class.\n\n Parameters\n ----------\n class_name: str\n Name of the class\n\n returns\n -------\n dict\n Dic... |
def commit(self):
'Not implementated: open transactions currently not suportted. Please check back later.' | 4,602,813,655,849,661,000 | Not implementated: open transactions currently not suportted. Please check back later. | terminusdb_client/woqlclient/woqlClient.py | commit | terminusdb/woql-client-p | python | def commit(self):
|
def query(self, woql_query: Union[(dict, WOQLQuery)], commit_msg: Optional[str]=None, get_data_version: bool=False, last_data_version: Optional[str]=None) -> Union[(dict, str)]:
'Updates the contents of the specified graph with the triples encoded in turtle format Replaces the entire graph contents\n\n Param... | 4,067,560,774,186,981,000 | Updates the contents of the specified graph with the triples encoded in turtle format Replaces the entire graph contents
Parameters
----------
woql_query : dict or WOQLQuery object
A woql query as an object or dict
commit_mg : str
A message that will be written to the commit log to describe the change
get_data... | terminusdb_client/woqlclient/woqlClient.py | query | terminusdb/woql-client-p | python | def query(self, woql_query: Union[(dict, WOQLQuery)], commit_msg: Optional[str]=None, get_data_version: bool=False, last_data_version: Optional[str]=None) -> Union[(dict, str)]:
'Updates the contents of the specified graph with the triples encoded in turtle format Replaces the entire graph contents\n\n Param... |
def create_branch(self, new_branch_id: str, empty: bool=False) -> None:
'Create a branch starting from the current branch.\n\n Parameters\n ----------\n new_branch_id : str\n New branch identifier.\n empty : bool\n Create an empty branch if true (no starting commit)... | -8,907,302,686,537,837,000 | Create a branch starting from the current branch.
Parameters
----------
new_branch_id : str
New branch identifier.
empty : bool
Create an empty branch if true (no starting commit)
Raises
------
InterfaceError
if the client does not connect to a database | terminusdb_client/woqlclient/woqlClient.py | create_branch | terminusdb/woql-client-p | python | def create_branch(self, new_branch_id: str, empty: bool=False) -> None:
'Create a branch starting from the current branch.\n\n Parameters\n ----------\n new_branch_id : str\n New branch identifier.\n empty : bool\n Create an empty branch if true (no starting commit)... |
def delete_branch(self, branch_id: str) -> None:
'Delete a branch\n\n Parameters\n ----------\n branch_id : str\n Branch to delete\n\n Raises\n ------\n InterfaceError\n if the client does not connect to a database\n '
self._check_connection... | 5,665,887,188,130,106,000 | Delete a branch
Parameters
----------
branch_id : str
Branch to delete
Raises
------
InterfaceError
if the client does not connect to a database | terminusdb_client/woqlclient/woqlClient.py | delete_branch | terminusdb/woql-client-p | python | def delete_branch(self, branch_id: str) -> None:
'Delete a branch\n\n Parameters\n ----------\n branch_id : str\n Branch to delete\n\n Raises\n ------\n InterfaceError\n if the client does not connect to a database\n '
self._check_connection... |
def pull(self, remote: str='origin', remote_branch: Optional[str]=None, message: Optional[str]=None, author: Optional[str]=None) -> dict:
'Pull updates from a remote repository to the current database.\n\n Parameters\n ----------\n remote: str\n remote to pull from, default "origin"\... | -717,244,658,174,841,200 | Pull updates from a remote repository to the current database.
Parameters
----------
remote: str
remote to pull from, default "origin"
remote_branch: str, optional
remote branch to pull from, default to be your current barnch
message: str, optional
optional commit message
author: str, optional
option t... | terminusdb_client/woqlclient/woqlClient.py | pull | terminusdb/woql-client-p | python | def pull(self, remote: str='origin', remote_branch: Optional[str]=None, message: Optional[str]=None, author: Optional[str]=None) -> dict:
'Pull updates from a remote repository to the current database.\n\n Parameters\n ----------\n remote: str\n remote to pull from, default "origin"\... |
def fetch(self, remote_id: str) -> dict:
'Fatch the brach from a remote\n\n Parameters\n ----------\n remote_id: str\n id of the remote\n\n Raises\n ------\n InterfaceError\n if the client does not connect to a database'
self._check_connection()
... | -7,423,922,888,655,255,000 | Fatch the brach from a remote
Parameters
----------
remote_id: str
id of the remote
Raises
------
InterfaceError
if the client does not connect to a database | terminusdb_client/woqlclient/woqlClient.py | fetch | terminusdb/woql-client-p | python | def fetch(self, remote_id: str) -> dict:
'Fatch the brach from a remote\n\n Parameters\n ----------\n remote_id: str\n id of the remote\n\n Raises\n ------\n InterfaceError\n if the client does not connect to a database'
self._check_connection()
... |
def push(self, remote: str='origin', remote_branch: Optional[str]=None, message: Optional[str]=None, author: Optional[str]=None) -> dict:
'Push changes from a branch to a remote repo\n\n Parameters\n ----------\n remote: str\n remote to push to, default "origin"\n remote_branc... | -6,826,902,822,942,968,000 | Push changes from a branch to a remote repo
Parameters
----------
remote: str
remote to push to, default "origin"
remote_branch: str, optional
remote branch to push to, default to be your current barnch
message: str, optional
optional commit message
author: str, optional
option to overide the author of... | terminusdb_client/woqlclient/woqlClient.py | push | terminusdb/woql-client-p | python | def push(self, remote: str='origin', remote_branch: Optional[str]=None, message: Optional[str]=None, author: Optional[str]=None) -> dict:
'Push changes from a branch to a remote repo\n\n Parameters\n ----------\n remote: str\n remote to push to, default "origin"\n remote_branc... |
def rebase(self, branch: Optional[str]=None, commit: Optional[str]=None, rebase_source: Optional[str]=None, message: Optional[str]=None, author: Optional[str]=None) -> dict:
'Rebase the current branch onto the specified remote branch. Need to specify one of \'branch\',\'commit\' or the \'rebase_source\'.\n\n ... | -2,819,286,272,856,279,600 | Rebase the current branch onto the specified remote branch. Need to specify one of 'branch','commit' or the 'rebase_source'.
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
branch : str, optional
the branch for the rebase
rebase_source : str, optional
the source branch for ... | terminusdb_client/woqlclient/woqlClient.py | rebase | terminusdb/woql-client-p | python | def rebase(self, branch: Optional[str]=None, commit: Optional[str]=None, rebase_source: Optional[str]=None, message: Optional[str]=None, author: Optional[str]=None) -> dict:
'Rebase the current branch onto the specified remote branch. Need to specify one of \'branch\',\'commit\' or the \'rebase_source\'.\n\n ... |
def reset(self, commit: Optional[str]=None, soft: bool=False, use_path: bool=False) -> None:
'Reset the current branch HEAD to the specified commit path. If `soft` is not True, it will be a hard reset, meaning reset to that commit in the backend and newer commit will be wipped out. If `soft` is True, the client wil... | -2,691,294,074,058,396,000 | Reset the current branch HEAD to the specified commit path. If `soft` is not True, it will be a hard reset, meaning reset to that commit in the backend and newer commit will be wipped out. If `soft` is True, the client will only reference to that commit and can be reset to the newest commit when done.
Raises
------
In... | terminusdb_client/woqlclient/woqlClient.py | reset | terminusdb/woql-client-p | python | def reset(self, commit: Optional[str]=None, soft: bool=False, use_path: bool=False) -> None:
'Reset the current branch HEAD to the specified commit path. If `soft` is not True, it will be a hard reset, meaning reset to that commit in the backend and newer commit will be wipped out. If `soft` is True, the client wil... |
def optimize(self, path: str) -> None:
'Optimize the specified path.\n\n Raises\n ------\n InterfaceError\n if the client does not connect to a database\n\n Notes\n -----\n The "remote" repo can live in the local database.\n\n Parameters\n ---------... | -5,317,494,528,360,595,000 | Optimize the specified path.
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
path : string
Path to optimize, for instance admin/database/_meta for the repo graph.
Examples
--------
>>> client = WOQL... | terminusdb_client/woqlclient/woqlClient.py | optimize | terminusdb/woql-client-p | python | def optimize(self, path: str) -> None:
'Optimize the specified path.\n\n Raises\n ------\n InterfaceError\n if the client does not connect to a database\n\n Notes\n -----\n The "remote" repo can live in the local database.\n\n Parameters\n ---------... |
def squash(self, message: Optional[str]=None, author: Optional[str]=None, reset: bool=False) -> str:
'Squash the current branch HEAD into a commit\n\n Raises\n ------\n InterfaceError\n if the client does not connect to a database\n\n Notes\n -----\n The "remote"... | 2,707,212,334,958,123,000 | Squash the current branch HEAD into a commit
Raises
------
InterfaceError
if the client does not connect to a database
Notes
-----
The "remote" repo can live in the local database.
Parameters
----------
message : string
Message for the newly created squash commit
author : string
Author of the commit
rese... | terminusdb_client/woqlclient/woqlClient.py | squash | terminusdb/woql-client-p | python | def squash(self, message: Optional[str]=None, author: Optional[str]=None, reset: bool=False) -> str:
'Squash the current branch HEAD into a commit\n\n Raises\n ------\n InterfaceError\n if the client does not connect to a database\n\n Notes\n -----\n The "remote"... |
def diff(self, before: Union[(str, dict, List[dict], 'WOQLSchema', 'DocumentTemplate', List['DocumentTemplate'])], after: Union[(str, dict, List[dict], 'WOQLSchema', 'DocumentTemplate', List['DocumentTemplate'])], document_id: Union[(str, None)]=None):
'Perform diff on 2 set of document(s), result in a Patch object... | -3,945,360,807,689,457,000 | Perform diff on 2 set of document(s), result in a Patch object.
Do not connect when using public API.
Returns
-------
obj
Patch object
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="root", team="admin", db="some_db")
>>> result = client.diff({ "@id" : "... | terminusdb_client/woqlclient/woqlClient.py | diff | terminusdb/woql-client-p | python | def diff(self, before: Union[(str, dict, List[dict], 'WOQLSchema', 'DocumentTemplate', List['DocumentTemplate'])], after: Union[(str, dict, List[dict], 'WOQLSchema', 'DocumentTemplate', List['DocumentTemplate'])], document_id: Union[(str, None)]=None):
'Perform diff on 2 set of document(s), result in a Patch object... |
def patch(self, before: Union[(dict, List[dict], 'WOQLSchema', 'DocumentTemplate', List['DocumentTemplate'])], patch: Patch):
'Apply the patch object to the before object and return an after object. Note that this change does not commit changes to the graph.\n\n Do not connect when using public API.\n\n ... | -7,215,206,631,332,217,000 | Apply the patch object to the before object and return an after object. Note that this change does not commit changes to the graph.
Do not connect when using public API.
Returns
-------
dict
After object
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client.connect(user="admin", key="ro... | terminusdb_client/woqlclient/woqlClient.py | patch | terminusdb/woql-client-p | python | def patch(self, before: Union[(dict, List[dict], 'WOQLSchema', 'DocumentTemplate', List['DocumentTemplate'])], patch: Patch):
'Apply the patch object to the before object and return an after object. Note that this change does not commit changes to the graph.\n\n Do not connect when using public API.\n\n ... |
def clonedb(self, clone_source: str, newid: str, description: Optional[str]=None) -> None:
'Clone a remote repository and create a local copy.\n\n Parameters\n ----------\n clone_source : str\n The source url of the repo to be cloned.\n newid : str\n Identifier of t... | -6,368,264,291,951,515,000 | Clone a remote repository and create a local copy.
Parameters
----------
clone_source : str
The source url of the repo to be cloned.
newid : str
Identifier of the new repository to create.
Description : str, optional
Optional description about the cloned database.
Raises
------
InterfaceError
if the c... | terminusdb_client/woqlclient/woqlClient.py | clonedb | terminusdb/woql-client-p | python | def clonedb(self, clone_source: str, newid: str, description: Optional[str]=None) -> None:
'Clone a remote repository and create a local copy.\n\n Parameters\n ----------\n clone_source : str\n The source url of the repo to be cloned.\n newid : str\n Identifier of t... |
def _generate_commit(self, msg: Optional[str]=None, author: Optional[str]=None) -> dict:
'Pack the specified commit info into a dict format expected by the server.\n\n Parameters\n ----------\n msg : str\n Commit message.\n author : str\n Commit author.\n\n R... | -1,409,766,101,435,979,300 | Pack the specified commit info into a dict format expected by the server.
Parameters
----------
msg : str
Commit message.
author : str
Commit author.
Returns
-------
dict
Formatted commit info.
Examples
--------
>>> client = WOQLClient("https://127.0.0.1:6363/")
>>> client._generate_commit("<message>", "... | terminusdb_client/woqlclient/woqlClient.py | _generate_commit | terminusdb/woql-client-p | python | def _generate_commit(self, msg: Optional[str]=None, author: Optional[str]=None) -> dict:
'Pack the specified commit info into a dict format expected by the server.\n\n Parameters\n ----------\n msg : str\n Commit message.\n author : str\n Commit author.\n\n R... |
def get_database(self, dbid: str) -> Optional[dict]:
'\n Returns metadata (id, organization, label, comment) about the requested database\n Parameters\n ----------\n dbid : str\n The id of the database\n\n Raises\n ------\n InterfaceError\n if t... | 579,466,602,066,837,800 | Returns metadata (id, organization, label, comment) about the requested database
Parameters
----------
dbid : str
The id of the database
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
dict or None if not found | terminusdb_client/woqlclient/woqlClient.py | get_database | terminusdb/woql-client-p | python | def get_database(self, dbid: str) -> Optional[dict]:
'\n Returns metadata (id, organization, label, comment) about the requested database\n Parameters\n ----------\n dbid : str\n The id of the database\n\n Raises\n ------\n InterfaceError\n if t... |
def get_databases(self) -> List[dict]:
'\n Returns a list of database metadata records for all databases the user has access to\n\n Raises\n ------\n InterfaceError\n if the client does not connect to a server\n\n Returns\n -------\n list of dicts\n ... | -8,478,958,528,453,672,000 | Returns a list of database metadata records for all databases the user has access to
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
list of dicts | terminusdb_client/woqlclient/woqlClient.py | get_databases | terminusdb/woql-client-p | python | def get_databases(self) -> List[dict]:
'\n Returns a list of database metadata records for all databases the user has access to\n\n Raises\n ------\n InterfaceError\n if the client does not connect to a server\n\n Returns\n -------\n list of dicts\n ... |
def list_databases(self) -> List[Dict]:
'\n Returns a list of database ids for all databases the user has access to\n\n Raises\n ------\n InterfaceError\n if the client does not connect to a server\n\n Returns\n -------\n list of dicts\n '
self.... | 7,350,422,752,582,065,000 | Returns a list of database ids for all databases the user has access to
Raises
------
InterfaceError
if the client does not connect to a server
Returns
-------
list of dicts | terminusdb_client/woqlclient/woqlClient.py | list_databases | terminusdb/woql-client-p | python | def list_databases(self) -> List[Dict]:
'\n Returns a list of database ids for all databases the user has access to\n\n Raises\n ------\n InterfaceError\n if the client does not connect to a server\n\n Returns\n -------\n list of dicts\n '
self.... |
@since('2.3.0')
def getEpsilon(self):
'\n Gets the value of epsilon or its default value.\n '
return self.getOrDefault(self.epsilon) | -1,535,077,709,341,858,600 | Gets the value of epsilon or its default value. | python/pyspark/ml/regression.py | getEpsilon | AjithShetty2489/spark | python | @since('2.3.0')
def getEpsilon(self):
'\n \n '
return self.getOrDefault(self.epsilon) |
@keyword_only
def __init__(self, featuresCol='features', labelCol='label', predictionCol='prediction', maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-06, fitIntercept=True, standardization=True, solver='auto', weightCol=None, aggregationDepth=2, loss='squaredError', epsilon=1.35):
'\n __init__(self, ... | -2,032,851,235,059,459,800 | __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-6, fitIntercept=True, standardization=True, solver="auto", weightCol=None, aggregationDepth=2, loss="squaredError", epsilon=1.35) | python/pyspark/ml/regression.py | __init__ | AjithShetty2489/spark | python | @keyword_only
def __init__(self, featuresCol='features', labelCol='label', predictionCol='prediction', maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-06, fitIntercept=True, standardization=True, solver='auto', weightCol=None, aggregationDepth=2, loss='squaredError', epsilon=1.35):
'\n \n '
... |
@keyword_only
@since('1.4.0')
def setParams(self, featuresCol='features', labelCol='label', predictionCol='prediction', maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-06, fitIntercept=True, standardization=True, solver='auto', weightCol=None, aggregationDepth=2, loss='squaredError', epsilon=1.35):
'\n ... | 1,798,489,900,972,090,400 | setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-6, fitIntercept=True, standardization=True, solver="auto", weightCol=None, aggregationDepth=2, loss="squaredError", epsilon=1... | python/pyspark/ml/regression.py | setParams | AjithShetty2489/spark | python | @keyword_only
@since('1.4.0')
def setParams(self, featuresCol='features', labelCol='label', predictionCol='prediction', maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-06, fitIntercept=True, standardization=True, solver='auto', weightCol=None, aggregationDepth=2, loss='squaredError', epsilon=1.35):
'\n ... |
@since('2.3.0')
def setEpsilon(self, value):
'\n Sets the value of :py:attr:`epsilon`.\n '
return self._set(epsilon=value) | -6,138,312,146,789,055,000 | Sets the value of :py:attr:`epsilon`. | python/pyspark/ml/regression.py | setEpsilon | AjithShetty2489/spark | python | @since('2.3.0')
def setEpsilon(self, value):
'\n \n '
return self._set(epsilon=value) |
def setMaxIter(self, value):
'\n Sets the value of :py:attr:`maxIter`.\n '
return self._set(maxIter=value) | 1,288,476,920,971,597,800 | Sets the value of :py:attr:`maxIter`. | python/pyspark/ml/regression.py | setMaxIter | AjithShetty2489/spark | python | def setMaxIter(self, value):
'\n \n '
return self._set(maxIter=value) |
def setRegParam(self, value):
'\n Sets the value of :py:attr:`regParam`.\n '
return self._set(regParam=value) | -7,433,284,680,045,634,000 | Sets the value of :py:attr:`regParam`. | python/pyspark/ml/regression.py | setRegParam | AjithShetty2489/spark | python | def setRegParam(self, value):
'\n \n '
return self._set(regParam=value) |
def setTol(self, value):
'\n Sets the value of :py:attr:`tol`.\n '
return self._set(tol=value) | 3,579,479,604,354,210,000 | Sets the value of :py:attr:`tol`. | python/pyspark/ml/regression.py | setTol | AjithShetty2489/spark | python | def setTol(self, value):
'\n \n '
return self._set(tol=value) |
def setElasticNetParam(self, value):
'\n Sets the value of :py:attr:`elasticNetParam`.\n '
return self._set(elasticNetParam=value) | -3,621,938,483,410,980,000 | Sets the value of :py:attr:`elasticNetParam`. | python/pyspark/ml/regression.py | setElasticNetParam | AjithShetty2489/spark | python | def setElasticNetParam(self, value):
'\n \n '
return self._set(elasticNetParam=value) |
def setFitIntercept(self, value):
'\n Sets the value of :py:attr:`fitIntercept`.\n '
return self._set(fitIntercept=value) | -3,408,520,087,298,627,000 | Sets the value of :py:attr:`fitIntercept`. | python/pyspark/ml/regression.py | setFitIntercept | AjithShetty2489/spark | python | def setFitIntercept(self, value):
'\n \n '
return self._set(fitIntercept=value) |
def setStandardization(self, value):
'\n Sets the value of :py:attr:`standardization`.\n '
return self._set(standardization=value) | 2,077,692,268,013,256,400 | Sets the value of :py:attr:`standardization`. | python/pyspark/ml/regression.py | setStandardization | AjithShetty2489/spark | python | def setStandardization(self, value):
'\n \n '
return self._set(standardization=value) |
def setWeightCol(self, value):
'\n Sets the value of :py:attr:`weightCol`.\n '
return self._set(weightCol=value) | 7,126,166,856,151,208,000 | Sets the value of :py:attr:`weightCol`. | python/pyspark/ml/regression.py | setWeightCol | AjithShetty2489/spark | python | def setWeightCol(self, value):
'\n \n '
return self._set(weightCol=value) |
def setSolver(self, value):
'\n Sets the value of :py:attr:`solver`.\n '
return self._set(solver=value) | 659,948,514,240,389,100 | Sets the value of :py:attr:`solver`. | python/pyspark/ml/regression.py | setSolver | AjithShetty2489/spark | python | def setSolver(self, value):
'\n \n '
return self._set(solver=value) |
def setAggregationDepth(self, value):
'\n Sets the value of :py:attr:`aggregationDepth`.\n '
return self._set(aggregationDepth=value) | 5,049,484,725,658,685,000 | Sets the value of :py:attr:`aggregationDepth`. | python/pyspark/ml/regression.py | setAggregationDepth | AjithShetty2489/spark | python | def setAggregationDepth(self, value):
'\n \n '
return self._set(aggregationDepth=value) |
def setLoss(self, value):
'\n Sets the value of :py:attr:`loss`.\n '
return self._set(lossType=value) | 4,226,753,261,501,401,000 | Sets the value of :py:attr:`loss`. | python/pyspark/ml/regression.py | setLoss | AjithShetty2489/spark | python | def setLoss(self, value):
'\n \n '
return self._set(lossType=value) |
@property
@since('2.0.0')
def coefficients(self):
'\n Model coefficients.\n '
return self._call_java('coefficients') | 6,857,518,054,360,473,000 | Model coefficients. | python/pyspark/ml/regression.py | coefficients | AjithShetty2489/spark | python | @property
@since('2.0.0')
def coefficients(self):
'\n \n '
return self._call_java('coefficients') |
@property
@since('1.4.0')
def intercept(self):
'\n Model intercept.\n '
return self._call_java('intercept') | 5,739,049,948,923,467,000 | Model intercept. | python/pyspark/ml/regression.py | intercept | AjithShetty2489/spark | python | @property
@since('1.4.0')
def intercept(self):
'\n \n '
return self._call_java('intercept') |
@property
@since('2.3.0')
def scale(self):
'\n The value by which :math:`\\|y - X\'w\\|` is scaled down when loss is "huber", otherwise 1.0.\n '
return self._call_java('scale') | 3,241,283,480,831,272,400 | The value by which :math:`\|y - X'w\|` is scaled down when loss is "huber", otherwise 1.0. | python/pyspark/ml/regression.py | scale | AjithShetty2489/spark | python | @property
@since('2.3.0')
def scale(self):
'\n The value by which :math:`\\|y - X\'w\\|` is scaled down when loss is "huber", otherwise 1.0.\n '
return self._call_java('scale') |
@property
@since('2.0.0')
def summary(self):
'\n Gets summary (e.g. residuals, mse, r-squared ) of model on\n training set. An exception is thrown if\n `trainingSummary is None`.\n '
if self.hasSummary:
return LinearRegressionTrainingSummary(super(LinearRegressionModel, self)... | -8,882,501,893,985,365,000 | Gets summary (e.g. residuals, mse, r-squared ) of model on
training set. An exception is thrown if
`trainingSummary is None`. | python/pyspark/ml/regression.py | summary | AjithShetty2489/spark | python | @property
@since('2.0.0')
def summary(self):
'\n Gets summary (e.g. residuals, mse, r-squared ) of model on\n training set. An exception is thrown if\n `trainingSummary is None`.\n '
if self.hasSummary:
return LinearRegressionTrainingSummary(super(LinearRegressionModel, self)... |
@since('2.0.0')
def evaluate(self, dataset):
'\n Evaluates the model on a test dataset.\n\n :param dataset:\n Test dataset to evaluate model on, where dataset is an\n instance of :py:class:`pyspark.sql.DataFrame`\n '
if (not isinstance(dataset, DataFrame)):
raise V... | -4,808,242,261,066,155,000 | Evaluates the model on a test dataset.
:param dataset:
Test dataset to evaluate model on, where dataset is an
instance of :py:class:`pyspark.sql.DataFrame` | python/pyspark/ml/regression.py | evaluate | AjithShetty2489/spark | python | @since('2.0.0')
def evaluate(self, dataset):
'\n Evaluates the model on a test dataset.\n\n :param dataset:\n Test dataset to evaluate model on, where dataset is an\n instance of :py:class:`pyspark.sql.DataFrame`\n '
if (not isinstance(dataset, DataFrame)):
raise V... |
@property
@since('2.0.0')
def predictions(self):
"\n Dataframe outputted by the model's `transform` method.\n "
return self._call_java('predictions') | -8,863,001,023,905,391,000 | Dataframe outputted by the model's `transform` method. | python/pyspark/ml/regression.py | predictions | AjithShetty2489/spark | python | @property
@since('2.0.0')
def predictions(self):
"\n \n "
return self._call_java('predictions') |
@property
@since('2.0.0')
def predictionCol(self):
'\n Field in "predictions" which gives the predicted value of\n the label at each instance.\n '
return self._call_java('predictionCol') | 8,979,628,586,212,194,000 | Field in "predictions" which gives the predicted value of
the label at each instance. | python/pyspark/ml/regression.py | predictionCol | AjithShetty2489/spark | python | @property
@since('2.0.0')
def predictionCol(self):
'\n Field in "predictions" which gives the predicted value of\n the label at each instance.\n '
return self._call_java('predictionCol') |
@property
@since('2.0.0')
def labelCol(self):
'\n Field in "predictions" which gives the true label of each\n instance.\n '
return self._call_java('labelCol') | -6,334,475,539,055,536,000 | Field in "predictions" which gives the true label of each
instance. | python/pyspark/ml/regression.py | labelCol | AjithShetty2489/spark | python | @property
@since('2.0.0')
def labelCol(self):
'\n Field in "predictions" which gives the true label of each\n instance.\n '
return self._call_java('labelCol') |
@property
@since('2.0.0')
def featuresCol(self):
'\n Field in "predictions" which gives the features of each instance\n as a vector.\n '
return self._call_java('featuresCol') | 6,743,549,635,058,488,000 | Field in "predictions" which gives the features of each instance
as a vector. | python/pyspark/ml/regression.py | featuresCol | AjithShetty2489/spark | python | @property
@since('2.0.0')
def featuresCol(self):
'\n Field in "predictions" which gives the features of each instance\n as a vector.\n '
return self._call_java('featuresCol') |
@property
@since('2.0.0')
def explainedVariance(self):
'\n Returns the explained variance regression score.\n explainedVariance = :math:`1 - \\frac{variance(y - \\hat{y})}{variance(y)}`\n\n .. seealso:: `Wikipedia explain variation\n <http://en.wikipedia.org/wiki/Explained_variation>... | -8,904,734,550,016,116,000 | Returns the explained variance regression score.
explainedVariance = :math:`1 - \frac{variance(y - \hat{y})}{variance(y)}`
.. seealso:: `Wikipedia explain variation
<http://en.wikipedia.org/wiki/Explained_variation>`_
.. note:: This ignores instance weights (setting all to 1.0) from
`LinearRegression.weightCo... | python/pyspark/ml/regression.py | explainedVariance | AjithShetty2489/spark | python | @property
@since('2.0.0')
def explainedVariance(self):
'\n Returns the explained variance regression score.\n explainedVariance = :math:`1 - \\frac{variance(y - \\hat{y})}{variance(y)}`\n\n .. seealso:: `Wikipedia explain variation\n <http://en.wikipedia.org/wiki/Explained_variation>... |
@property
@since('2.0.0')
def meanAbsoluteError(self):
'\n Returns the mean absolute error, which is a risk function\n corresponding to the expected value of the absolute error\n loss or l1-norm loss.\n\n .. note:: This ignores instance weights (setting all to 1.0) from\n `Lin... | 7,154,150,193,848,016,000 | Returns the mean absolute error, which is a risk function
corresponding to the expected value of the absolute error
loss or l1-norm loss.
.. note:: This ignores instance weights (setting all to 1.0) from
`LinearRegression.weightCol`. This will change in later Spark
versions. | python/pyspark/ml/regression.py | meanAbsoluteError | AjithShetty2489/spark | python | @property
@since('2.0.0')
def meanAbsoluteError(self):
'\n Returns the mean absolute error, which is a risk function\n corresponding to the expected value of the absolute error\n loss or l1-norm loss.\n\n .. note:: This ignores instance weights (setting all to 1.0) from\n `Lin... |
@property
@since('2.0.0')
def meanSquaredError(self):
'\n Returns the mean squared error, which is a risk function\n corresponding to the expected value of the squared error\n loss or quadratic loss.\n\n .. note:: This ignores instance weights (setting all to 1.0) from\n `Line... | -4,137,376,390,146,737,700 | Returns the mean squared error, which is a risk function
corresponding to the expected value of the squared error
loss or quadratic loss.
.. note:: This ignores instance weights (setting all to 1.0) from
`LinearRegression.weightCol`. This will change in later Spark
versions. | python/pyspark/ml/regression.py | meanSquaredError | AjithShetty2489/spark | python | @property
@since('2.0.0')
def meanSquaredError(self):
'\n Returns the mean squared error, which is a risk function\n corresponding to the expected value of the squared error\n loss or quadratic loss.\n\n .. note:: This ignores instance weights (setting all to 1.0) from\n `Line... |
@property
@since('2.0.0')
def rootMeanSquaredError(self):
'\n Returns the root mean squared error, which is defined as the\n square root of the mean squared error.\n\n .. note:: This ignores instance weights (setting all to 1.0) from\n `LinearRegression.weightCol`. This will change i... | -338,729,532,713,508,700 | Returns the root mean squared error, which is defined as the
square root of the mean squared error.
.. note:: This ignores instance weights (setting all to 1.0) from
`LinearRegression.weightCol`. This will change in later Spark
versions. | python/pyspark/ml/regression.py | rootMeanSquaredError | AjithShetty2489/spark | python | @property
@since('2.0.0')
def rootMeanSquaredError(self):
'\n Returns the root mean squared error, which is defined as the\n square root of the mean squared error.\n\n .. note:: This ignores instance weights (setting all to 1.0) from\n `LinearRegression.weightCol`. This will change i... |
@property
@since('2.0.0')
def r2(self):
'\n Returns R^2, the coefficient of determination.\n\n .. seealso:: `Wikipedia coefficient of determination\n <http://en.wikipedia.org/wiki/Coefficient_of_determination>`_\n\n .. note:: This ignores instance weights (setting all to 1.0) from\n ... | -6,180,160,953,391,052,000 | Returns R^2, the coefficient of determination.
.. seealso:: `Wikipedia coefficient of determination
<http://en.wikipedia.org/wiki/Coefficient_of_determination>`_
.. note:: This ignores instance weights (setting all to 1.0) from
`LinearRegression.weightCol`. This will change in later Spark
versions. | python/pyspark/ml/regression.py | r2 | AjithShetty2489/spark | python | @property
@since('2.0.0')
def r2(self):
'\n Returns R^2, the coefficient of determination.\n\n .. seealso:: `Wikipedia coefficient of determination\n <http://en.wikipedia.org/wiki/Coefficient_of_determination>`_\n\n .. note:: This ignores instance weights (setting all to 1.0) from\n ... |
@property
@since('2.4.0')
def r2adj(self):
'\n Returns Adjusted R^2, the adjusted coefficient of determination.\n\n .. seealso:: `Wikipedia coefficient of determination, Adjusted R^2\n <https://en.wikipedia.org/wiki/Coefficient_of_determination#Adjusted_R2>`_\n\n .. note:: This ignor... | -2,022,324,800,128,418,300 | Returns Adjusted R^2, the adjusted coefficient of determination.
.. seealso:: `Wikipedia coefficient of determination, Adjusted R^2
<https://en.wikipedia.org/wiki/Coefficient_of_determination#Adjusted_R2>`_
.. note:: This ignores instance weights (setting all to 1.0) from
`LinearRegression.weightCol`. This wi... | python/pyspark/ml/regression.py | r2adj | AjithShetty2489/spark | python | @property
@since('2.4.0')
def r2adj(self):
'\n Returns Adjusted R^2, the adjusted coefficient of determination.\n\n .. seealso:: `Wikipedia coefficient of determination, Adjusted R^2\n <https://en.wikipedia.org/wiki/Coefficient_of_determination#Adjusted_R2>`_\n\n .. note:: This ignor... |
@property
@since('2.0.0')
def residuals(self):
'\n Residuals (label - predicted value)\n '
return self._call_java('residuals') | 6,089,670,078,438,460,000 | Residuals (label - predicted value) | python/pyspark/ml/regression.py | residuals | AjithShetty2489/spark | python | @property
@since('2.0.0')
def residuals(self):
'\n \n '
return self._call_java('residuals') |
@property
@since('2.0.0')
def numInstances(self):
'\n Number of instances in DataFrame predictions\n '
return self._call_java('numInstances') | 567,005,979,655,261,800 | Number of instances in DataFrame predictions | python/pyspark/ml/regression.py | numInstances | AjithShetty2489/spark | python | @property
@since('2.0.0')
def numInstances(self):
'\n \n '
return self._call_java('numInstances') |
@property
@since('2.2.0')
def degreesOfFreedom(self):
'\n Degrees of freedom.\n '
return self._call_java('degreesOfFreedom') | 8,608,220,457,733,950,000 | Degrees of freedom. | python/pyspark/ml/regression.py | degreesOfFreedom | AjithShetty2489/spark | python | @property
@since('2.2.0')
def degreesOfFreedom(self):
'\n \n '
return self._call_java('degreesOfFreedom') |
@property
@since('2.0.0')
def devianceResiduals(self):
'\n The weighted residuals, the usual residuals rescaled by the\n square root of the instance weights.\n '
return self._call_java('devianceResiduals') | -5,755,805,787,819,274,000 | The weighted residuals, the usual residuals rescaled by the
square root of the instance weights. | python/pyspark/ml/regression.py | devianceResiduals | AjithShetty2489/spark | python | @property
@since('2.0.0')
def devianceResiduals(self):
'\n The weighted residuals, the usual residuals rescaled by the\n square root of the instance weights.\n '
return self._call_java('devianceResiduals') |
@property
@since('2.0.0')
def coefficientStandardErrors(self):
'\n Standard error of estimated coefficients and intercept.\n This value is only available when using the "normal" solver.\n\n If :py:attr:`LinearRegression.fitIntercept` is set to True,\n then the last element returned corre... | -5,840,749,010,366,116,000 | Standard error of estimated coefficients and intercept.
This value is only available when using the "normal" solver.
If :py:attr:`LinearRegression.fitIntercept` is set to True,
then the last element returned corresponds to the intercept.
.. seealso:: :py:attr:`LinearRegression.solver` | python/pyspark/ml/regression.py | coefficientStandardErrors | AjithShetty2489/spark | python | @property
@since('2.0.0')
def coefficientStandardErrors(self):
'\n Standard error of estimated coefficients and intercept.\n This value is only available when using the "normal" solver.\n\n If :py:attr:`LinearRegression.fitIntercept` is set to True,\n then the last element returned corre... |
@property
@since('2.0.0')
def tValues(self):
'\n T-statistic of estimated coefficients and intercept.\n This value is only available when using the "normal" solver.\n\n If :py:attr:`LinearRegression.fitIntercept` is set to True,\n then the last element returned corresponds to the interce... | -4,420,046,533,744,205,000 | T-statistic of estimated coefficients and intercept.
This value is only available when using the "normal" solver.
If :py:attr:`LinearRegression.fitIntercept` is set to True,
then the last element returned corresponds to the intercept.
.. seealso:: :py:attr:`LinearRegression.solver` | python/pyspark/ml/regression.py | tValues | AjithShetty2489/spark | python | @property
@since('2.0.0')
def tValues(self):
'\n T-statistic of estimated coefficients and intercept.\n This value is only available when using the "normal" solver.\n\n If :py:attr:`LinearRegression.fitIntercept` is set to True,\n then the last element returned corresponds to the interce... |
@property
@since('2.0.0')
def pValues(self):
'\n Two-sided p-value of estimated coefficients and intercept.\n This value is only available when using the "normal" solver.\n\n If :py:attr:`LinearRegression.fitIntercept` is set to True,\n then the last element returned corresponds to the i... | -827,290,872,445,449,000 | Two-sided p-value of estimated coefficients and intercept.
This value is only available when using the "normal" solver.
If :py:attr:`LinearRegression.fitIntercept` is set to True,
then the last element returned corresponds to the intercept.
.. seealso:: :py:attr:`LinearRegression.solver` | python/pyspark/ml/regression.py | pValues | AjithShetty2489/spark | python | @property
@since('2.0.0')
def pValues(self):
'\n Two-sided p-value of estimated coefficients and intercept.\n This value is only available when using the "normal" solver.\n\n If :py:attr:`LinearRegression.fitIntercept` is set to True,\n then the last element returned corresponds to the i... |
@property
@since('2.0.0')
def objectiveHistory(self):
'\n Objective function (scaled loss + regularization) at each\n iteration.\n This value is only available when using the "l-bfgs" solver.\n\n .. seealso:: :py:attr:`LinearRegression.solver`\n '
return self._call_java('objec... | -7,613,942,411,571,399,000 | Objective function (scaled loss + regularization) at each
iteration.
This value is only available when using the "l-bfgs" solver.
.. seealso:: :py:attr:`LinearRegression.solver` | python/pyspark/ml/regression.py | objectiveHistory | AjithShetty2489/spark | python | @property
@since('2.0.0')
def objectiveHistory(self):
'\n Objective function (scaled loss + regularization) at each\n iteration.\n This value is only available when using the "l-bfgs" solver.\n\n .. seealso:: :py:attr:`LinearRegression.solver`\n '
return self._call_java('objec... |
@property
@since('2.0.0')
def totalIterations(self):
'\n Number of training iterations until termination.\n This value is only available when using the "l-bfgs" solver.\n\n .. seealso:: :py:attr:`LinearRegression.solver`\n '
return self._call_java('totalIterations') | -6,211,222,178,903,529,000 | Number of training iterations until termination.
This value is only available when using the "l-bfgs" solver.
.. seealso:: :py:attr:`LinearRegression.solver` | python/pyspark/ml/regression.py | totalIterations | AjithShetty2489/spark | python | @property
@since('2.0.0')
def totalIterations(self):
'\n Number of training iterations until termination.\n This value is only available when using the "l-bfgs" solver.\n\n .. seealso:: :py:attr:`LinearRegression.solver`\n '
return self._call_java('totalIterations') |
def getIsotonic(self):
'\n Gets the value of isotonic or its default value.\n '
return self.getOrDefault(self.isotonic) | 2,921,464,487,635,401,000 | Gets the value of isotonic or its default value. | python/pyspark/ml/regression.py | getIsotonic | AjithShetty2489/spark | python | def getIsotonic(self):
'\n \n '
return self.getOrDefault(self.isotonic) |
def getFeatureIndex(self):
'\n Gets the value of featureIndex or its default value.\n '
return self.getOrDefault(self.featureIndex) | -425,671,395,134,212,900 | Gets the value of featureIndex or its default value. | python/pyspark/ml/regression.py | getFeatureIndex | AjithShetty2489/spark | python | def getFeatureIndex(self):
'\n \n '
return self.getOrDefault(self.featureIndex) |
@keyword_only
def __init__(self, featuresCol='features', labelCol='label', predictionCol='prediction', weightCol=None, isotonic=True, featureIndex=0):
'\n __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", weightCol=None, isotonic=True, featureIndex=0):\n ... | -5,562,647,240,495,843,000 | __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", weightCol=None, isotonic=True, featureIndex=0): | python/pyspark/ml/regression.py | __init__ | AjithShetty2489/spark | python | @keyword_only
def __init__(self, featuresCol='features', labelCol='label', predictionCol='prediction', weightCol=None, isotonic=True, featureIndex=0):
'\n \n '
super(IsotonicRegression, self).__init__()
self._java_obj = self._new_java_obj('org.apache.spark.ml.regression.IsotonicRegression', se... |
@keyword_only
def setParams(self, featuresCol='features', labelCol='label', predictionCol='prediction', weightCol=None, isotonic=True, featureIndex=0):
'\n setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", weightCol=None, isotonic=True, featureIndex=0):\n ... | -4,398,090,866,832,613,000 | setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", weightCol=None, isotonic=True, featureIndex=0):
Set the params for IsotonicRegression. | python/pyspark/ml/regression.py | setParams | AjithShetty2489/spark | python | @keyword_only
def setParams(self, featuresCol='features', labelCol='label', predictionCol='prediction', weightCol=None, isotonic=True, featureIndex=0):
'\n setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", weightCol=None, isotonic=True, featureIndex=0):\n ... |
def setIsotonic(self, value):
'\n Sets the value of :py:attr:`isotonic`.\n '
return self._set(isotonic=value) | 6,740,910,986,481,251,000 | Sets the value of :py:attr:`isotonic`. | python/pyspark/ml/regression.py | setIsotonic | AjithShetty2489/spark | python | def setIsotonic(self, value):
'\n \n '
return self._set(isotonic=value) |
def setFeatureIndex(self, value):
'\n Sets the value of :py:attr:`featureIndex`.\n '
return self._set(featureIndex=value) | 712,460,935,526,708,900 | Sets the value of :py:attr:`featureIndex`. | python/pyspark/ml/regression.py | setFeatureIndex | AjithShetty2489/spark | python | def setFeatureIndex(self, value):
'\n \n '
return self._set(featureIndex=value) |
@since('1.6.0')
def setFeaturesCol(self, value):
'\n Sets the value of :py:attr:`featuresCol`.\n '
return self._set(featuresCol=value) | 668,118,026,924,361,500 | Sets the value of :py:attr:`featuresCol`. | python/pyspark/ml/regression.py | setFeaturesCol | AjithShetty2489/spark | python | @since('1.6.0')
def setFeaturesCol(self, value):
'\n \n '
return self._set(featuresCol=value) |
@since('1.6.0')
def setPredictionCol(self, value):
'\n Sets the value of :py:attr:`predictionCol`.\n '
return self._set(predictionCol=value) | -5,502,708,142,385,355,000 | Sets the value of :py:attr:`predictionCol`. | python/pyspark/ml/regression.py | setPredictionCol | AjithShetty2489/spark | python | @since('1.6.0')
def setPredictionCol(self, value):
'\n \n '
return self._set(predictionCol=value) |
@since('1.6.0')
def setLabelCol(self, value):
'\n Sets the value of :py:attr:`labelCol`.\n '
return self._set(labelCol=value) | 5,234,744,185,482,678,000 | Sets the value of :py:attr:`labelCol`. | python/pyspark/ml/regression.py | setLabelCol | AjithShetty2489/spark | python | @since('1.6.0')
def setLabelCol(self, value):
'\n \n '
return self._set(labelCol=value) |
@since('1.6.0')
def setWeightCol(self, value):
'\n Sets the value of :py:attr:`weightCol`.\n '
return self._set(weightCol=value) | 111,680,165,829,194,820 | Sets the value of :py:attr:`weightCol`. | python/pyspark/ml/regression.py | setWeightCol | AjithShetty2489/spark | python | @since('1.6.0')
def setWeightCol(self, value):
'\n \n '
return self._set(weightCol=value) |
@since('3.0.0')
def setFeaturesCol(self, value):
'\n Sets the value of :py:attr:`featuresCol`.\n '
return self._set(featuresCol=value) | -4,158,365,003,254,860,000 | Sets the value of :py:attr:`featuresCol`. | python/pyspark/ml/regression.py | setFeaturesCol | AjithShetty2489/spark | python | @since('3.0.0')
def setFeaturesCol(self, value):
'\n \n '
return self._set(featuresCol=value) |
@since('3.0.0')
def setPredictionCol(self, value):
'\n Sets the value of :py:attr:`predictionCol`.\n '
return self._set(predictionCol=value) | 1,012,894,679,154,316,400 | Sets the value of :py:attr:`predictionCol`. | python/pyspark/ml/regression.py | setPredictionCol | AjithShetty2489/spark | python | @since('3.0.0')
def setPredictionCol(self, value):
'\n \n '
return self._set(predictionCol=value) |
def setFeatureIndex(self, value):
'\n Sets the value of :py:attr:`featureIndex`.\n '
return self._set(featureIndex=value) | 712,460,935,526,708,900 | Sets the value of :py:attr:`featureIndex`. | python/pyspark/ml/regression.py | setFeatureIndex | AjithShetty2489/spark | python | def setFeatureIndex(self, value):
'\n \n '
return self._set(featureIndex=value) |
@property
@since('1.6.0')
def boundaries(self):
'\n Boundaries in increasing order for which predictions are known.\n '
return self._call_java('boundaries') | 25,368,690,135,028,264 | Boundaries in increasing order for which predictions are known. | python/pyspark/ml/regression.py | boundaries | AjithShetty2489/spark | python | @property
@since('1.6.0')
def boundaries(self):
'\n \n '
return self._call_java('boundaries') |
@property
@since('1.6.0')
def predictions(self):
'\n Predictions associated with the boundaries at the same index, monotone because of isotonic\n regression.\n '
return self._call_java('predictions') | 583,706,518,878,989,700 | Predictions associated with the boundaries at the same index, monotone because of isotonic
regression. | python/pyspark/ml/regression.py | predictions | AjithShetty2489/spark | python | @property
@since('1.6.0')
def predictions(self):
'\n Predictions associated with the boundaries at the same index, monotone because of isotonic\n regression.\n '
return self._call_java('predictions') |
@since('3.0.0')
def numFeatures(self):
'\n Returns the number of features the model was trained on. If unknown, returns -1\n '
return self._call_java('numFeatures') | -481,038,317,992,713,100 | Returns the number of features the model was trained on. If unknown, returns -1 | python/pyspark/ml/regression.py | numFeatures | AjithShetty2489/spark | python | @since('3.0.0')
def numFeatures(self):
'\n \n '
return self._call_java('numFeatures') |
@since('3.0.0')
def predict(self, value):
'\n Predict label for the given features.\n '
return self._call_java('predict', value) | -3,504,815,097,445,081,000 | Predict label for the given features. | python/pyspark/ml/regression.py | predict | AjithShetty2489/spark | python | @since('3.0.0')
def predict(self, value):
'\n \n '
return self._call_java('predict', value) |
@keyword_only
def __init__(self, featuresCol='features', labelCol='label', predictionCol='prediction', maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, impurity='variance', seed=None, varianceCol=None, weightCol=None, leafCol='', minWeightFrac... | 6,855,883,870,003,851,000 | __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, impurity="variance", seed=None, varianceCol=None, weightCo... | python/pyspark/ml/regression.py | __init__ | AjithShetty2489/spark | python | @keyword_only
def __init__(self, featuresCol='features', labelCol='label', predictionCol='prediction', maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, impurity='variance', seed=None, varianceCol=None, weightCol=None, leafCol=, minWeightFracti... |
@keyword_only
@since('1.4.0')
def setParams(self, featuresCol='features', labelCol='label', predictionCol='prediction', maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, impurity='variance', seed=None, varianceCol=None, weightCol=None, leafCol=... | -5,414,660,128,328,874,000 | setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, impurity="variance", seed=None, varianceCol=None, weig... | python/pyspark/ml/regression.py | setParams | AjithShetty2489/spark | python | @keyword_only
@since('1.4.0')
def setParams(self, featuresCol='features', labelCol='label', predictionCol='prediction', maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, impurity='variance', seed=None, varianceCol=None, weightCol=None, leafCol=... |
@since('1.4.0')
def setMaxDepth(self, value):
    """
    Sets the value of :py:attr:`maxDepth`.
    """
    # Delegate to the shared Params._set machinery; its result is
    # returned unchanged so setter calls can be chained.
    return self._set(maxDepth=value)
@since('1.4.0')
def setMaxBins(self, value):
    """
    Sets the value of :py:attr:`maxBins`.
    """
    # Delegate to the shared Params._set machinery.
    return self._set(maxBins=value)
@since('1.4.0')
def setMinInstancesPerNode(self, value):
    """
    Sets the value of :py:attr:`minInstancesPerNode`.
    """
    # Delegate to the shared Params._set machinery.
    return self._set(minInstancesPerNode=value)
@since('3.0.0')
def setMinWeightFractionPerNode(self, value):
    """
    Sets the value of :py:attr:`minWeightFractionPerNode`.
    """
    # Delegate to the shared Params._set machinery.
    return self._set(minWeightFractionPerNode=value)
@since('1.4.0')
def setMinInfoGain(self, value):
    """
    Sets the value of :py:attr:`minInfoGain`.
    """
    # Delegate to the shared Params._set machinery.
    return self._set(minInfoGain=value)
@since('1.4.0')
def setMaxMemoryInMB(self, value):
    """
    Sets the value of :py:attr:`maxMemoryInMB`.
    """
    # Delegate to the shared Params._set machinery.
    return self._set(maxMemoryInMB=value)
@since('1.4.0')
def setCacheNodeIds(self, value):
    """
    Sets the value of :py:attr:`cacheNodeIds`.
    """
    # Delegate to the shared Params._set machinery.
    return self._set(cacheNodeIds=value)
@since('1.4.0')
def setImpurity(self, value):
    """
    Sets the value of :py:attr:`impurity`.
    """
    # Delegate to the shared Params._set machinery.
    return self._set(impurity=value)
@since('1.4.0')
def setCheckpointInterval(self, value):
    """
    Sets the value of :py:attr:`checkpointInterval`.
    """
    # Delegate to the shared Params._set machinery.
    return self._set(checkpointInterval=value)
def setSeed(self, value):
    """
    Sets the value of :py:attr:`seed`.
    """
    # Delegate to the shared Params._set machinery; its result is
    # returned unchanged so setter calls can be chained.
    return self._set(seed=value)
@since('3.0.0')
def setWeightCol(self, value):
    """
    Sets the value of :py:attr:`weightCol`.
    """
    # Delegate to the shared Params._set machinery.
    return self._set(weightCol=value)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.