
Delete venv folder

parallel_struct_est
Filippo Martini 4 years ago
parent 8a13c33b93
commit d4a079268a
  1. 241  venv/bin/Activate.ps1
  2. 66  venv/bin/activate
  3. 25  venv/bin/activate.csh
  4. 64  venv/bin/activate.fish
  5. 8  venv/bin/easy_install
  6. 8  venv/bin/easy_install-3.9
  7. 8  venv/bin/f2py
  8. 8  venv/bin/f2py3
  9. 8  venv/bin/f2py3.9
  10. 8  venv/bin/pip
  11. 8  venv/bin/pip3
  12. 8  venv/bin/pip3.9
  13. 1  venv/bin/python
  14. 1  venv/bin/python3
  15. 1  venv/bin/python3.9
  16. 8  venv/bin/tqdm
  17. 123  venv/lib/python3.9/site-packages/_distutils_hack/__init__.py
  18. 1  venv/lib/python3.9/site-packages/_distutils_hack/override.py
  19. 8  venv/lib/python3.9/site-packages/dateutil/__init__.py
  20. 43  venv/lib/python3.9/site-packages/dateutil/_common.py
  21. 4  venv/lib/python3.9/site-packages/dateutil/_version.py
  22. 89  venv/lib/python3.9/site-packages/dateutil/easter.py
  23. 61  venv/lib/python3.9/site-packages/dateutil/parser/__init__.py
  24. 1609  venv/lib/python3.9/site-packages/dateutil/parser/_parser.py
  25. 411  venv/lib/python3.9/site-packages/dateutil/parser/isoparser.py
  26. 599  venv/lib/python3.9/site-packages/dateutil/relativedelta.py
  27. 1735  venv/lib/python3.9/site-packages/dateutil/rrule.py
  28. 12  venv/lib/python3.9/site-packages/dateutil/tz/__init__.py
  29. 419  venv/lib/python3.9/site-packages/dateutil/tz/_common.py
  30. 80  venv/lib/python3.9/site-packages/dateutil/tz/_factories.py
  31. 1849  venv/lib/python3.9/site-packages/dateutil/tz/tz.py
  32. 370  venv/lib/python3.9/site-packages/dateutil/tz/win.py
  33. 2  venv/lib/python3.9/site-packages/dateutil/tzwin.py
  34. 71  venv/lib/python3.9/site-packages/dateutil/utils.py
  35. 167  venv/lib/python3.9/site-packages/dateutil/zoneinfo/__init__.py
  36. BIN  venv/lib/python3.9/site-packages/dateutil/zoneinfo/dateutil-zoneinfo.tar.gz
  37. 53  venv/lib/python3.9/site-packages/dateutil/zoneinfo/rebuild.py
  38. 1  venv/lib/python3.9/site-packages/decorator-4.4.2.dist-info/INSTALLER
  39. 26  venv/lib/python3.9/site-packages/decorator-4.4.2.dist-info/LICENSE.txt
  40. 131  venv/lib/python3.9/site-packages/decorator-4.4.2.dist-info/METADATA
  41. 9  venv/lib/python3.9/site-packages/decorator-4.4.2.dist-info/RECORD
  42. 6  venv/lib/python3.9/site-packages/decorator-4.4.2.dist-info/WHEEL
  43. 1  venv/lib/python3.9/site-packages/decorator-4.4.2.dist-info/top_level.txt
  44. 454  venv/lib/python3.9/site-packages/decorator.py
  45. 1  venv/lib/python3.9/site-packages/distutils-precedence.pth
  46. 5  venv/lib/python3.9/site-packages/easy_install.py
  47. 1  venv/lib/python3.9/site-packages/networkx-2.5.dist-info/INSTALLER
  48. 40  venv/lib/python3.9/site-packages/networkx-2.5.dist-info/LICENSE.txt
  49. 142  venv/lib/python3.9/site-packages/networkx-2.5.dist-info/METADATA
  50. 1242  venv/lib/python3.9/site-packages/networkx-2.5.dist-info/RECORD
  51. 0  venv/lib/python3.9/site-packages/networkx-2.5.dist-info/REQUESTED
  52. 5  venv/lib/python3.9/site-packages/networkx-2.5.dist-info/WHEEL
  53. 1  venv/lib/python3.9/site-packages/networkx-2.5.dist-info/top_level.txt
  54. 76  venv/lib/python3.9/site-packages/networkx/__init__.py
  55. 125  venv/lib/python3.9/site-packages/networkx/algorithms/__init__.py
  56. 22  venv/lib/python3.9/site-packages/networkx/algorithms/approximation/__init__.py
  57. 159  venv/lib/python3.9/site-packages/networkx/algorithms/approximation/clique.py
  58. 58  venv/lib/python3.9/site-packages/networkx/algorithms/approximation/clustering_coefficient.py
  59. 403  venv/lib/python3.9/site-packages/networkx/algorithms/approximation/connectivity.py
  60. 123  venv/lib/python3.9/site-packages/networkx/algorithms/approximation/dominating_set.py
  61. 58  venv/lib/python3.9/site-packages/networkx/algorithms/approximation/independent_set.py
  62. 368  venv/lib/python3.9/site-packages/networkx/algorithms/approximation/kcomponents.py
  63. 42  venv/lib/python3.9/site-packages/networkx/algorithms/approximation/matching.py
  64. 42  venv/lib/python3.9/site-packages/networkx/algorithms/approximation/ramsey.py
  65. 104  venv/lib/python3.9/site-packages/networkx/algorithms/approximation/steinertree.py
  66. 0  venv/lib/python3.9/site-packages/networkx/algorithms/approximation/tests/__init__.py
  67. 43  venv/lib/python3.9/site-packages/networkx/algorithms/approximation/tests/test_approx_clust_coeff.py
  68. 107  venv/lib/python3.9/site-packages/networkx/algorithms/approximation/tests/test_clique.py
  69. 199  venv/lib/python3.9/site-packages/networkx/algorithms/approximation/tests/test_connectivity.py
  70. 65  venv/lib/python3.9/site-packages/networkx/algorithms/approximation/tests/test_dominating_set.py
  71. 8  venv/lib/python3.9/site-packages/networkx/algorithms/approximation/tests/test_independent_set.py
  72. 300  venv/lib/python3.9/site-packages/networkx/algorithms/approximation/tests/test_kcomponents.py
  73. 8  venv/lib/python3.9/site-packages/networkx/algorithms/approximation/tests/test_matching.py
  74. 31  venv/lib/python3.9/site-packages/networkx/algorithms/approximation/tests/test_ramsey.py
  75. 83  venv/lib/python3.9/site-packages/networkx/algorithms/approximation/tests/test_steinertree.py
  76. 269  venv/lib/python3.9/site-packages/networkx/algorithms/approximation/tests/test_treewidth.py
  77. 55  venv/lib/python3.9/site-packages/networkx/algorithms/approximation/tests/test_vertex_cover.py
  78. 249  venv/lib/python3.9/site-packages/networkx/algorithms/approximation/treewidth.py
  79. 74  venv/lib/python3.9/site-packages/networkx/algorithms/approximation/vertex_cover.py
  80. 5  venv/lib/python3.9/site-packages/networkx/algorithms/assortativity/__init__.py
  81. 128  venv/lib/python3.9/site-packages/networkx/algorithms/assortativity/connectivity.py
  82. 287  venv/lib/python3.9/site-packages/networkx/algorithms/assortativity/correlation.py
  83. 233  venv/lib/python3.9/site-packages/networkx/algorithms/assortativity/mixing.py
  84. 122  venv/lib/python3.9/site-packages/networkx/algorithms/assortativity/neighbor_degree.py
  85. 116  venv/lib/python3.9/site-packages/networkx/algorithms/assortativity/pairs.py
  86. 0  venv/lib/python3.9/site-packages/networkx/algorithms/assortativity/tests/__init__.py
  87. 51  venv/lib/python3.9/site-packages/networkx/algorithms/assortativity/tests/base_test.py
  88. 139  venv/lib/python3.9/site-packages/networkx/algorithms/assortativity/tests/test_connectivity.py
  89. 76  venv/lib/python3.9/site-packages/networkx/algorithms/assortativity/tests/test_correlation.py
  90. 142  venv/lib/python3.9/site-packages/networkx/algorithms/assortativity/tests/test_mixing.py
  91. 76  venv/lib/python3.9/site-packages/networkx/algorithms/assortativity/tests/test_neighbor_degree.py
  92. 86  venv/lib/python3.9/site-packages/networkx/algorithms/assortativity/tests/test_pairs.py
  93. 168  venv/lib/python3.9/site-packages/networkx/algorithms/asteroidal.py
  94. 86  venv/lib/python3.9/site-packages/networkx/algorithms/bipartite/__init__.py
  95. 303  venv/lib/python3.9/site-packages/networkx/algorithms/bipartite/basic.py
  96. 268  venv/lib/python3.9/site-packages/networkx/algorithms/bipartite/centrality.py
  97. 276  venv/lib/python3.9/site-packages/networkx/algorithms/bipartite/cluster.py
  98. 55  venv/lib/python3.9/site-packages/networkx/algorithms/bipartite/covering.py
  99. 357  venv/lib/python3.9/site-packages/networkx/algorithms/bipartite/edgelist.py
  100. 595  venv/lib/python3.9/site-packages/networkx/algorithms/bipartite/generators.py
Some files were not shown because too many files have changed in this diff.

241
venv/bin/Activate.ps1 vendored

@@ -1,241 +0,0 @@
<#
.Synopsis
Activate a Python virtual environment for the current PowerShell session.
.Description
Pushes the python executable for a virtual environment to the front of the
$Env:PATH environment variable and sets the prompt to signify that you are
in a Python virtual environment. Makes use of the command line switches as
well as the `pyvenv.cfg` file values present in the virtual environment.
.Parameter VenvDir
Path to the directory that contains the virtual environment to activate. The
default value for this is the parent of the directory that the Activate.ps1
script is located within.
.Parameter Prompt
The prompt prefix to display when this virtual environment is activated. By
default, this prompt is the name of the virtual environment folder (VenvDir)
surrounded by parentheses and followed by a single space (i.e. '(.venv) ').
.Example
Activate.ps1
Activates the Python virtual environment that contains the Activate.ps1 script.
.Example
Activate.ps1 -Verbose
Activates the Python virtual environment that contains the Activate.ps1 script,
and shows extra information about the activation as it executes.
.Example
Activate.ps1 -VenvDir C:\Users\MyUser\Common\.venv
Activates the Python virtual environment located in the specified location.
.Example
Activate.ps1 -Prompt "MyPython"
Activates the Python virtual environment that contains the Activate.ps1 script,
and prefixes the current prompt with the specified string (surrounded in
parentheses) while the virtual environment is active.
.Notes
On Windows, it may be required to enable this Activate.ps1 script by setting the
execution policy for the user. You can do this by issuing the following PowerShell
command:
PS C:\> Set-ExecutionPolicy -ExecutionPolicy RemoteSigned -Scope CurrentUser
For more information on Execution Policies:
https://go.microsoft.com/fwlink/?LinkID=135170
#>
Param(
[Parameter(Mandatory = $false)]
[String]
$VenvDir,
[Parameter(Mandatory = $false)]
[String]
$Prompt
)
<# Function declarations --------------------------------------------------- #>
<#
.Synopsis
Remove all shell session elements added by the Activate script, including the
addition of the virtual environment's Python executable from the beginning of
the PATH variable.
.Parameter NonDestructive
If present, do not remove this function from the global namespace for the
session.
#>
function global:deactivate ([switch]$NonDestructive) {
# Revert to original values
# The prior prompt:
if (Test-Path -Path Function:_OLD_VIRTUAL_PROMPT) {
Copy-Item -Path Function:_OLD_VIRTUAL_PROMPT -Destination Function:prompt
Remove-Item -Path Function:_OLD_VIRTUAL_PROMPT
}
# The prior PYTHONHOME:
if (Test-Path -Path Env:_OLD_VIRTUAL_PYTHONHOME) {
Copy-Item -Path Env:_OLD_VIRTUAL_PYTHONHOME -Destination Env:PYTHONHOME
Remove-Item -Path Env:_OLD_VIRTUAL_PYTHONHOME
}
# The prior PATH:
if (Test-Path -Path Env:_OLD_VIRTUAL_PATH) {
Copy-Item -Path Env:_OLD_VIRTUAL_PATH -Destination Env:PATH
Remove-Item -Path Env:_OLD_VIRTUAL_PATH
}
# Just remove the VIRTUAL_ENV altogether:
if (Test-Path -Path Env:VIRTUAL_ENV) {
Remove-Item -Path env:VIRTUAL_ENV
}
# Just remove the _PYTHON_VENV_PROMPT_PREFIX altogether:
if (Get-Variable -Name "_PYTHON_VENV_PROMPT_PREFIX" -ErrorAction SilentlyContinue) {
Remove-Variable -Name _PYTHON_VENV_PROMPT_PREFIX -Scope Global -Force
}
# Leave deactivate function in the global namespace if requested:
if (-not $NonDestructive) {
Remove-Item -Path function:deactivate
}
}
<#
.Description
Get-PyVenvConfig parses the values from the pyvenv.cfg file located in the
given folder, and returns them in a map.
For each line in the pyvenv.cfg file, if that line can be parsed into exactly
two strings separated by `=` (with any amount of whitespace surrounding the =)
then it is considered a `key = value` line. The left hand string is the key,
the right hand is the value.
If the value starts with a `'` or a `"` then the first and last character is
stripped from the value before being captured.
.Parameter ConfigDir
Path to the directory that contains the `pyvenv.cfg` file.
#>
function Get-PyVenvConfig(
[String]
$ConfigDir
) {
Write-Verbose "Given ConfigDir=$ConfigDir, obtain values in pyvenv.cfg"
# Ensure the file exists, and issue a warning if it doesn't (but still allow the function to continue).
$pyvenvConfigPath = Join-Path -Resolve -Path $ConfigDir -ChildPath 'pyvenv.cfg' -ErrorAction Continue
# An empty map will be returned if no config file is found.
$pyvenvConfig = @{ }
if ($pyvenvConfigPath) {
Write-Verbose "File exists, parse `key = value` lines"
$pyvenvConfigContent = Get-Content -Path $pyvenvConfigPath
$pyvenvConfigContent | ForEach-Object {
$keyval = $PSItem -split "\s*=\s*", 2
if ($keyval[0] -and $keyval[1]) {
$val = $keyval[1]
# Remove extraneous quotations around a string value.
if ("'""".Contains($val.Substring(0, 1))) {
$val = $val.Substring(1, $val.Length - 2)
}
$pyvenvConfig[$keyval[0]] = $val
Write-Verbose "Adding Key: '$($keyval[0])'='$val'"
}
}
}
return $pyvenvConfig
}
<# Begin Activate script --------------------------------------------------- #>
# Determine the containing directory of this script
$VenvExecPath = Split-Path -Parent $MyInvocation.MyCommand.Definition
$VenvExecDir = Get-Item -Path $VenvExecPath
Write-Verbose "Activation script is located in path: '$VenvExecPath'"
Write-Verbose "VenvExecDir Fullname: '$($VenvExecDir.FullName)"
Write-Verbose "VenvExecDir Name: '$($VenvExecDir.Name)"
# Set values required in priority: CmdLine, ConfigFile, Default
# First, get the location of the virtual environment, it might not be
# VenvExecDir if specified on the command line.
if ($VenvDir) {
Write-Verbose "VenvDir given as parameter, using '$VenvDir' to determine values"
}
else {
Write-Verbose "VenvDir not given as a parameter, using parent directory name as VenvDir."
$VenvDir = $VenvExecDir.Parent.FullName.TrimEnd("\\/")
Write-Verbose "VenvDir=$VenvDir"
}
# Next, read the `pyvenv.cfg` file to determine any required value such
# as `prompt`.
$pyvenvCfg = Get-PyVenvConfig -ConfigDir $VenvDir
# Next, set the prompt from the command line, or the config file, or
# just use the name of the virtual environment folder.
if ($Prompt) {
Write-Verbose "Prompt specified as argument, using '$Prompt'"
}
else {
Write-Verbose "Prompt not specified as argument to script, checking pyvenv.cfg value"
if ($pyvenvCfg -and $pyvenvCfg['prompt']) {
Write-Verbose " Setting based on value in pyvenv.cfg='$($pyvenvCfg['prompt'])'"
$Prompt = $pyvenvCfg['prompt'];
}
else {
Write-Verbose " Setting prompt based on parent's directory's name. (Is the directory name passed to venv module when creating the virutal environment)"
Write-Verbose " Got leaf-name of $VenvDir='$(Split-Path -Path $venvDir -Leaf)'"
$Prompt = Split-Path -Path $venvDir -Leaf
}
}
Write-Verbose "Prompt = '$Prompt'"
Write-Verbose "VenvDir='$VenvDir'"
# Deactivate any currently active virtual environment, but leave the
# deactivate function in place.
deactivate -nondestructive
# Now set the environment variable VIRTUAL_ENV, used by many tools to determine
# that there is an activated venv.
$env:VIRTUAL_ENV = $VenvDir
if (-not $Env:VIRTUAL_ENV_DISABLE_PROMPT) {
Write-Verbose "Setting prompt to '$Prompt'"
# Set the prompt to include the env name
# Make sure _OLD_VIRTUAL_PROMPT is global
function global:_OLD_VIRTUAL_PROMPT { "" }
Copy-Item -Path function:prompt -Destination function:_OLD_VIRTUAL_PROMPT
New-Variable -Name _PYTHON_VENV_PROMPT_PREFIX -Description "Python virtual environment prompt prefix" -Scope Global -Option ReadOnly -Visibility Public -Value $Prompt
function global:prompt {
Write-Host -NoNewline -ForegroundColor Green "($_PYTHON_VENV_PROMPT_PREFIX) "
_OLD_VIRTUAL_PROMPT
}
}
# Clear PYTHONHOME
if (Test-Path -Path Env:PYTHONHOME) {
Copy-Item -Path Env:PYTHONHOME -Destination Env:_OLD_VIRTUAL_PYTHONHOME
Remove-Item -Path Env:PYTHONHOME
}
# Add the venv to the PATH
Copy-Item -Path Env:PATH -Destination Env:_OLD_VIRTUAL_PATH
$Env:PATH = "$VenvExecDir$([System.IO.Path]::PathSeparator)$Env:PATH"
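
Get-PyVenvConfig above is simple enough to restate compactly. As a rough sketch of the same `key = value` parsing in Python (the function name and the trailing usage line are illustrative, not part of the venv machinery):

from pathlib import Path

def read_pyvenv_config(venv_dir):
    """Parse `key = value` lines from pyvenv.cfg, mirroring Get-PyVenvConfig."""
    config = {}
    cfg_path = Path(venv_dir) / "pyvenv.cfg"
    if not cfg_path.exists():
        return config  # like the script, a missing file yields an empty map
    for line in cfg_path.read_text().splitlines():
        key, sep, value = line.partition("=")
        key, value = key.strip(), value.strip()
        if sep and key and value:
            # Strip one pair of surrounding quotes, as the script does
            if value[0] in "'\"":
                value = value[1:-1]
            config[key] = value
    return config

# Hypothetical usage: read_pyvenv_config("venv").get("prompt", "venv")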

66
venv/bin/activate vendored

@@ -1,66 +0,0 @@
# This file must be used with "source bin/activate" *from bash*
# you cannot run it directly
deactivate () {
# reset old environment variables
if [ -n "${_OLD_VIRTUAL_PATH:-}" ] ; then
PATH="${_OLD_VIRTUAL_PATH:-}"
export PATH
unset _OLD_VIRTUAL_PATH
fi
if [ -n "${_OLD_VIRTUAL_PYTHONHOME:-}" ] ; then
PYTHONHOME="${_OLD_VIRTUAL_PYTHONHOME:-}"
export PYTHONHOME
unset _OLD_VIRTUAL_PYTHONHOME
fi
# This should detect bash and zsh, which have a hash command that must
# be called to get it to forget past commands. Without forgetting
# past commands the $PATH changes we made may not be respected
if [ -n "${BASH:-}" -o -n "${ZSH_VERSION:-}" ] ; then
hash -r 2> /dev/null
fi
if [ -n "${_OLD_VIRTUAL_PS1:-}" ] ; then
PS1="${_OLD_VIRTUAL_PS1:-}"
export PS1
unset _OLD_VIRTUAL_PS1
fi
unset VIRTUAL_ENV
if [ ! "${1:-}" = "nondestructive" ] ; then
# Self destruct!
unset -f deactivate
fi
}
# unset irrelevant variables
deactivate nondestructive
VIRTUAL_ENV="/Users/Zalum/Desktop/Tesi/PyCTBN/venv"
export VIRTUAL_ENV
_OLD_VIRTUAL_PATH="$PATH"
PATH="$VIRTUAL_ENV/bin:$PATH"
export PATH
# unset PYTHONHOME if set
# this will fail if PYTHONHOME is set to the empty string (which is bad anyway)
# could use `if (set -u; : $PYTHONHOME) ;` in bash
if [ -n "${PYTHONHOME:-}" ] ; then
_OLD_VIRTUAL_PYTHONHOME="${PYTHONHOME:-}"
unset PYTHONHOME
fi
if [ -z "${VIRTUAL_ENV_DISABLE_PROMPT:-}" ] ; then
_OLD_VIRTUAL_PS1="${PS1:-}"
PS1="(venv) ${PS1:-}"
export PS1
fi
# This should detect bash and zsh, which have a hash command that must
# be called to get it to forget past commands. Without forgetting
# past commands the $PATH changes we made may not be respected
if [ -n "${BASH:-}" -o -n "${ZSH_VERSION:-}" ] ; then
hash -r 2> /dev/null
fi

25
venv/bin/activate.csh vendored

@@ -1,25 +0,0 @@
# This file must be used with "source bin/activate.csh" *from csh*.
# You cannot run it directly.
# Created by Davide Di Blasi <davidedb@gmail.com>.
# Ported to Python 3.3 venv by Andrew Svetlov <andrew.svetlov@gmail.com>
alias deactivate 'test $?_OLD_VIRTUAL_PATH != 0 && setenv PATH "$_OLD_VIRTUAL_PATH" && unset _OLD_VIRTUAL_PATH; rehash; test $?_OLD_VIRTUAL_PROMPT != 0 && set prompt="$_OLD_VIRTUAL_PROMPT" && unset _OLD_VIRTUAL_PROMPT; unsetenv VIRTUAL_ENV; test "\!:*" != "nondestructive" && unalias deactivate'
# Unset irrelevant variables.
deactivate nondestructive
setenv VIRTUAL_ENV "/Users/Zalum/Desktop/Tesi/PyCTBN/venv"
set _OLD_VIRTUAL_PATH="$PATH"
setenv PATH "$VIRTUAL_ENV/bin:$PATH"
set _OLD_VIRTUAL_PROMPT="$prompt"
if (! "$?VIRTUAL_ENV_DISABLE_PROMPT") then
set prompt = "(venv) $prompt"
endif
alias pydoc python -m pydoc
rehash

64
venv/bin/activate.fish vendored

@@ -1,64 +0,0 @@
# This file must be used with "source <venv>/bin/activate.fish" *from fish*
# (https://fishshell.com/); you cannot run it directly.
function deactivate -d "Exit virtual environment and return to normal shell environment"
# reset old environment variables
if test -n "$_OLD_VIRTUAL_PATH"
set -gx PATH $_OLD_VIRTUAL_PATH
set -e _OLD_VIRTUAL_PATH
end
if test -n "$_OLD_VIRTUAL_PYTHONHOME"
set -gx PYTHONHOME $_OLD_VIRTUAL_PYTHONHOME
set -e _OLD_VIRTUAL_PYTHONHOME
end
if test -n "$_OLD_FISH_PROMPT_OVERRIDE"
functions -e fish_prompt
set -e _OLD_FISH_PROMPT_OVERRIDE
functions -c _old_fish_prompt fish_prompt
functions -e _old_fish_prompt
end
set -e VIRTUAL_ENV
if test "$argv[1]" != "nondestructive"
# Self-destruct!
functions -e deactivate
end
end
# Unset irrelevant variables.
deactivate nondestructive
set -gx VIRTUAL_ENV "/Users/Zalum/Desktop/Tesi/PyCTBN/venv"
set -gx _OLD_VIRTUAL_PATH $PATH
set -gx PATH "$VIRTUAL_ENV/bin" $PATH
# Unset PYTHONHOME if set.
if set -q PYTHONHOME
set -gx _OLD_VIRTUAL_PYTHONHOME $PYTHONHOME
set -e PYTHONHOME
end
if test -z "$VIRTUAL_ENV_DISABLE_PROMPT"
# fish uses a function instead of an env var to generate the prompt.
# Save the current fish_prompt function as the function _old_fish_prompt.
functions -c fish_prompt _old_fish_prompt
# With the original prompt function renamed, we can override with our own.
function fish_prompt
# Save the return status of the last command.
set -l old_status $status
# Output the venv prompt; color taken from the blue of the Python logo.
printf "%s%s%s" (set_color 4B8BBE) "(venv) " (set_color normal)
# Restore the return status of the previous command.
echo "exit $old_status" | .
# Output the original/"old" prompt.
_old_fish_prompt
end
set -gx _OLD_FISH_PROMPT_OVERRIDE "$VIRTUAL_ENV"
end

8
venv/bin/easy_install vendored

@@ -1,8 +0,0 @@
#!/Users/Zalum/Desktop/Tesi/PyCTBN/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
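
All of the eight-line launchers in this commit (easy_install, f2py, pip, tqdm) follow the same setuptools console-script pattern: strip a possible Windows launcher suffix from argv[0], then hand control to the package's main(). A quick way to see what that re.sub does (the sample paths are made up):

import re

for argv0 in ["/venv/bin/easy_install",
              r"C:\venv\Scripts\easy_install-script.pyw",
              r"C:\venv\Scripts\easy_install.exe"]:
    print(re.sub(r'(-script\.pyw|\.exe)?$', '', argv0))
# /venv/bin/easy_install
# C:\venv\Scripts\easy_install
# C:\venv\Scripts\easy_install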

8
venv/bin/easy_install-3.9 vendored

@@ -1,8 +0,0 @@
#!/Users/Zalum/Desktop/Tesi/PyCTBN/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())

8
venv/bin/f2py vendored

@@ -1,8 +0,0 @@
#!/Users/Zalum/Desktop/Tesi/PyCTBN/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from numpy.f2py.f2py2e import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())

8
venv/bin/f2py3 vendored

@@ -1,8 +0,0 @@
#!/Users/Zalum/Desktop/Tesi/PyCTBN/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from numpy.f2py.f2py2e import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())

8
venv/bin/f2py3.9 vendored

@@ -1,8 +0,0 @@
#!/Users/Zalum/Desktop/Tesi/PyCTBN/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from numpy.f2py.f2py2e import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())

8
venv/bin/pip vendored

@@ -1,8 +0,0 @@
#!/Users/Zalum/Desktop/Tesi/PyCTBN/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())

8
venv/bin/pip3 vendored

@@ -1,8 +0,0 @@
#!/Users/Zalum/Desktop/Tesi/PyCTBN/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())

8
venv/bin/pip3.9 vendored

@@ -1,8 +0,0 @@
#!/Users/Zalum/Desktop/Tesi/PyCTBN/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())

1
venv/bin/python vendored

@@ -1 +0,0 @@
python3.9

1
venv/bin/python3 vendored

@@ -1 +0,0 @@
python3.9

1
venv/bin/python3.9 vendored

@@ -1 +0,0 @@
/usr/local/bin/python3.9

8
venv/bin/tqdm vendored

@@ -1,8 +0,0 @@
#!/Users/Zalum/Desktop/Tesi/PyCTBN/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from tqdm.cli import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())

123
venv/lib/python3.9/site-packages/_distutils_hack/__init__.py vendored

@@ -1,123 +0,0 @@
import sys
import os
import re
import importlib
import warnings


is_pypy = '__pypy__' in sys.builtin_module_names


def warn_distutils_present():
    if 'distutils' not in sys.modules:
        return
    if is_pypy and sys.version_info < (3, 7):
        # PyPy for 3.6 unconditionally imports distutils, so bypass the warning
        # https://foss.heptapod.net/pypy/pypy/-/blob/be829135bc0d758997b3566062999ee8b23872b4/lib-python/3/site.py#L250
        return
    warnings.warn(
        "Distutils was imported before Setuptools, but importing Setuptools "
        "also replaces the `distutils` module in `sys.modules`. This may lead "
        "to undesirable behaviors or errors. To avoid these issues, avoid "
        "using distutils directly, ensure that setuptools is installed in the "
        "traditional way (e.g. not an editable install), and/or make sure "
        "that setuptools is always imported before distutils.")


def clear_distutils():
    if 'distutils' not in sys.modules:
        return
    warnings.warn("Setuptools is replacing distutils.")
    mods = [name for name in sys.modules if re.match(r'distutils\b', name)]
    for name in mods:
        del sys.modules[name]


def enabled():
    """
    Allow selection of distutils by environment variable.
    """
    which = os.environ.get('SETUPTOOLS_USE_DISTUTILS', 'stdlib')
    return which == 'local'


def ensure_local_distutils():
    clear_distutils()
    distutils = importlib.import_module('setuptools._distutils')
    distutils.__name__ = 'distutils'
    sys.modules['distutils'] = distutils

    # sanity check that submodules load as expected
    core = importlib.import_module('distutils.core')
    assert '_distutils' in core.__file__, core.__file__


def do_override():
    """
    Ensure that the local copy of distutils is preferred over stdlib.

    See https://github.com/pypa/setuptools/issues/417#issuecomment-392298401
    for more motivation.
    """
    if enabled():
        warn_distutils_present()
        ensure_local_distutils()


class DistutilsMetaFinder:
    def find_spec(self, fullname, path, target=None):
        if path is not None:
            return

        method_name = 'spec_for_{fullname}'.format(**locals())
        method = getattr(self, method_name, lambda: None)
        return method()

    def spec_for_distutils(self):
        import importlib.abc
        import importlib.util

        class DistutilsLoader(importlib.abc.Loader):

            def create_module(self, spec):
                return importlib.import_module('setuptools._distutils')

            def exec_module(self, module):
                pass

        return importlib.util.spec_from_loader('distutils', DistutilsLoader())

    def spec_for_pip(self):
        """
        Ensure stdlib distutils when running under pip.
        See pypa/pip#8761 for rationale.
        """
        if self.pip_imported_during_build():
            return
        clear_distutils()
        self.spec_for_distutils = lambda: None

    @staticmethod
    def pip_imported_during_build():
        """
        Detect if pip is being imported in a build script. Ref #2355.
        """
        import traceback
        return any(
            frame.f_globals['__file__'].endswith('setup.py')
            for frame, line in traceback.walk_stack(None)
        )


DISTUTILS_FINDER = DistutilsMetaFinder()


def add_shim():
    sys.meta_path.insert(0, DISTUTILS_FINDER)


def remove_shim():
    try:
        sys.meta_path.remove(DISTUTILS_FINDER)
    except ValueError:
        pass
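
DistutilsMetaFinder works because Python consults sys.meta_path before falling back to the normal import machinery; returning a spec from find_spec short-circuits the search. A minimal, self-contained sketch of the same hook pattern (the hello_alias module name is invented for the demo):

import importlib.abc
import importlib.util
import sys
import types

class AliasLoader(importlib.abc.Loader):
    def __init__(self, module):
        self._module = module

    def create_module(self, spec):
        return self._module  # serve a prebuilt module object

    def exec_module(self, module):
        pass  # nothing to execute; the module is already built

class AliasFinder(importlib.abc.MetaPathFinder):
    def find_spec(self, fullname, path, target=None):
        if path is not None or fullname != 'hello_alias':
            return None  # decline; the next finder gets a chance
        mod = types.ModuleType('hello_alias')
        mod.greeting = 'hi'
        return importlib.util.spec_from_loader(fullname, AliasLoader(mod))

sys.meta_path.insert(0, AliasFinder())  # same front position add_shim() uses
import hello_alias
print(hello_alias.greeting)  # hi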

1
venv/lib/python3.9/site-packages/_distutils_hack/override.py vendored

@@ -1 +0,0 @@
__import__('_distutils_hack').do_override()

8
venv/lib/python3.9/site-packages/dateutil/__init__.py vendored

@@ -1,8 +0,0 @@
# -*- coding: utf-8 -*-
try:
    from ._version import version as __version__
except ImportError:
    __version__ = 'unknown'

__all__ = ['easter', 'parser', 'relativedelta', 'rrule', 'tz',
           'utils', 'zoneinfo']

43
venv/lib/python3.9/site-packages/dateutil/_common.py vendored

@@ -1,43 +0,0 @@
"""
Common code used in multiple modules.
"""
class weekday(object):
    __slots__ = ["weekday", "n"]

    def __init__(self, weekday, n=None):
        self.weekday = weekday
        self.n = n

    def __call__(self, n):
        if n == self.n:
            return self
        else:
            return self.__class__(self.weekday, n)

    def __eq__(self, other):
        try:
            if self.weekday != other.weekday or self.n != other.n:
                return False
        except AttributeError:
            return False
        return True

    def __hash__(self):
        return hash((
            self.weekday,
            self.n,
        ))

    def __ne__(self, other):
        return not (self == other)

    def __repr__(self):
        s = ("MO", "TU", "WE", "TH", "FR", "SA", "SU")[self.weekday]
        if not self.n:
            return s
        else:
            return "%s(%+d)" % (s, self.n)

# vim:ts=4:sw=4:et
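
The callable trick above is what makes expressions like MO(+2) work in relativedelta: calling a weekday instance returns a copy with the ordinal n attached. For instance (dates checked by hand, assuming a standard dateutil install):

import datetime
from dateutil.relativedelta import relativedelta, MO

print(repr(MO(-1)))                       # MO(-1)
d = datetime.date(2021, 1, 1)             # a Friday
print(d + relativedelta(weekday=MO(+2)))  # 2021-01-11, the second Monday after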

4
venv/lib/python3.9/site-packages/dateutil/_version.py vendored

@@ -1,4 +0,0 @@
# coding: utf-8
# file generated by setuptools_scm
# don't change, don't track in version control
version = '2.8.1'

89
venv/lib/python3.9/site-packages/dateutil/easter.py vendored

@@ -1,89 +0,0 @@
# -*- coding: utf-8 -*-
"""
This module offers a generic easter computing method for any given year, using
Western, Orthodox or Julian algorithms.
"""
import datetime
__all__ = ["easter", "EASTER_JULIAN", "EASTER_ORTHODOX", "EASTER_WESTERN"]
EASTER_JULIAN = 1
EASTER_ORTHODOX = 2
EASTER_WESTERN = 3
def easter(year, method=EASTER_WESTERN):
    """
    This method was ported from the work done by GM Arts,
    on top of the algorithm by Claus Tondering, which was
    based in part on the algorithm of Ouding (1940), as
    quoted in "Explanatory Supplement to the Astronomical
    Almanac", P. Kenneth Seidelmann, editor.

    This algorithm implements three different easter
    calculation methods:

    1 - Original calculation in Julian calendar, valid in
        dates after 326 AD
    2 - Original method, with date converted to Gregorian
        calendar, valid in years 1583 to 4099
    3 - Revised method, in Gregorian calendar, valid in
        years 1583 to 4099 as well

    These methods are represented by the constants:

    * ``EASTER_JULIAN = 1``
    * ``EASTER_ORTHODOX = 2``
    * ``EASTER_WESTERN = 3``

    The default method is method 3.

    More about the algorithm may be found at:

    `GM Arts: Easter Algorithms <http://www.gmarts.org/index.php?go=415>`_

    and

    `The Calendar FAQ: Easter <https://www.tondering.dk/claus/cal/easter.php>`_
    """
    if not (1 <= method <= 3):
        raise ValueError("invalid method")

    # g - Golden year - 1
    # c - Century
    # h - (23 - Epact) mod 30
    # i - Number of days from March 21 to Paschal Full Moon
    # j - Weekday for PFM (0=Sunday, etc)
    # p - Number of days from March 21 to Sunday on or before PFM
    #     (-6 to 28 methods 1 & 3, to 56 for method 2)
    # e - Extra days to add for method 2 (converting Julian
    #     date to Gregorian date)

    y = year
    g = y % 19
    e = 0
    if method < 3:
        # Old method
        i = (19*g + 15) % 30
        j = (y + y//4 + i) % 7
        if method == 2:
            # Extra dates to convert Julian to Gregorian date
            e = 10
            if y > 1600:
                e = e + y//100 - 16 - (y//100 - 16)//4
    else:
        # New method
        c = y//100
        h = (c - c//4 - (8*c + 13)//25 + 19*g + 15) % 30
        i = h - (h//28)*(1 - (h//28)*(29//(h + 1))*((21 - g)//11))
        j = (y + y//4 + i + 2 - c + c//4) % 7

    # p can be from -6 to 56 corresponding to dates 22 March to 23 May
    # (later dates apply to method 2, although 23 May never actually occurs)
    p = i - j + e
    d = 1 + (p + 27 + (p + 6)//40) % 31
    m = 3 + (p + 26)//30
    return datetime.date(int(y), int(m), int(d))
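
For a quick sanity check of the methods, the 2021 dates work out as follows (values verifiable against any published Easter table):

import datetime
from dateutil.easter import easter, EASTER_ORTHODOX

print(easter(2021))                   # datetime.date(2021, 4, 4) - Western default
print(easter(2021, EASTER_ORTHODOX))  # datetime.date(2021, 5, 2)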

61
venv/lib/python3.9/site-packages/dateutil/parser/__init__.py vendored

@@ -1,61 +0,0 @@
# -*- coding: utf-8 -*-
from ._parser import parse, parser, parserinfo, ParserError
from ._parser import DEFAULTPARSER, DEFAULTTZPARSER
from ._parser import UnknownTimezoneWarning
from ._parser import __doc__
from .isoparser import isoparser, isoparse
__all__ = ['parse', 'parser', 'parserinfo',
           'isoparse', 'isoparser',
           'ParserError',
           'UnknownTimezoneWarning']


###
# Deprecate portions of the private interface so that downstream code that
# is improperly relying on it is given *some* notice.


def __deprecated_private_func(f):
    from functools import wraps
    import warnings

    msg = ('{name} is a private function and may break without warning, '
           'it will be moved and or renamed in future versions.')
    msg = msg.format(name=f.__name__)

    @wraps(f)
    def deprecated_func(*args, **kwargs):
        warnings.warn(msg, DeprecationWarning)
        return f(*args, **kwargs)

    return deprecated_func


def __deprecate_private_class(c):
    import warnings

    msg = ('{name} is a private class and may break without warning, '
           'it will be moved and or renamed in future versions.')
    msg = msg.format(name=c.__name__)

    class private_class(c):
        __doc__ = c.__doc__

        def __init__(self, *args, **kwargs):
            warnings.warn(msg, DeprecationWarning)
            super(private_class, self).__init__(*args, **kwargs)

    private_class.__name__ = c.__name__

    return private_class


from ._parser import _timelex, _resultbase
from ._parser import _tzparser, _parsetz

_timelex = __deprecate_private_class(_timelex)
_tzparser = __deprecate_private_class(_tzparser)
_resultbase = __deprecate_private_class(_resultbase)
_parsetz = __deprecated_private_func(_parsetz)
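
The two wrappers re-export the private names unchanged except that any use now emits a DeprecationWarning. A small demonstration of the effect (this exercises dateutil's private _timelex, so it is only illustrative):

import warnings
from dateutil.parser import _timelex  # deprecated private class, still importable

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    _timelex("2018-04-09")             # instantiating triggers the wrapper
print(caught[0].category)              # <class 'DeprecationWarning'>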

1609
venv/lib/python3.9/site-packages/dateutil/parser/_parser.py vendored

File diff suppressed because it is too large

411
venv/lib/python3.9/site-packages/dateutil/parser/isoparser.py vendored

@@ -1,411 +0,0 @@
# -*- coding: utf-8 -*-
"""
This module offers a parser for ISO-8601 strings

It is intended to support all valid date, time and datetime formats per the
ISO-8601 specification.

.. versionadded:: 2.7.0
"""
from datetime import datetime, timedelta, time, date
import calendar
from dateutil import tz

from functools import wraps

import re
import six

__all__ = ["isoparse", "isoparser"]


def _takes_ascii(f):
    @wraps(f)
    def func(self, str_in, *args, **kwargs):
        # If it's a stream, read the whole thing
        str_in = getattr(str_in, 'read', lambda: str_in)()

        # If it's unicode, turn it into bytes, since ISO-8601 only covers ASCII
        if isinstance(str_in, six.text_type):
            # ASCII is the same in UTF-8
            try:
                str_in = str_in.encode('ascii')
            except UnicodeEncodeError as e:
                msg = 'ISO-8601 strings should contain only ASCII characters'
                six.raise_from(ValueError(msg), e)

        return f(self, str_in, *args, **kwargs)

    return func


class isoparser(object):
    def __init__(self, sep=None):
        """
        :param sep:
            A single character that separates date and time portions. If
            ``None``, the parser will accept any single character.
            For strict ISO-8601 adherence, pass ``'T'``.
        """
        if sep is not None:
            if (len(sep) != 1 or ord(sep) >= 128 or sep in '0123456789'):
                raise ValueError('Separator must be a single, non-numeric ' +
                                 'ASCII character')

            sep = sep.encode('ascii')

        self._sep = sep

    @_takes_ascii
    def isoparse(self, dt_str):
        """
        Parse an ISO-8601 datetime string into a :class:`datetime.datetime`.

        An ISO-8601 datetime string consists of a date portion, followed
        optionally by a time portion - the date and time portions are separated
        by a single character separator, which is ``T`` in the official
        standard. Incomplete date formats (such as ``YYYY-MM``) may *not* be
        combined with a time portion.

        Supported date formats are:

        Common:

        - ``YYYY``
        - ``YYYY-MM`` or ``YYYYMM``
        - ``YYYY-MM-DD`` or ``YYYYMMDD``

        Uncommon:

        - ``YYYY-Www`` or ``YYYYWww`` - ISO week (day defaults to 0)
        - ``YYYY-Www-D`` or ``YYYYWwwD`` - ISO week and day

        The ISO week and day numbering follows the same logic as
        :func:`datetime.date.isocalendar`.

        Supported time formats are:

        - ``hh``
        - ``hh:mm`` or ``hhmm``
        - ``hh:mm:ss`` or ``hhmmss``
        - ``hh:mm:ss.ssssss`` (Up to 6 sub-second digits)

        Midnight is a special case for `hh`, as the standard supports both
        00:00 and 24:00 as a representation. The decimal separator can be
        either a dot or a comma.

        .. caution::

            Support for fractional components other than seconds is part of the
            ISO-8601 standard, but is not currently implemented in this parser.

        Supported time zone offset formats are:

        - `Z` (UTC)
        - `±HH:MM`
        - `±HHMM`
        - `±HH`

        Offsets will be represented as :class:`dateutil.tz.tzoffset` objects,
        with the exception of UTC, which will be represented as
        :class:`dateutil.tz.tzutc`. Time zone offsets equivalent to UTC (such
        as `+00:00`) will also be represented as :class:`dateutil.tz.tzutc`.

        :param dt_str:
            A string or stream containing only an ISO-8601 datetime string

        :return:
            Returns a :class:`datetime.datetime` representing the string.
            Unspecified components default to their lowest value.

        .. warning::

            As of version 2.7.0, the strictness of the parser should not be
            considered a stable part of the contract. Any valid ISO-8601 string
            that parses correctly with the default settings will continue to
            parse correctly in future versions, but invalid strings that
            currently fail (e.g. ``2017-01-01T00:00+00:00:00``) are not
            guaranteed to continue failing in future versions if they encode
            a valid date.

        .. versionadded:: 2.7.0
        """
        components, pos = self._parse_isodate(dt_str)

        if len(dt_str) > pos:
            if self._sep is None or dt_str[pos:pos + 1] == self._sep:
                components += self._parse_isotime(dt_str[pos + 1:])
            else:
                raise ValueError('String contains unknown ISO components')

        if len(components) > 3 and components[3] == 24:
            components[3] = 0
            return datetime(*components) + timedelta(days=1)

        return datetime(*components)

    @_takes_ascii
    def parse_isodate(self, datestr):
        """
        Parse the date portion of an ISO string.

        :param datestr:
            The string portion of an ISO string, without a separator

        :return:
            Returns a :class:`datetime.date` object
        """
        components, pos = self._parse_isodate(datestr)
        if pos < len(datestr):
            raise ValueError('String contains unknown ISO ' +
                             'components: {}'.format(datestr))
        return date(*components)

    @_takes_ascii
    def parse_isotime(self, timestr):
        """
        Parse the time portion of an ISO string.

        :param timestr:
            The time portion of an ISO string, without a separator

        :return:
            Returns a :class:`datetime.time` object
        """
        components = self._parse_isotime(timestr)
        if components[0] == 24:
            components[0] = 0
        return time(*components)

    @_takes_ascii
    def parse_tzstr(self, tzstr, zero_as_utc=True):
        """
        Parse a valid ISO time zone string.

        See :func:`isoparser.isoparse` for details on supported formats.

        :param tzstr:
            A string representing an ISO time zone offset

        :param zero_as_utc:
            Whether to return :class:`dateutil.tz.tzutc` for zero-offset zones

        :return:
            Returns :class:`dateutil.tz.tzoffset` for offsets and
            :class:`dateutil.tz.tzutc` for ``Z`` and (if ``zero_as_utc`` is
            specified) offsets equivalent to UTC.
        """
        return self._parse_tzstr(tzstr, zero_as_utc=zero_as_utc)

    # Constants
    _DATE_SEP = b'-'
    _TIME_SEP = b':'
    _FRACTION_REGEX = re.compile(b'[\\.,]([0-9]+)')

    def _parse_isodate(self, dt_str):
        try:
            return self._parse_isodate_common(dt_str)
        except ValueError:
            return self._parse_isodate_uncommon(dt_str)

    def _parse_isodate_common(self, dt_str):
        len_str = len(dt_str)
        components = [1, 1, 1]

        if len_str < 4:
            raise ValueError('ISO string too short')

        # Year
        components[0] = int(dt_str[0:4])
        pos = 4
        if pos >= len_str:
            return components, pos

        has_sep = dt_str[pos:pos + 1] == self._DATE_SEP
        if has_sep:
            pos += 1

        # Month
        if len_str - pos < 2:
            raise ValueError('Invalid common month')

        components[1] = int(dt_str[pos:pos + 2])
        pos += 2

        if pos >= len_str:
            if has_sep:
                return components, pos
            else:
                raise ValueError('Invalid ISO format')

        if has_sep:
            if dt_str[pos:pos + 1] != self._DATE_SEP:
                raise ValueError('Invalid separator in ISO string')
            pos += 1

        # Day
        if len_str - pos < 2:
            raise ValueError('Invalid common day')
        components[2] = int(dt_str[pos:pos + 2])
        return components, pos + 2

    def _parse_isodate_uncommon(self, dt_str):
        if len(dt_str) < 4:
            raise ValueError('ISO string too short')

        # All ISO formats start with the year
        year = int(dt_str[0:4])

        has_sep = dt_str[4:5] == self._DATE_SEP

        pos = 4 + has_sep  # Skip '-' if it's there
        if dt_str[pos:pos + 1] == b'W':
            # YYYY-?Www-?D?
            pos += 1
            weekno = int(dt_str[pos:pos + 2])
            pos += 2

            dayno = 1
            if len(dt_str) > pos:
                if (dt_str[pos:pos + 1] == self._DATE_SEP) != has_sep:
                    raise ValueError('Inconsistent use of dash separator')

                pos += has_sep

                dayno = int(dt_str[pos:pos + 1])
                pos += 1

            base_date = self._calculate_weekdate(year, weekno, dayno)
        else:
            # YYYYDDD or YYYY-DDD
            if len(dt_str) - pos < 3:
                raise ValueError('Invalid ordinal day')

            ordinal_day = int(dt_str[pos:pos + 3])
            pos += 3

            if ordinal_day < 1 or ordinal_day > (365 + calendar.isleap(year)):
                raise ValueError('Invalid ordinal day' +
                                 ' {} for year {}'.format(ordinal_day, year))

            base_date = date(year, 1, 1) + timedelta(days=ordinal_day - 1)

        components = [base_date.year, base_date.month, base_date.day]
        return components, pos

    def _calculate_weekdate(self, year, week, day):
        """
        Calculate the date corresponding to the ISO year-week-day calendar.

        This function is effectively the inverse of
        :func:`datetime.date.isocalendar`.

        :param year:
            The year in the ISO calendar

        :param week:
            The week in the ISO calendar - range is [1, 53]

        :param day:
            The day in the ISO calendar - range is [1 (MON), 7 (SUN)]

        :return:
            Returns a :class:`datetime.date`
        """
        if not 0 < week < 54:
            raise ValueError('Invalid week: {}'.format(week))

        if not 0 < day < 8:     # Range is 1-7
            raise ValueError('Invalid weekday: {}'.format(day))

        # Get week 1 for the specific year:
        jan_4 = date(year, 1, 4)   # Week 1 always has January 4th in it
        week_1 = jan_4 - timedelta(days=jan_4.isocalendar()[2] - 1)

        # Now add the specific number of weeks and days to get what we want
        week_offset = (week - 1) * 7 + (day - 1)
        return week_1 + timedelta(days=week_offset)

    def _parse_isotime(self, timestr):
        len_str = len(timestr)
        components = [0, 0, 0, 0, None]
        pos = 0
        comp = -1

        if len(timestr) < 2:
            raise ValueError('ISO time too short')

        has_sep = len_str >= 3 and timestr[2:3] == self._TIME_SEP

        while pos < len_str and comp < 5:
            comp += 1

            if timestr[pos:pos + 1] in b'-+Zz':
                # Detect time zone boundary
                components[-1] = self._parse_tzstr(timestr[pos:])
                pos = len_str
                break

            if comp < 3:
                # Hour, minute, second
                components[comp] = int(timestr[pos:pos + 2])
                pos += 2
                if (has_sep and pos < len_str and
                        timestr[pos:pos + 1] == self._TIME_SEP):
                    pos += 1

            if comp == 3:
                # Fraction of a second
                frac = self._FRACTION_REGEX.match(timestr[pos:])
                if not frac:
                    continue

                us_str = frac.group(1)[:6]  # Truncate to microseconds
                components[comp] = int(us_str) * 10**(6 - len(us_str))
                pos += len(frac.group())

        if pos < len_str:
            raise ValueError('Unused components in ISO string')

        if components[0] == 24:
            # Standard supports 00:00 and 24:00 as representations of midnight
            if any(component != 0 for component in components[1:4]):
                raise ValueError('Hour may only be 24 at 24:00:00.000')

        return components

    def _parse_tzstr(self, tzstr, zero_as_utc=True):
        if tzstr == b'Z' or tzstr == b'z':
            return tz.UTC

        if len(tzstr) not in {3, 5, 6}:
            raise ValueError('Time zone offset must be 1, 3, 5 or 6 characters')

        if tzstr[0:1] == b'-':
            mult = -1
        elif tzstr[0:1] == b'+':
            mult = 1
        else:
            raise ValueError('Time zone offset requires sign')

        hours = int(tzstr[1:3])
        if len(tzstr) == 3:
            minutes = 0
        else:
            minutes = int(tzstr[(4 if tzstr[3:4] == self._TIME_SEP else 3):])

        if zero_as_utc and hours == 0 and minutes == 0:
            return tz.UTC
        else:
            if minutes > 59:
                raise ValueError('Invalid minutes in time zone offset')

            if hours > 23:
                raise ValueError('Invalid hours in time zone offset')

            return tz.tzoffset(None, mult * (hours * 60 + minutes) * 60)


DEFAULT_ISOPARSER = isoparser()
isoparse = DEFAULT_ISOPARSER.isoparse
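
A few examples of the formats the docstrings above promise, using the module-level convenience function:

from dateutil.parser import isoparse

print(isoparse("2018-04-09T13:37:00"))        # 2018-04-09 13:37:00
print(isoparse("2018-W15-1"))                 # ISO week date: 2018-04-09 00:00:00
print(isoparse("2018-04-09T24:00:00"))        # 24:00 rolls over to 2018-04-10 00:00:00
print(isoparse("2018-04-09T13:37:00+00:00"))  # +00:00 collapses to tzutc()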

599
venv/lib/python3.9/site-packages/dateutil/relativedelta.py vendored

@@ -1,599 +0,0 @@
# -*- coding: utf-8 -*-
import datetime
import calendar
import operator
from math import copysign
from six import integer_types
from warnings import warn
from ._common import weekday
MO, TU, WE, TH, FR, SA, SU = weekdays = tuple(weekday(x) for x in range(7))
__all__ = ["relativedelta", "MO", "TU", "WE", "TH", "FR", "SA", "SU"]
class relativedelta(object):
"""
The relativedelta type is designed to be applied to an existing datetime and
can replace specific components of that datetime, or represents an interval
of time.
It is based on the specification of the excellent work done by M.-A. Lemburg
in his
`mx.DateTime <https://www.egenix.com/products/python/mxBase/mxDateTime/>`_ extension.
However, notice that this type does *NOT* implement the same algorithm as
his work. Do *NOT* expect it to behave like mx.DateTime's counterpart.
There are two different ways to build a relativedelta instance. The
first one is passing it two date/datetime classes::
relativedelta(datetime1, datetime2)
The second one is passing it any number of the following keyword arguments::
relativedelta(arg1=x,arg2=y,arg3=z...)
year, month, day, hour, minute, second, microsecond:
Absolute information (argument is singular); adding or subtracting a
relativedelta with absolute information does not perform an arithmetic
operation, but rather REPLACES the corresponding value in the
original datetime with the value(s) in relativedelta.
years, months, weeks, days, hours, minutes, seconds, microseconds:
Relative information, may be negative (argument is plural); adding
or subtracting a relativedelta with relative information performs
the corresponding arithmetic operation on the original datetime value
with the information in the relativedelta.
weekday:
One of the weekday instances (MO, TU, etc) available in the
relativedelta module. These instances may receive a parameter N,
specifying the Nth weekday, which could be positive or negative
(like MO(+1) or MO(-2)). Not specifying it is the same as specifying
+1. You can also use an integer, where 0=MO. This argument is always
relative e.g. if the calculated date is already Monday, using MO(1)
or MO(-1) won't change the day. To effectively make it absolute, use
it in combination with the day argument (e.g. day=1, MO(1) for first
Monday of the month).
leapdays:
Will add given days to the date found, if year is a leap
year, and the date found is post 28 of february.
yearday, nlyearday:
Set the yearday or the non-leap year day (jump leap days).
These are converted to day/month/leapdays information.
There are relative and absolute forms of the keyword
arguments. The plural is relative, and the singular is
absolute. For each argument in the order below, the absolute form
is applied first (by setting each attribute to that value) and
then the relative form (by adding the value to the attribute).
The order of attributes considered when this relativedelta is
added to a datetime is:
1. Year
2. Month
3. Day
4. Hours
5. Minutes
6. Seconds
7. Microseconds
Finally, weekday is applied, using the rule described above.
For example
>>> from datetime import datetime
>>> from dateutil.relativedelta import relativedelta, MO
>>> dt = datetime(2018, 4, 9, 13, 37, 0)
>>> delta = relativedelta(hours=25, day=1, weekday=MO(1))
>>> dt + delta
datetime.datetime(2018, 4, 2, 14, 37)
First, the day is set to 1 (the first of the month), then 25 hours
are added, to get to the 2nd day and 14th hour, finally the
weekday is applied, but since the 2nd is already a Monday there is
no effect.
"""
def __init__(self, dt1=None, dt2=None,
years=0, months=0, days=0, leapdays=0, weeks=0,
hours=0, minutes=0, seconds=0, microseconds=0,
year=None, month=None, day=None, weekday=None,
yearday=None, nlyearday=None,
hour=None, minute=None, second=None, microsecond=None):
if dt1 and dt2:
# datetime is a subclass of date. So both must be date
if not (isinstance(dt1, datetime.date) and
isinstance(dt2, datetime.date)):
raise TypeError("relativedelta only diffs datetime/date")
# We allow two dates, or two datetimes, so we coerce them to be
# of the same type
if (isinstance(dt1, datetime.datetime) !=
isinstance(dt2, datetime.datetime)):
if not isinstance(dt1, datetime.datetime):
dt1 = datetime.datetime.fromordinal(dt1.toordinal())
elif not isinstance(dt2, datetime.datetime):
dt2 = datetime.datetime.fromordinal(dt2.toordinal())
self.years = 0
self.months = 0
self.days = 0
self.leapdays = 0
self.hours = 0
self.minutes = 0
self.seconds = 0
self.microseconds = 0
self.year = None
self.month = None
self.day = None
self.weekday = None
self.hour = None
self.minute = None
self.second = None
self.microsecond = None
self._has_time = 0
# Get year / month delta between the two
months = (dt1.year - dt2.year) * 12 + (dt1.month - dt2.month)
self._set_months(months)
# Remove the year/month delta so the timedelta is just well-defined
# time units (seconds, days and microseconds)
dtm = self.__radd__(dt2)
# If we've overshot our target, make an adjustment
if dt1 < dt2:
compare = operator.gt
increment = 1
else:
compare = operator.lt
increment = -1
while compare(dt1, dtm):
months += increment
self._set_months(months)
dtm = self.__radd__(dt2)
# Get the timedelta between the "months-adjusted" date and dt1
delta = dt1 - dtm
self.seconds = delta.seconds + delta.days * 86400
self.microseconds = delta.microseconds
else:
# Check for non-integer values in integer-only quantities
if any(x is not None and x != int(x) for x in (years, months)):
raise ValueError("Non-integer years and months are "
"ambiguous and not currently supported.")
# Relative information
self.years = int(years)
self.months = int(months)
self.days = days + weeks * 7
self.leapdays = leapdays
self.hours = hours
self.minutes = minutes
self.seconds = seconds
self.microseconds = microseconds
# Absolute information
self.year = year
self.month = month
self.day = day
self.hour = hour
self.minute = minute
self.second = second
self.microsecond = microsecond
if any(x is not None and int(x) != x
for x in (year, month, day, hour,
minute, second, microsecond)):
# For now we'll deprecate floats - later it'll be an error.
warn("Non-integer value passed as absolute information. " +
"This is not a well-defined condition and will raise " +
"errors in future versions.", DeprecationWarning)
if isinstance(weekday, integer_types):
self.weekday = weekdays[weekday]
else:
self.weekday = weekday
yday = 0
if nlyearday:
yday = nlyearday
elif yearday:
yday = yearday
if yearday > 59:
self.leapdays = -1
if yday:
ydayidx = [31, 59, 90, 120, 151, 181, 212,
243, 273, 304, 334, 366]
for idx, ydays in enumerate(ydayidx):
if yday <= ydays:
self.month = idx+1
if idx == 0:
self.day = yday
else:
self.day = yday-ydayidx[idx-1]
break
else:
raise ValueError("invalid year day (%d)" % yday)
self._fix()
def _fix(self):
if abs(self.microseconds) > 999999:
s = _sign(self.microseconds)
div, mod = divmod(self.microseconds * s, 1000000)
self.microseconds = mod * s
self.seconds += div * s
if abs(self.seconds) > 59:
s = _sign(self.seconds)
div, mod = divmod(self.seconds * s, 60)
self.seconds = mod * s
self.minutes += div * s
if abs(self.minutes) > 59:
s = _sign(self.minutes)
div, mod = divmod(self.minutes * s, 60)
self.minutes = mod * s
self.hours += div * s
if abs(self.hours) > 23:
s = _sign(self.hours)
div, mod = divmod(self.hours * s, 24)
self.hours = mod * s
self.days += div * s
if abs(self.months) > 11:
s = _sign(self.months)
div, mod = divmod(self.months * s, 12)
self.months = mod * s
self.years += div * s
if (self.hours or self.minutes or self.seconds or self.microseconds
or self.hour is not None or self.minute is not None or
self.second is not None or self.microsecond is not None):
self._has_time = 1
else:
self._has_time = 0
@property
def weeks(self):
return int(self.days / 7.0)
@weeks.setter
def weeks(self, value):
self.days = self.days - (self.weeks * 7) + value * 7
def _set_months(self, months):
self.months = months
if abs(self.months) > 11:
s = _sign(self.months)
div, mod = divmod(self.months * s, 12)
self.months = mod * s
self.years = div * s
else:
self.years = 0
def normalized(self):
"""
Return a version of this object represented entirely using integer
values for the relative attributes.
>>> relativedelta(days=1.5, hours=2).normalized()
relativedelta(days=+1, hours=+14)
:return:
Returns a :class:`dateutil.relativedelta.relativedelta` object.
"""
# Cascade remainders down (rounding each to roughly nearest microsecond)
days = int(self.days)
hours_f = round(self.hours + 24 * (self.days - days), 11)
hours = int(hours_f)
minutes_f = round(self.minutes + 60 * (hours_f - hours), 10)
minutes = int(minutes_f)
seconds_f = round(self.seconds + 60 * (minutes_f - minutes), 8)
seconds = int(seconds_f)
microseconds = round(self.microseconds + 1e6 * (seconds_f - seconds))
# Constructor carries overflow back up with call to _fix()
return self.__class__(years=self.years, months=self.months,
days=days, hours=hours, minutes=minutes,
seconds=seconds, microseconds=microseconds,
leapdays=self.leapdays, year=self.year,
month=self.month, day=self.day,
weekday=self.weekday, hour=self.hour,
minute=self.minute, second=self.second,
microsecond=self.microsecond)
def __add__(self, other):
if isinstance(other, relativedelta):
return self.__class__(years=other.years + self.years,
months=other.months + self.months,
days=other.days + self.days,
hours=other.hours + self.hours,
minutes=other.minutes + self.minutes,
seconds=other.seconds + self.seconds,
microseconds=(other.microseconds +
self.microseconds),
leapdays=other.leapdays or self.leapdays,
year=(other.year if other.year is not None
else self.year),
month=(other.month if other.month is not None
else self.month),
day=(other.day if other.day is not None
else self.day),
weekday=(other.weekday if other.weekday is not None
else self.weekday),
hour=(other.hour if other.hour is not None
else self.hour),
minute=(other.minute if other.minute is not None
else self.minute),
second=(other.second if other.second is not None
else self.second),
microsecond=(other.microsecond if other.microsecond
is not None else
self.microsecond))
if isinstance(other, datetime.timedelta):
return self.__class__(years=self.years,
months=self.months,
days=self.days + other.days,
hours=self.hours,
minutes=self.minutes,
seconds=self.seconds + other.seconds,
microseconds=self.microseconds + other.microseconds,
leapdays=self.leapdays,
year=self.year,
month=self.month,
day=self.day,
weekday=self.weekday,
hour=self.hour,
minute=self.minute,
second=self.second,
microsecond=self.microsecond)
if not isinstance(other, datetime.date):
return NotImplemented
elif self._has_time and not isinstance(other, datetime.datetime):
other = datetime.datetime.fromordinal(other.toordinal())
year = (self.year or other.year)+self.years
month = self.month or other.month
if self.months:
assert 1 <= abs(self.months) <= 12
month += self.months
if month > 12:
year += 1
month -= 12
elif month < 1:
year -= 1
month += 12
day = min(calendar.monthrange(year, month)[1],
self.day or other.day)
repl = {"year": year, "month": month, "day": day}
for attr in ["hour", "minute", "second", "microsecond"]:
value = getattr(self, attr)
if value is not None:
repl[attr] = value
days = self.days
if self.leapdays and month > 2 and calendar.isleap(year):
days += self.leapdays
ret = (other.replace(**repl)
+ datetime.timedelta(days=days,
hours=self.hours,
minutes=self.minutes,
seconds=self.seconds,
microseconds=self.microseconds))
if self.weekday:
weekday, nth = self.weekday.weekday, self.weekday.n or 1
jumpdays = (abs(nth) - 1) * 7
if nth > 0:
jumpdays += (7 - ret.weekday() + weekday) % 7
else:
jumpdays += (ret.weekday() - weekday) % 7
jumpdays *= -1
ret += datetime.timedelta(days=jumpdays)
return ret
def __radd__(self, other):
return self.__add__(other)
def __rsub__(self, other):
return self.__neg__().__radd__(other)
def __sub__(self, other):
if not isinstance(other, relativedelta):
return NotImplemented # In case the other object defines __rsub__
return self.__class__(years=self.years - other.years,
months=self.months - other.months,
days=self.days - other.days,
hours=self.hours - other.hours,
minutes=self.minutes - other.minutes,
seconds=self.seconds - other.seconds,
microseconds=self.microseconds - other.microseconds,
leapdays=self.leapdays or other.leapdays,
year=(self.year if self.year is not None
else other.year),
month=(self.month if self.month is not None else
other.month),
day=(self.day if self.day is not None else
other.day),
weekday=(self.weekday if self.weekday is not None else
other.weekday),
hour=(self.hour if self.hour is not None else
other.hour),
minute=(self.minute if self.minute is not None else
other.minute),
second=(self.second if self.second is not None else
other.second),
microsecond=(self.microsecond if self.microsecond
is not None else
other.microsecond))
def __abs__(self):
return self.__class__(years=abs(self.years),
months=abs(self.months),
days=abs(self.days),
hours=abs(self.hours),
minutes=abs(self.minutes),
seconds=abs(self.seconds),
microseconds=abs(self.microseconds),
leapdays=self.leapdays,
year=self.year,
month=self.month,
day=self.day,
weekday=self.weekday,
hour=self.hour,
minute=self.minute,
second=self.second,
microsecond=self.microsecond)
def __neg__(self):
return self.__class__(years=-self.years,
months=-self.months,
days=-self.days,
hours=-self.hours,
minutes=-self.minutes,
seconds=-self.seconds,
microseconds=-self.microseconds,
leapdays=self.leapdays,
year=self.year,
month=self.month,
day=self.day,
weekday=self.weekday,
hour=self.hour,
minute=self.minute,
second=self.second,
microsecond=self.microsecond)
def __bool__(self):
return not (not self.years and
not self.months and
not self.days and
not self.hours and
not self.minutes and
not self.seconds and
not self.microseconds and
not self.leapdays and
self.year is None and
self.month is None and
self.day is None and
self.weekday is None and
self.hour is None and
self.minute is None and
self.second is None and
self.microsecond is None)
# Compatibility with Python 2.x
__nonzero__ = __bool__
def __mul__(self, other):
try:
f = float(other)
except TypeError:
return NotImplemented
return self.__class__(years=int(self.years * f),
months=int(self.months * f),
days=int(self.days * f),
hours=int(self.hours * f),
minutes=int(self.minutes * f),
seconds=int(self.seconds * f),
microseconds=int(self.microseconds * f),
leapdays=self.leapdays,
year=self.year,
month=self.month,
day=self.day,
weekday=self.weekday,
hour=self.hour,
minute=self.minute,
second=self.second,
microsecond=self.microsecond)
__rmul__ = __mul__
def __eq__(self, other):
if not isinstance(other, relativedelta):
return NotImplemented
if self.weekday or other.weekday:
if not self.weekday or not other.weekday:
return False
if self.weekday.weekday != other.weekday.weekday:
return False
n1, n2 = self.weekday.n, other.weekday.n
if n1 != n2 and not ((not n1 or n1 == 1) and (not n2 or n2 == 1)):
return False
return (self.years == other.years and
self.months == other.months and
self.days == other.days and
self.hours == other.hours and
self.minutes == other.minutes and
self.seconds == other.seconds and
self.microseconds == other.microseconds and
self.leapdays == other.leapdays and
self.year == other.year and
self.month == other.month and
self.day == other.day and
self.hour == other.hour and
self.minute == other.minute and
self.second == other.second and
self.microsecond == other.microsecond)
def __hash__(self):
return hash((
self.weekday,
self.years,
self.months,
self.days,
self.hours,
self.minutes,
self.seconds,
self.microseconds,
self.leapdays,
self.year,
self.month,
self.day,
self.hour,
self.minute,
self.second,
self.microsecond,
))
    def __ne__(self, other):
        eq = self.__eq__(other)
        return eq if eq is NotImplemented else not eq
def __div__(self, other):
try:
reciprocal = 1 / float(other)
except TypeError:
return NotImplemented
return self.__mul__(reciprocal)
__truediv__ = __div__
def __repr__(self):
l = []
for attr in ["years", "months", "days", "leapdays",
"hours", "minutes", "seconds", "microseconds"]:
value = getattr(self, attr)
if value:
l.append("{attr}={value:+g}".format(attr=attr, value=value))
for attr in ["year", "month", "day", "weekday",
"hour", "minute", "second", "microsecond"]:
value = getattr(self, attr)
if value is not None:
l.append("{attr}={value}".format(attr=attr, value=repr(value)))
return "{classname}({attrs})".format(classname=self.__class__.__name__,
attrs=", ".join(l))
def _sign(x):
return int(copysign(1, x))
# vim:ts=4:sw=4:et
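# --- Illustrative sketch (not part of the original module) -----------------
# A minimal demonstration of the operator semantics defined above, assuming
# python-dateutil is installed; expected output is shown in comments.
from dateutil.relativedelta import relativedelta

a = relativedelta(years=1, months=2, day=15)   # relative fields + absolute day
b = relativedelta(months=1)
print(a - b)        # relativedelta(years=+1, months=+1, day=15)
print(-b)           # relativedelta(months=-1)
print(b * 2.5)      # int() truncates: relativedelta(months=+2)
print(bool(relativedelta()))   # False: an empty delta is falsy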

File diff suppressed because it is too large

@@ -1,12 +0,0 @@
# -*- coding: utf-8 -*-
from .tz import *
from .tz import __doc__
__all__ = ["tzutc", "tzoffset", "tzlocal", "tzfile", "tzrange",
"tzstr", "tzical", "tzwin", "tzwinlocal", "gettz",
"enfold", "datetime_ambiguous", "datetime_exists",
"resolve_imaginary", "UTC", "DeprecatedTzFormatWarning"]
class DeprecatedTzFormatWarning(Warning):
"""Warning raised when time zones are parsed from deprecated formats."""

@@ -1,419 +0,0 @@
from six import PY2
from functools import wraps
from datetime import datetime, timedelta, tzinfo
ZERO = timedelta(0)
__all__ = ['tzname_in_python2', 'enfold']
def tzname_in_python2(namefunc):
"""Change unicode output into bytestrings in Python 2
tzname() API changed in Python 3. It used to return bytes, but was changed
to unicode strings
"""
if PY2:
@wraps(namefunc)
def adjust_encoding(*args, **kwargs):
name = namefunc(*args, **kwargs)
if name is not None:
name = name.encode()
return name
return adjust_encoding
else:
return namefunc
# The following is adapted from Alexander Belopolsky's tz library
# https://github.com/abalkin/tz
if hasattr(datetime, 'fold'):
    # Python 3.6+: datetime already has a native ``fold`` attribute
def enfold(dt, fold=1):
"""
Provides a unified interface for assigning the ``fold`` attribute to
datetimes both before and after the implementation of PEP-495.
:param fold:
The value for the ``fold`` attribute in the returned datetime. This
should be either 0 or 1.
:return:
Returns an object for which ``getattr(dt, 'fold', 0)`` returns
``fold`` for all versions of Python. In versions prior to
Python 3.6, this is a ``_DatetimeWithFold`` object, which is a
subclass of :py:class:`datetime.datetime` with the ``fold``
attribute added, if ``fold`` is 1.
.. versionadded:: 2.6.0
"""
return dt.replace(fold=fold)
else:
class _DatetimeWithFold(datetime):
"""
This is a class designed to provide a PEP 495-compliant interface for
Python versions before 3.6. It is used only for dates in a fold, so
the ``fold`` attribute is fixed at ``1``.
.. versionadded:: 2.6.0
"""
__slots__ = ()
def replace(self, *args, **kwargs):
"""
Return a datetime with the same attributes, except for those
attributes given new values by whichever keyword arguments are
specified. Note that tzinfo=None can be specified to create a naive
datetime from an aware datetime with no conversion of date and time
data.
This is reimplemented in ``_DatetimeWithFold`` because pypy3 will
return a ``datetime.datetime`` even if ``fold`` is unchanged.
"""
argnames = (
'year', 'month', 'day', 'hour', 'minute', 'second',
'microsecond', 'tzinfo'
)
for arg, argname in zip(args, argnames):
if argname in kwargs:
raise TypeError('Duplicate argument: {}'.format(argname))
kwargs[argname] = arg
for argname in argnames:
if argname not in kwargs:
kwargs[argname] = getattr(self, argname)
dt_class = self.__class__ if kwargs.get('fold', 1) else datetime
return dt_class(**kwargs)
@property
def fold(self):
return 1
def enfold(dt, fold=1):
"""
Provides a unified interface for assigning the ``fold`` attribute to
datetimes both before and after the implementation of PEP-495.
:param fold:
The value for the ``fold`` attribute in the returned datetime. This
should be either 0 or 1.
:return:
Returns an object for which ``getattr(dt, 'fold', 0)`` returns
``fold`` for all versions of Python. In versions prior to
Python 3.6, this is a ``_DatetimeWithFold`` object, which is a
subclass of :py:class:`datetime.datetime` with the ``fold``
attribute added, if ``fold`` is 1.
.. versionadded:: 2.6.0
"""
if getattr(dt, 'fold', 0) == fold:
return dt
args = dt.timetuple()[:6]
args += (dt.microsecond, dt.tzinfo)
if fold:
return _DatetimeWithFold(*args)
else:
return datetime(*args)
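# --- Illustrative sketch (not part of the original module; run standalone) --
# enfold() marks the *second* occurrence of an ambiguous wall time (a DST
# fall-back) in a version-independent way. Assumes IANA data is available.
from datetime import datetime, timedelta
from dateutil import tz

_dt = datetime(2020, 11, 1, 1, 30, tzinfo=tz.gettz('America/New_York'))
print(tz.enfold(_dt, fold=0).utcoffset() == timedelta(hours=-4))  # True (EDT)
print(tz.enfold(_dt, fold=1).utcoffset() == timedelta(hours=-5))  # True (EST)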
def _validate_fromutc_inputs(f):
"""
The CPython version of ``fromutc`` checks that the input is a ``datetime``
object and that ``self`` is attached as its ``tzinfo``.
"""
@wraps(f)
def fromutc(self, dt):
if not isinstance(dt, datetime):
raise TypeError("fromutc() requires a datetime argument")
if dt.tzinfo is not self:
raise ValueError("dt.tzinfo is not self")
return f(self, dt)
return fromutc
class _tzinfo(tzinfo):
"""
Base class for all ``dateutil`` ``tzinfo`` objects.
"""
def is_ambiguous(self, dt):
"""
Whether or not the "wall time" of a given datetime is ambiguous in this
zone.
:param dt:
A :py:class:`datetime.datetime`, naive or time zone aware.
:return:
Returns ``True`` if ambiguous, ``False`` otherwise.
.. versionadded:: 2.6.0
"""
dt = dt.replace(tzinfo=self)
wall_0 = enfold(dt, fold=0)
wall_1 = enfold(dt, fold=1)
same_offset = wall_0.utcoffset() == wall_1.utcoffset()
same_dt = wall_0.replace(tzinfo=None) == wall_1.replace(tzinfo=None)
return same_dt and not same_offset
def _fold_status(self, dt_utc, dt_wall):
"""
Determine the fold status of a "wall" datetime, given a representation
of the same datetime as a (naive) UTC datetime. This is calculated based
on the assumption that ``dt.utcoffset() - dt.dst()`` is constant for all
datetimes, and that this offset is the actual number of hours separating
``dt_utc`` and ``dt_wall``.
:param dt_utc:
Representation of the datetime as UTC
:param dt_wall:
Representation of the datetime as "wall time". This parameter must
either have a `fold` attribute or have a fold-naive
:class:`datetime.tzinfo` attached, otherwise the calculation may
fail.
"""
if self.is_ambiguous(dt_wall):
delta_wall = dt_wall - dt_utc
_fold = int(delta_wall == (dt_utc.utcoffset() - dt_utc.dst()))
else:
_fold = 0
return _fold
def _fold(self, dt):
return getattr(dt, 'fold', 0)
def _fromutc(self, dt):
"""
Given a timezone-aware datetime in a given timezone, calculates a
timezone-aware datetime in a new timezone.
Since this is the one time that we *know* we have an unambiguous
datetime object, we take this opportunity to determine whether the
datetime is ambiguous and in a "fold" state (e.g. if it's the first
occurrence, chronologically, of the ambiguous datetime).
:param dt:
A timezone-aware :class:`datetime.datetime` object.
"""
# Re-implement the algorithm from Python's datetime.py
dtoff = dt.utcoffset()
if dtoff is None:
raise ValueError("fromutc() requires a non-None utcoffset() "
"result")
# The original datetime.py code assumes that `dst()` defaults to
# zero during ambiguous times. PEP 495 inverts this presumption, so
# for pre-PEP 495 versions of python, we need to tweak the algorithm.
dtdst = dt.dst()
if dtdst is None:
raise ValueError("fromutc() requires a non-None dst() result")
delta = dtoff - dtdst
dt += delta
# Set fold=1 so we can default to being in the fold for
# ambiguous dates.
dtdst = enfold(dt, fold=1).dst()
if dtdst is None:
raise ValueError("fromutc(): dt.dst gave inconsistent "
"results; cannot convert")
return dt + dtdst
@_validate_fromutc_inputs
def fromutc(self, dt):
"""
Given a timezone-aware datetime in a given timezone, calculates a
timezone-aware datetime in a new timezone.
Since this is the one time that we *know* we have an unambiguous
datetime object, we take this opportunity to determine whether the
datetime is ambiguous and in a "fold" state (e.g. if it's the first
occurrence, chronologically, of the ambiguous datetime).
:param dt:
A timezone-aware :class:`datetime.datetime` object.
"""
dt_wall = self._fromutc(dt)
# Calculate the fold status given the two datetimes.
_fold = self._fold_status(dt, dt_wall)
# Set the default fold value for ambiguous dates
return enfold(dt_wall, fold=_fold)
class tzrangebase(_tzinfo):
"""
This is an abstract base class for time zones represented by an annual
transition into and out of DST. Child classes should implement the following
methods:
* ``__init__(self, *args, **kwargs)``
* ``transitions(self, year)`` - this is expected to return a tuple of
datetimes representing the DST on and off transitions in standard
time.
A fully initialized ``tzrangebase`` subclass should also provide the
following attributes:
* ``hasdst``: Boolean whether or not the zone uses DST.
* ``_dst_offset`` / ``_std_offset``: :class:`datetime.timedelta` objects
representing the respective UTC offsets.
* ``_dst_abbr`` / ``_std_abbr``: Strings representing the timezone short
abbreviations in DST and STD, respectively.
* ``_hasdst``: Whether or not the zone has DST.
.. versionadded:: 2.6.0
"""
def __init__(self):
raise NotImplementedError('tzrangebase is an abstract base class')
def utcoffset(self, dt):
isdst = self._isdst(dt)
if isdst is None:
return None
elif isdst:
return self._dst_offset
else:
return self._std_offset
def dst(self, dt):
isdst = self._isdst(dt)
if isdst is None:
return None
elif isdst:
return self._dst_base_offset
else:
return ZERO
@tzname_in_python2
def tzname(self, dt):
if self._isdst(dt):
return self._dst_abbr
else:
return self._std_abbr
def fromutc(self, dt):
""" Given a datetime in UTC, return local time """
if not isinstance(dt, datetime):
raise TypeError("fromutc() requires a datetime argument")
if dt.tzinfo is not self:
raise ValueError("dt.tzinfo is not self")
# Get transitions - if there are none, fixed offset
transitions = self.transitions(dt.year)
if transitions is None:
return dt + self.utcoffset(dt)
# Get the transition times in UTC
dston, dstoff = transitions
dston -= self._std_offset
dstoff -= self._std_offset
utc_transitions = (dston, dstoff)
dt_utc = dt.replace(tzinfo=None)
isdst = self._naive_isdst(dt_utc, utc_transitions)
if isdst:
dt_wall = dt + self._dst_offset
else:
dt_wall = dt + self._std_offset
_fold = int(not isdst and self.is_ambiguous(dt_wall))
return enfold(dt_wall, fold=_fold)
def is_ambiguous(self, dt):
"""
Whether or not the "wall time" of a given datetime is ambiguous in this
zone.
:param dt:
A :py:class:`datetime.datetime`, naive or time zone aware.
:return:
Returns ``True`` if ambiguous, ``False`` otherwise.
.. versionadded:: 2.6.0
"""
if not self.hasdst:
return False
start, end = self.transitions(dt.year)
dt = dt.replace(tzinfo=None)
return (end <= dt < end + self._dst_base_offset)
def _isdst(self, dt):
if not self.hasdst:
return False
elif dt is None:
return None
transitions = self.transitions(dt.year)
if transitions is None:
return False
dt = dt.replace(tzinfo=None)
isdst = self._naive_isdst(dt, transitions)
# Handle ambiguous dates
if not isdst and self.is_ambiguous(dt):
return not self._fold(dt)
else:
return isdst
def _naive_isdst(self, dt, transitions):
dston, dstoff = transitions
dt = dt.replace(tzinfo=None)
if dston < dstoff:
isdst = dston <= dt < dstoff
else:
isdst = not dstoff <= dt < dston
return isdst
@property
def _dst_base_offset(self):
return self._dst_offset - self._std_offset
__hash__ = None
def __ne__(self, other):
return not (self == other)
def __repr__(self):
return "%s(...)" % self.__class__.__name__
__reduce__ = object.__reduce__
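# --- Illustrative sketch (not part of the original module; run standalone) --
# How the fold machinery above behaves for a concrete zone; assumes IANA data
# is available via dateutil.tz.gettz.
from datetime import datetime
from dateutil import tz

NYC = tz.gettz('America/New_York')
wall = datetime(2020, 11, 1, 1, 30)           # occurs twice on fall-back day
print(NYC.is_ambiguous(wall))                 # True
print(tz.enfold(wall.replace(tzinfo=NYC), fold=1).dst())  # 0:00:00 (STD side)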

@@ -1,80 +0,0 @@
from datetime import timedelta
import weakref
from collections import OrderedDict
from six.moves import _thread
class _TzSingleton(type):
def __init__(cls, *args, **kwargs):
cls.__instance = None
super(_TzSingleton, cls).__init__(*args, **kwargs)
def __call__(cls):
if cls.__instance is None:
cls.__instance = super(_TzSingleton, cls).__call__()
return cls.__instance
class _TzFactory(type):
def instance(cls, *args, **kwargs):
"""Alternate constructor that returns a fresh instance"""
return type.__call__(cls, *args, **kwargs)
class _TzOffsetFactory(_TzFactory):
def __init__(cls, *args, **kwargs):
cls.__instances = weakref.WeakValueDictionary()
cls.__strong_cache = OrderedDict()
cls.__strong_cache_size = 8
cls._cache_lock = _thread.allocate_lock()
def __call__(cls, name, offset):
if isinstance(offset, timedelta):
key = (name, offset.total_seconds())
else:
key = (name, offset)
instance = cls.__instances.get(key, None)
if instance is None:
instance = cls.__instances.setdefault(key,
cls.instance(name, offset))
# This lock may not be necessary in Python 3. See GH issue #901
with cls._cache_lock:
cls.__strong_cache[key] = cls.__strong_cache.pop(key, instance)
# Remove an item if the strong cache is overpopulated
if len(cls.__strong_cache) > cls.__strong_cache_size:
cls.__strong_cache.popitem(last=False)
return instance
class _TzStrFactory(_TzFactory):
def __init__(cls, *args, **kwargs):
cls.__instances = weakref.WeakValueDictionary()
cls.__strong_cache = OrderedDict()
cls.__strong_cache_size = 8
cls.__cache_lock = _thread.allocate_lock()
def __call__(cls, s, posix_offset=False):
key = (s, posix_offset)
instance = cls.__instances.get(key, None)
if instance is None:
instance = cls.__instances.setdefault(key,
cls.instance(s, posix_offset))
# This lock may not be necessary in Python 3. See GH issue #901
with cls.__cache_lock:
cls.__strong_cache[key] = cls.__strong_cache.pop(key, instance)
# Remove an item if the strong cache is overpopulated
if len(cls.__strong_cache) > cls.__strong_cache_size:
cls.__strong_cache.popitem(last=False)
return instance
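# --- Illustrative sketch (not part of the original module; run standalone) --
# The caching behavior these metaclasses give the public tz classes:
from dateutil import tz

assert tz.tzutc() is tz.tzutc()                                  # _TzSingleton
assert tz.tzoffset('EST', -18000) is tz.tzoffset('EST', -18000)  # cached
fresh = tz.tzoffset.instance('EST', -18000)    # alternate ctor bypasses cache
assert fresh is not tz.tzoffset('EST', -18000)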

File diff suppressed because it is too large

@@ -1,370 +0,0 @@
# -*- coding: utf-8 -*-
"""
This module provides an interface to the native time zone data on Windows,
including :py:class:`datetime.tzinfo` implementations.
Attempting to import this module on a non-Windows platform will raise an
:py:obj:`ImportError`.
"""
# This code was originally contributed by Jeffrey Harris.
import datetime
import struct
from six.moves import winreg
from six import text_type
try:
import ctypes
from ctypes import wintypes
except ValueError:
# ValueError is raised on non-Windows systems for some horrible reason.
raise ImportError("Running tzwin on non-Windows system")
from ._common import tzrangebase
__all__ = ["tzwin", "tzwinlocal", "tzres"]
ONEWEEK = datetime.timedelta(7)
TZKEYNAMENT = r"SOFTWARE\Microsoft\Windows NT\CurrentVersion\Time Zones"
TZKEYNAME9X = r"SOFTWARE\Microsoft\Windows\CurrentVersion\Time Zones"
TZLOCALKEYNAME = r"SYSTEM\CurrentControlSet\Control\TimeZoneInformation"
def _settzkeyname():
handle = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE)
try:
winreg.OpenKey(handle, TZKEYNAMENT).Close()
TZKEYNAME = TZKEYNAMENT
except WindowsError:
TZKEYNAME = TZKEYNAME9X
handle.Close()
return TZKEYNAME
TZKEYNAME = _settzkeyname()
class tzres(object):
"""
Class for accessing ``tzres.dll``, which contains timezone name related
resources.
.. versionadded:: 2.5.0
"""
p_wchar = ctypes.POINTER(wintypes.WCHAR) # Pointer to a wide char
def __init__(self, tzres_loc='tzres.dll'):
# Load the user32 DLL so we can load strings from tzres
user32 = ctypes.WinDLL('user32')
# Specify the LoadStringW function
user32.LoadStringW.argtypes = (wintypes.HINSTANCE,
wintypes.UINT,
wintypes.LPWSTR,
ctypes.c_int)
self.LoadStringW = user32.LoadStringW
self._tzres = ctypes.WinDLL(tzres_loc)
self.tzres_loc = tzres_loc
def load_name(self, offset):
"""
Load a timezone name from a DLL offset (integer).
>>> from dateutil.tzwin import tzres
>>> tzr = tzres()
>>> print(tzr.load_name(112))
'Eastern Standard Time'
:param offset:
A positive integer value referring to a string from the tzres dll.
.. note::
Offsets found in the registry are generally of the form
``@tzres.dll,-114``. The offset in this case is 114, not -114.
"""
resource = self.p_wchar()
lpBuffer = ctypes.cast(ctypes.byref(resource), wintypes.LPWSTR)
nchar = self.LoadStringW(self._tzres._handle, offset, lpBuffer, 0)
return resource[:nchar]
def name_from_string(self, tzname_str):
"""
Parse strings as returned from the Windows registry into the time zone
name as defined in the registry.
>>> from dateutil.tzwin import tzres
>>> tzr = tzres()
>>> print(tzr.name_from_string('@tzres.dll,-251'))
'Dateline Daylight Time'
>>> print(tzr.name_from_string('Eastern Standard Time'))
'Eastern Standard Time'
:param tzname_str:
A timezone name string as returned from a Windows registry key.
:return:
Returns the localized timezone string from tzres.dll if the string
is of the form `@tzres.dll,-offset`, else returns the input string.
"""
if not tzname_str.startswith('@'):
return tzname_str
name_splt = tzname_str.split(',-')
try:
offset = int(name_splt[1])
        except (IndexError, ValueError):
raise ValueError("Malformed timezone string.")
return self.load_name(offset)
class tzwinbase(tzrangebase):
"""tzinfo class based on win32's timezones available in the registry."""
def __init__(self):
raise NotImplementedError('tzwinbase is an abstract base class')
def __eq__(self, other):
# Compare on all relevant dimensions, including name.
if not isinstance(other, tzwinbase):
return NotImplemented
return (self._std_offset == other._std_offset and
self._dst_offset == other._dst_offset and
self._stddayofweek == other._stddayofweek and
self._dstdayofweek == other._dstdayofweek and
self._stdweeknumber == other._stdweeknumber and
self._dstweeknumber == other._dstweeknumber and
self._stdhour == other._stdhour and
self._dsthour == other._dsthour and
self._stdminute == other._stdminute and
self._dstminute == other._dstminute and
self._std_abbr == other._std_abbr and
self._dst_abbr == other._dst_abbr)
@staticmethod
def list():
"""Return a list of all time zones known to the system."""
with winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) as handle:
with winreg.OpenKey(handle, TZKEYNAME) as tzkey:
result = [winreg.EnumKey(tzkey, i)
for i in range(winreg.QueryInfoKey(tzkey)[0])]
return result
def display(self):
"""
Return the display name of the time zone.
"""
return self._display
def transitions(self, year):
"""
For a given year, get the DST on and off transition times, expressed
always on the standard time side. For zones with no transitions, this
function returns ``None``.
:param year:
The year whose transitions you would like to query.
:return:
Returns a :class:`tuple` of :class:`datetime.datetime` objects,
``(dston, dstoff)`` for zones with an annual DST transition, or
``None`` for fixed offset zones.
"""
if not self.hasdst:
return None
dston = picknthweekday(year, self._dstmonth, self._dstdayofweek,
self._dsthour, self._dstminute,
self._dstweeknumber)
dstoff = picknthweekday(year, self._stdmonth, self._stddayofweek,
self._stdhour, self._stdminute,
self._stdweeknumber)
# Ambiguous dates default to the STD side
dstoff -= self._dst_base_offset
return dston, dstoff
def _get_hasdst(self):
return self._dstmonth != 0
@property
def _dst_base_offset(self):
return self._dst_base_offset_
class tzwin(tzwinbase):
"""
Time zone object created from the zone info in the Windows registry
These are similar to :py:class:`dateutil.tz.tzrange` objects in that
the time zone data is provided in the format of a single offset rule
for either 0 or 2 time zone transitions per year.
    :param name:
The name of a Windows time zone key, e.g. "Eastern Standard Time".
The full list of keys can be retrieved with :func:`tzwin.list`.
"""
def __init__(self, name):
self._name = name
with winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) as handle:
tzkeyname = text_type("{kn}\\{name}").format(kn=TZKEYNAME, name=name)
with winreg.OpenKey(handle, tzkeyname) as tzkey:
keydict = valuestodict(tzkey)
self._std_abbr = keydict["Std"]
self._dst_abbr = keydict["Dlt"]
self._display = keydict["Display"]
            # See http://www.jsiinc.com/SUBA/tip0300/rh0398.htm
tup = struct.unpack("=3l16h", keydict["TZI"])
stdoffset = -tup[0]-tup[1] # Bias + StandardBias * -1
dstoffset = stdoffset-tup[2] # + DaylightBias * -1
self._std_offset = datetime.timedelta(minutes=stdoffset)
self._dst_offset = datetime.timedelta(minutes=dstoffset)
# for the meaning see the win32 TIME_ZONE_INFORMATION structure docs
# http://msdn.microsoft.com/en-us/library/windows/desktop/ms725481(v=vs.85).aspx
(self._stdmonth,
self._stddayofweek, # Sunday = 0
self._stdweeknumber, # Last = 5
self._stdhour,
self._stdminute) = tup[4:9]
(self._dstmonth,
self._dstdayofweek, # Sunday = 0
self._dstweeknumber, # Last = 5
self._dsthour,
self._dstminute) = tup[12:17]
self._dst_base_offset_ = self._dst_offset - self._std_offset
self.hasdst = self._get_hasdst()
def __repr__(self):
return "tzwin(%s)" % repr(self._name)
def __reduce__(self):
return (self.__class__, (self._name,))
class tzwinlocal(tzwinbase):
"""
Class representing the local time zone information in the Windows registry
While :class:`dateutil.tz.tzlocal` makes system calls (via the :mod:`time`
module) to retrieve time zone information, ``tzwinlocal`` retrieves the
rules directly from the Windows registry and creates an object like
:class:`dateutil.tz.tzwin`.
Because Windows does not have an equivalent of :func:`time.tzset`, on
Windows, :class:`dateutil.tz.tzlocal` instances will always reflect the
time zone settings *at the time that the process was started*, meaning
changes to the machine's time zone settings during the run of a program
on Windows will **not** be reflected by :class:`dateutil.tz.tzlocal`.
Because ``tzwinlocal`` reads the registry directly, it is unaffected by
this issue.
"""
def __init__(self):
with winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) as handle:
with winreg.OpenKey(handle, TZLOCALKEYNAME) as tzlocalkey:
keydict = valuestodict(tzlocalkey)
self._std_abbr = keydict["StandardName"]
self._dst_abbr = keydict["DaylightName"]
try:
tzkeyname = text_type('{kn}\\{sn}').format(kn=TZKEYNAME,
sn=self._std_abbr)
with winreg.OpenKey(handle, tzkeyname) as tzkey:
_keydict = valuestodict(tzkey)
self._display = _keydict["Display"]
except OSError:
self._display = None
stdoffset = -keydict["Bias"]-keydict["StandardBias"]
dstoffset = stdoffset-keydict["DaylightBias"]
self._std_offset = datetime.timedelta(minutes=stdoffset)
self._dst_offset = datetime.timedelta(minutes=dstoffset)
# For reasons unclear, in this particular key, the day of week has been
# moved to the END of the SYSTEMTIME structure.
tup = struct.unpack("=8h", keydict["StandardStart"])
(self._stdmonth,
self._stdweeknumber, # Last = 5
self._stdhour,
self._stdminute) = tup[1:5]
self._stddayofweek = tup[7]
tup = struct.unpack("=8h", keydict["DaylightStart"])
(self._dstmonth,
self._dstweeknumber, # Last = 5
self._dsthour,
self._dstminute) = tup[1:5]
self._dstdayofweek = tup[7]
self._dst_base_offset_ = self._dst_offset - self._std_offset
self.hasdst = self._get_hasdst()
def __repr__(self):
return "tzwinlocal()"
def __str__(self):
# str will return the standard name, not the daylight name.
return "tzwinlocal(%s)" % repr(self._std_abbr)
def __reduce__(self):
return (self.__class__, ())
def picknthweekday(year, month, dayofweek, hour, minute, whichweek):
""" dayofweek == 0 means Sunday, whichweek 5 means last instance """
first = datetime.datetime(year, month, 1, hour, minute)
# This will work if dayofweek is ISO weekday (1-7) or Microsoft-style (0-6),
# Because 7 % 7 = 0
weekdayone = first.replace(day=((dayofweek - first.isoweekday()) % 7) + 1)
wd = weekdayone + ((whichweek - 1) * ONEWEEK)
if (wd.month != month):
wd -= ONEWEEK
return wd
def valuestodict(key):
"""Convert a registry key's values to a dictionary."""
dout = {}
size = winreg.QueryInfoKey(key)[1]
tz_res = None
for i in range(size):
key_name, value, dtype = winreg.EnumValue(key, i)
if dtype == winreg.REG_DWORD or dtype == winreg.REG_DWORD_LITTLE_ENDIAN:
# If it's a DWORD (32-bit integer), it's stored as unsigned - convert
# that to a proper signed integer
if value & (1 << 31):
value = value - (1 << 32)
elif dtype == winreg.REG_SZ:
# If it's a reference to the tzres DLL, load the actual string
if value.startswith('@tzres'):
tz_res = tz_res or tzres()
value = tz_res.name_from_string(value)
value = value.rstrip('\x00') # Remove trailing nulls
dout[key_name] = value
return dout
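# --- Illustrative sketch (not part of the original module) -----------------
# Windows-only usage of the classes above; guarded so it is a no-op elsewhere.
import sys

if sys.platform == "win32":
    eastern = tzwin("Eastern Standard Time")   # a key name from tzwin.list()
    print(eastern.display())                   # registry display string
    summer = datetime.datetime(2020, 7, 1, 12, tzinfo=eastern)
    print(summer.tzname())                     # DST abbreviation (the "Dlt" value)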

@@ -1,2 +0,0 @@
# tzwin has moved to dateutil.tz.win
from .tz.win import *

@@ -1,71 +0,0 @@
# -*- coding: utf-8 -*-
"""
This module offers general convenience and utility functions for dealing with
datetimes.
.. versionadded:: 2.7.0
"""
from __future__ import unicode_literals
from datetime import datetime, time
def today(tzinfo=None):
"""
Returns a :py:class:`datetime` representing the current day at midnight
:param tzinfo:
The time zone to attach (also used to determine the current day).
:return:
A :py:class:`datetime.datetime` object representing the current day
at midnight.
"""
dt = datetime.now(tzinfo)
return datetime.combine(dt.date(), time(0, tzinfo=tzinfo))
def default_tzinfo(dt, tzinfo):
"""
Sets the ``tzinfo`` parameter on naive datetimes only
This is useful for example when you are provided a datetime that may have
either an implicit or explicit time zone, such as when parsing a time zone
string.
.. doctest::
>>> from dateutil.tz import tzoffset
>>> from dateutil.parser import parse
>>> from dateutil.utils import default_tzinfo
>>> dflt_tz = tzoffset("EST", -18000)
>>> print(default_tzinfo(parse('2014-01-01 12:30 UTC'), dflt_tz))
2014-01-01 12:30:00+00:00
>>> print(default_tzinfo(parse('2014-01-01 12:30'), dflt_tz))
2014-01-01 12:30:00-05:00
:param dt:
The datetime on which to replace the time zone
:param tzinfo:
The :py:class:`datetime.tzinfo` subclass instance to assign to
``dt`` if (and only if) it is naive.
:return:
Returns an aware :py:class:`datetime.datetime`.
"""
if dt.tzinfo is not None:
return dt
else:
return dt.replace(tzinfo=tzinfo)
def within_delta(dt1, dt2, delta):
"""
    Useful for comparing two datetimes that may have a negligible difference
    and should be considered equal.
"""
delta = abs(delta)
difference = dt1 - dt2
return -delta <= difference <= delta
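# --- Illustrative sketch (not part of the original module) -----------------
# within_delta() in action: an order-insensitive "close enough" comparison.
from datetime import datetime, timedelta

d1 = datetime(2020, 1, 1, 12, 0, 0)
d2 = d1 + timedelta(milliseconds=3)
print(within_delta(d1, d2, timedelta(milliseconds=5)))   # True
print(within_delta(d2, d1, timedelta(milliseconds=5)))   # True (symmetric range)
print(within_delta(d1, d2, timedelta(milliseconds=1)))   # False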

@@ -1,167 +0,0 @@
# -*- coding: utf-8 -*-
import warnings
import json
from tarfile import TarFile
from pkgutil import get_data
from io import BytesIO
from dateutil.tz import tzfile as _tzfile
__all__ = ["get_zonefile_instance", "gettz", "gettz_db_metadata"]
ZONEFILENAME = "dateutil-zoneinfo.tar.gz"
METADATA_FN = 'METADATA'
class tzfile(_tzfile):
def __reduce__(self):
return (gettz, (self._filename,))
def getzoneinfofile_stream():
try:
return BytesIO(get_data(__name__, ZONEFILENAME))
except IOError as e: # TODO switch to FileNotFoundError?
warnings.warn("I/O error({0}): {1}".format(e.errno, e.strerror))
return None
class ZoneInfoFile(object):
def __init__(self, zonefile_stream=None):
if zonefile_stream is not None:
with TarFile.open(fileobj=zonefile_stream) as tf:
self.zones = {zf.name: tzfile(tf.extractfile(zf), filename=zf.name)
for zf in tf.getmembers()
if zf.isfile() and zf.name != METADATA_FN}
                # Deal with links: point them at their parent object so the
                # same tzfile instance is shared, wasting less memory
links = {zl.name: self.zones[zl.linkname]
for zl in tf.getmembers() if
zl.islnk() or zl.issym()}
self.zones.update(links)
try:
metadata_json = tf.extractfile(tf.getmember(METADATA_FN))
metadata_str = metadata_json.read().decode('UTF-8')
self.metadata = json.loads(metadata_str)
except KeyError:
# no metadata in tar file
self.metadata = None
else:
self.zones = {}
self.metadata = None
def get(self, name, default=None):
"""
Wrapper for :func:`ZoneInfoFile.zones.get`. This is a convenience method
for retrieving zones from the zone dictionary.
:param name:
The name of the zone to retrieve. (Generally IANA zone names)
:param default:
The value to return in the event of a missing key.
.. versionadded:: 2.6.0
"""
return self.zones.get(name, default)
# The current API has gettz as a module function, although in fact it taps into
# a stateful class. So as a workaround for now, without changing the API, we
# will create a new "global" class instance the first time a user requests a
# timezone. Ugly, but adheres to the API.
#
# TODO: Remove after deprecation period.
_CLASS_ZONE_INSTANCE = []
def get_zonefile_instance(new_instance=False):
"""
This is a convenience function which provides a :class:`ZoneInfoFile`
instance using the data provided by the ``dateutil`` package. By default, it
caches a single instance of the ZoneInfoFile object and returns that.
:param new_instance:
If ``True``, a new instance of :class:`ZoneInfoFile` is instantiated and
used as the cached instance for the next call. Otherwise, new instances
are created only as necessary.
:return:
Returns a :class:`ZoneInfoFile` object.
.. versionadded:: 2.6
"""
if new_instance:
zif = None
else:
zif = getattr(get_zonefile_instance, '_cached_instance', None)
if zif is None:
zif = ZoneInfoFile(getzoneinfofile_stream())
get_zonefile_instance._cached_instance = zif
return zif
def gettz(name):
"""
This retrieves a time zone from the local zoneinfo tarball that is packaged
with dateutil.
:param name:
An IANA-style time zone name, as found in the zoneinfo file.
:return:
Returns a :class:`dateutil.tz.tzfile` time zone object.
.. warning::
It is generally inadvisable to use this function, and it is only
provided for API compatibility with earlier versions. This is *not*
equivalent to ``dateutil.tz.gettz()``, which selects an appropriate
time zone based on the inputs, favoring system zoneinfo. This is ONLY
for accessing the dateutil-specific zoneinfo (which may be out of
date compared to the system zoneinfo).
.. deprecated:: 2.6
If you need to use a specific zoneinfofile over the system zoneinfo,
instantiate a :class:`dateutil.zoneinfo.ZoneInfoFile` object and call
:func:`dateutil.zoneinfo.ZoneInfoFile.get(name)` instead.
Use :func:`get_zonefile_instance` to retrieve an instance of the
dateutil-provided zoneinfo.
"""
warnings.warn("zoneinfo.gettz() will be removed in future versions, "
"to use the dateutil-provided zoneinfo files, instantiate a "
"ZoneInfoFile object and use ZoneInfoFile.zones.get() "
"instead. See the documentation for details.",
DeprecationWarning)
if len(_CLASS_ZONE_INSTANCE) == 0:
_CLASS_ZONE_INSTANCE.append(ZoneInfoFile(getzoneinfofile_stream()))
return _CLASS_ZONE_INSTANCE[0].zones.get(name)
def gettz_db_metadata():
""" Get the zonefile metadata
See `zonefile_metadata`_
:returns:
A dictionary with the database metadata
.. deprecated:: 2.6
See deprecation warning in :func:`zoneinfo.gettz`. To get metadata,
query the attribute ``zoneinfo.ZoneInfoFile.metadata``.
"""
warnings.warn("zoneinfo.gettz_db_metadata() will be removed in future "
"versions, to use the dateutil-provided zoneinfo files, "
"ZoneInfoFile object and query the 'metadata' attribute "
"instead. See the documentation for details.",
DeprecationWarning)
if len(_CLASS_ZONE_INSTANCE) == 0:
_CLASS_ZONE_INSTANCE.append(ZoneInfoFile(getzoneinfofile_stream()))
return _CLASS_ZONE_INSTANCE[0].metadata
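# --- Illustrative sketch (not part of the original module) -----------------
# The supported, non-deprecated access pattern described above:
zif = get_zonefile_instance()          # cached ZoneInfoFile for the tarball
nyc = zif.get('America/New_York')      # a tzfile instance, or None if absent
print(type(nyc).__name__, len(zif.zones))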

@@ -1,53 +0,0 @@
import logging
import os
import tempfile
import shutil
import json
from subprocess import check_call
from tarfile import TarFile
from dateutil.zoneinfo import METADATA_FN, ZONEFILENAME
def rebuild(filename, tag=None, format="gz", zonegroups=[], metadata=None):
"""Rebuild the internal timezone info in dateutil/zoneinfo/zoneinfo*tar*
filename is the timezone tarball from ``ftp.iana.org/tz``.
"""
tmpdir = tempfile.mkdtemp()
zonedir = os.path.join(tmpdir, "zoneinfo")
moduledir = os.path.dirname(__file__)
try:
with TarFile.open(filename) as tf:
for name in zonegroups:
tf.extract(name, tmpdir)
filepaths = [os.path.join(tmpdir, n) for n in zonegroups]
try:
check_call(["zic", "-d", zonedir] + filepaths)
except OSError as e:
_print_on_nosuchfile(e)
raise
# write metadata file
with open(os.path.join(zonedir, METADATA_FN), 'w') as f:
json.dump(metadata, f, indent=4, sort_keys=True)
target = os.path.join(moduledir, ZONEFILENAME)
with TarFile.open(target, "w:%s" % format) as tf:
for entry in os.listdir(zonedir):
entrypath = os.path.join(zonedir, entry)
tf.add(entrypath, entry)
finally:
shutil.rmtree(tmpdir)
def _print_on_nosuchfile(e):
"""Print helpful troubleshooting message
e is an exception raised by subprocess.check_call()
"""
if e.errno == 2:
logging.error(
"Could not find zic. Perhaps you need to install "
"libc-bin or some other package that provides it, "
"or it's not in your PATH?")

@@ -1,26 +0,0 @@
Copyright (c) 2005-2018, Michele Simionato
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
Redistributions in bytecode form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
DAMAGE.

@@ -1,131 +0,0 @@
Metadata-Version: 2.1
Name: decorator
Version: 4.4.2
Summary: Decorators for Humans
Home-page: https://github.com/micheles/decorator
Author: Michele Simionato
Author-email: michele.simionato@gmail.com
License: new BSD License
Keywords: decorators generic utility
Platform: All
Classifier: Development Status :: 5 - Production/Stable
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: BSD License
Classifier: Natural Language :: English
Classifier: Operating System :: OS Independent
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 2
Classifier: Programming Language :: Python :: 2.6
Classifier: Programming Language :: Python :: 2.7
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.2
Classifier: Programming Language :: Python :: 3.3
Classifier: Programming Language :: Python :: 3.4
Classifier: Programming Language :: Python :: 3.5
Classifier: Programming Language :: Python :: 3.6
Classifier: Programming Language :: Python :: 3.7
Classifier: Programming Language :: Python :: Implementation :: CPython
Classifier: Topic :: Software Development :: Libraries
Classifier: Topic :: Utilities
Requires-Python: >=2.6, !=3.0.*, !=3.1.*
Decorators for Humans
=====================
The goal of the decorator module is to make it easy to define
signature-preserving function decorators and decorator factories.
It also includes an implementation of multiple dispatch and other niceties
(please check the docs). It is released under a two-clause
BSD license, i.e. basically you can do whatever you want with it but I am not
responsible.
Installation
-------------
If you are lazy, just perform
``$ pip install decorator``
which will install just the module on your system.
If you prefer to install the full distribution from source, including
the documentation, clone the `GitHub repo`_ or download the tarball_, unpack it and run
``$ pip install .``
in the main directory, possibly as superuser.
.. _tarball: https://pypi.org/project/decorator/#files
.. _GitHub repo: https://github.com/micheles/decorator
Testing
--------
If you have the source code installation you can run the tests with
`$ python src/tests/test.py -v`
or (if you have setuptools installed)
`$ python setup.py test`
Notice that you may run into trouble if your system has an older
version of the decorator module; in that case, remove the old
version. It is safe even to copy the module `decorator.py` over
an existing one, since we kept backward-compatibility for a long time.
Repository
---------------
The project is hosted on GitHub. You can look at the source here:
https://github.com/micheles/decorator
Documentation
---------------
The documentation has been moved to https://github.com/micheles/decorator/blob/master/docs/documentation.md
From there you can get a PDF version by simply using the print
functionality of your browser.
Here is the documentation for previous versions of the module:
https://github.com/micheles/decorator/blob/4.3.2/docs/tests.documentation.rst
https://github.com/micheles/decorator/blob/4.2.1/docs/tests.documentation.rst
https://github.com/micheles/decorator/blob/4.1.2/docs/tests.documentation.rst
https://github.com/micheles/decorator/blob/4.0.0/documentation.rst
https://github.com/micheles/decorator/blob/3.4.2/documentation.rst
For the impatient
-----------------
Here is an example of how to define a family of decorators tracing slow
operations:
.. code-block:: python
import time
import logging

from decorator import decorator
@decorator
def warn_slow(func, timelimit=60, *args, **kw):
t0 = time.time()
result = func(*args, **kw)
dt = time.time() - t0
if dt > timelimit:
logging.warning('%s took %d seconds', func.__name__, dt)
else:
logging.info('%s took %d seconds', func.__name__, dt)
return result
@warn_slow # warn if it takes more than 1 minute
def preprocess_input_files(inputdir, tempdir):
...
@warn_slow(timelimit=600) # warn if it takes more than 10 minutes
def run_calculation(tempdir, outdir):
...
Enjoy!

@@ -1,9 +0,0 @@
__pycache__/decorator.cpython-39.pyc,,
decorator-4.4.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
decorator-4.4.2.dist-info/LICENSE.txt,sha256=_RFmDKvwUyCCxFcGhi-vwpSQfsf44heBgkCkmZgGeC4,1309
decorator-4.4.2.dist-info/METADATA,sha256=RYLh5Qy8XzYOcgCT6RsI_cTXG_PE1QvoAVT-u2vus80,4168
decorator-4.4.2.dist-info/RECORD,,
decorator-4.4.2.dist-info/WHEEL,sha256=h_aVn5OB2IERUjMbi2pucmR_zzWJtk303YXvhh60NJ8,110
decorator-4.4.2.dist-info/pbr.json,sha256=AL84oUUWQHwkd8OCPhLRo2NJjU5MDdmXMqRHv-posqs,47
decorator-4.4.2.dist-info/top_level.txt,sha256=Kn6eQjo83ctWxXVyBMOYt0_YpjRjBznKYVuNyuC_DSI,10
decorator.py,sha256=aQ8Ozc-EK26xBTOXVR5A-8Szgx99_bhaexZSGNn38Yc,17222

@@ -1,6 +0,0 @@
Wheel-Version: 1.0
Generator: bdist_wheel (0.33.4)
Root-Is-Purelib: true
Tag: py2-none-any
Tag: py3-none-any

@@ -1,454 +0,0 @@
# ######################### LICENSE ############################ #
# Copyright (c) 2005-2018, Michele Simionato
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# Redistributions in bytecode form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
"""
Decorator module, see http://pypi.python.org/pypi/decorator
for the documentation.
"""
from __future__ import print_function
import re
import sys
import inspect
import operator
import itertools
import collections
__version__ = '4.4.2'
if sys.version_info >= (3,):
from inspect import getfullargspec
def get_init(cls):
return cls.__init__
else:
FullArgSpec = collections.namedtuple(
'FullArgSpec', 'args varargs varkw defaults '
'kwonlyargs kwonlydefaults annotations')
def getfullargspec(f):
"A quick and dirty replacement for getfullargspec for Python 2.X"
return FullArgSpec._make(inspect.getargspec(f) + ([], None, {}))
def get_init(cls):
return cls.__init__.__func__
try:
iscoroutinefunction = inspect.iscoroutinefunction
except AttributeError:
# let's assume there are no coroutine functions in old Python
def iscoroutinefunction(f):
return False
try:
from inspect import isgeneratorfunction
except ImportError:
# assume no generator function in old Python versions
def isgeneratorfunction(caller):
return False
DEF = re.compile(r'\s*def\s*([_\w][_\w\d]*)\s*\(')
# basic functionality
class FunctionMaker(object):
"""
An object with the ability to create functions with a given signature.
It has attributes name, doc, module, signature, defaults, dict and
methods update and make.
"""
# Atomic get-and-increment provided by the GIL
_compile_count = itertools.count()
# make pylint happy
args = varargs = varkw = defaults = kwonlyargs = kwonlydefaults = ()
def __init__(self, func=None, name=None, signature=None,
defaults=None, doc=None, module=None, funcdict=None):
self.shortsignature = signature
if func:
# func can be a class or a callable, but not an instance method
self.name = func.__name__
if self.name == '<lambda>': # small hack for lambda functions
self.name = '_lambda_'
self.doc = func.__doc__
self.module = func.__module__
if inspect.isfunction(func):
argspec = getfullargspec(func)
self.annotations = getattr(func, '__annotations__', {})
for a in ('args', 'varargs', 'varkw', 'defaults', 'kwonlyargs',
'kwonlydefaults'):
setattr(self, a, getattr(argspec, a))
for i, arg in enumerate(self.args):
setattr(self, 'arg%d' % i, arg)
allargs = list(self.args)
allshortargs = list(self.args)
if self.varargs:
allargs.append('*' + self.varargs)
allshortargs.append('*' + self.varargs)
elif self.kwonlyargs:
allargs.append('*') # single star syntax
for a in self.kwonlyargs:
allargs.append('%s=None' % a)
allshortargs.append('%s=%s' % (a, a))
if self.varkw:
allargs.append('**' + self.varkw)
allshortargs.append('**' + self.varkw)
self.signature = ', '.join(allargs)
self.shortsignature = ', '.join(allshortargs)
self.dict = func.__dict__.copy()
# func=None happens when decorating a caller
if name:
self.name = name
if signature is not None:
self.signature = signature
if defaults:
self.defaults = defaults
if doc:
self.doc = doc
if module:
self.module = module
if funcdict:
self.dict = funcdict
        # check existence of required attributes
assert hasattr(self, 'name')
if not hasattr(self, 'signature'):
raise TypeError('You are decorating a non function: %s' % func)
def update(self, func, **kw):
"Update the signature of func with the data in self"
func.__name__ = self.name
func.__doc__ = getattr(self, 'doc', None)
func.__dict__ = getattr(self, 'dict', {})
func.__defaults__ = self.defaults
func.__kwdefaults__ = self.kwonlydefaults or None
func.__annotations__ = getattr(self, 'annotations', None)
try:
frame = sys._getframe(3)
except AttributeError: # for IronPython and similar implementations
callermodule = '?'
else:
callermodule = frame.f_globals.get('__name__', '?')
func.__module__ = getattr(self, 'module', callermodule)
func.__dict__.update(kw)
def make(self, src_templ, evaldict=None, addsource=False, **attrs):
"Make a new function from a given template and update the signature"
src = src_templ % vars(self) # expand name and signature
evaldict = evaldict or {}
mo = DEF.search(src)
if mo is None:
raise SyntaxError('not a valid function template\n%s' % src)
name = mo.group(1) # extract the function name
names = set([name] + [arg.strip(' *') for arg in
self.shortsignature.split(',')])
for n in names:
if n in ('_func_', '_call_'):
raise NameError('%s is overridden in\n%s' % (n, src))
if not src.endswith('\n'): # add a newline for old Pythons
src += '\n'
# Ensure each generated function has a unique filename for profilers
# (such as cProfile) that depend on the tuple of (<filename>,
# <definition line>, <function name>) being unique.
filename = '<decorator-gen-%d>' % next(self._compile_count)
try:
code = compile(src, filename, 'single')
exec(code, evaldict)
except Exception:
print('Error in generated code:', file=sys.stderr)
print(src, file=sys.stderr)
raise
func = evaldict[name]
if addsource:
attrs['__source__'] = src
self.update(func, **attrs)
return func
@classmethod
def create(cls, obj, body, evaldict, defaults=None,
doc=None, module=None, addsource=True, **attrs):
"""
Create a function from the strings name, signature and body.
evaldict is the evaluation dictionary. If addsource is true an
attribute __source__ is added to the result. The attributes attrs
are added, if any.
"""
if isinstance(obj, str): # "name(signature)"
name, rest = obj.strip().split('(', 1)
            signature = rest[:-1]  # strip the trailing right paren
func = None
else: # a function
name = None
signature = None
func = obj
self = cls(func, name, signature, defaults, doc, module)
ibody = '\n'.join(' ' + line for line in body.splitlines())
caller = evaldict.get('_call_') # when called from `decorate`
if caller and iscoroutinefunction(caller):
body = ('async def %(name)s(%(signature)s):\n' + ibody).replace(
'return', 'return await')
else:
body = 'def %(name)s(%(signature)s):\n' + ibody
return self.make(body, evaldict, addsource, **attrs)
def decorate(func, caller, extras=()):
"""
decorate(func, caller) decorates a function using a caller.
If the caller is a generator function, the resulting function
will be a generator function.
"""
evaldict = dict(_call_=caller, _func_=func)
es = ''
for i, extra in enumerate(extras):
ex = '_e%d_' % i
evaldict[ex] = extra
es += ex + ', '
if '3.5' <= sys.version < '3.6':
# with Python 3.5 isgeneratorfunction returns True for all coroutines
# however we know that it is NOT possible to have a generator
# coroutine in python 3.5: PEP525 was not there yet
generatorcaller = isgeneratorfunction(
caller) and not iscoroutinefunction(caller)
else:
generatorcaller = isgeneratorfunction(caller)
if generatorcaller:
fun = FunctionMaker.create(
func, "for res in _call_(_func_, %s%%(shortsignature)s):\n"
" yield res" % es, evaldict, __wrapped__=func)
else:
fun = FunctionMaker.create(
func, "return _call_(_func_, %s%%(shortsignature)s)" % es,
evaldict, __wrapped__=func)
if hasattr(func, '__qualname__'):
fun.__qualname__ = func.__qualname__
return fun
def decorator(caller, _func=None):
"""decorator(caller) converts a caller function into a decorator"""
if _func is not None: # return a decorated function
# this is obsolete behavior; you should use decorate instead
return decorate(_func, caller)
# else return a decorator function
defaultargs, defaults = '', ()
if inspect.isclass(caller):
name = caller.__name__.lower()
doc = 'decorator(%s) converts functions/generators into ' \
'factories of %s objects' % (caller.__name__, caller.__name__)
elif inspect.isfunction(caller):
if caller.__name__ == '<lambda>':
name = '_lambda_'
else:
name = caller.__name__
doc = caller.__doc__
nargs = caller.__code__.co_argcount
ndefs = len(caller.__defaults__ or ())
defaultargs = ', '.join(caller.__code__.co_varnames[nargs-ndefs:nargs])
if defaultargs:
defaultargs += ','
defaults = caller.__defaults__
else: # assume caller is an object with a __call__ method
name = caller.__class__.__name__.lower()
doc = caller.__call__.__doc__
evaldict = dict(_call=caller, _decorate_=decorate)
dec = FunctionMaker.create(
'%s(func, %s)' % (name, defaultargs),
'if func is None: return lambda func: _decorate_(func, _call, (%s))\n'
'return _decorate_(func, _call, (%s))' % (defaultargs, defaultargs),
evaldict, doc=doc, module=caller.__module__, __wrapped__=caller)
if defaults:
dec.__defaults__ = (None,) + defaults
return dec
# ####################### contextmanager ####################### #
try: # Python >= 3.2
from contextlib import _GeneratorContextManager
except ImportError: # Python >= 2.5
from contextlib import GeneratorContextManager as _GeneratorContextManager
class ContextManager(_GeneratorContextManager):
def __call__(self, func):
"""Context manager decorator"""
return FunctionMaker.create(
func, "with _self_: return _func_(%(shortsignature)s)",
dict(_self_=self, _func_=func), __wrapped__=func)
init = getfullargspec(_GeneratorContextManager.__init__)
n_args = len(init.args)
if n_args == 2 and not init.varargs: # (self, genobj) Python 2.7
def __init__(self, g, *a, **k):
return _GeneratorContextManager.__init__(self, g(*a, **k))
ContextManager.__init__ = __init__
elif n_args == 2 and init.varargs: # (self, gen, *a, **k) Python 3.4
pass
elif n_args == 4: # (self, gen, args, kwds) Python 3.5
def __init__(self, g, *a, **k):
return _GeneratorContextManager.__init__(self, g, a, k)
ContextManager.__init__ = __init__
_contextmanager = decorator(ContextManager)
def contextmanager(func):
# Enable Pylint config: contextmanager-decorators=decorator.contextmanager
return _contextmanager(func)
# ############################ dispatch_on ############################ #
def append(a, vancestors):
"""
Append ``a`` to the list of the virtual ancestors, unless it is already
included.
"""
add = True
for j, va in enumerate(vancestors):
if issubclass(va, a):
add = False
break
if issubclass(a, va):
vancestors[j] = a
add = False
if add:
vancestors.append(a)
# inspired from simplegeneric by P.J. Eby and functools.singledispatch
def dispatch_on(*dispatch_args):
"""
Factory of decorators turning a function into a generic function
dispatching on the given arguments.
"""
assert dispatch_args, 'No dispatch args passed'
dispatch_str = '(%s,)' % ', '.join(dispatch_args)
def check(arguments, wrong=operator.ne, msg=''):
"""Make sure one passes the expected number of arguments"""
if wrong(len(arguments), len(dispatch_args)):
raise TypeError('Expected %d arguments, got %d%s' %
(len(dispatch_args), len(arguments), msg))
def gen_func_dec(func):
"""Decorator turning a function into a generic function"""
# first check the dispatch arguments
argset = set(getfullargspec(func).args)
if not set(dispatch_args) <= argset:
raise NameError('Unknown dispatch arguments %s' % dispatch_str)
typemap = {}
def vancestors(*types):
"""
Get a list of sets of virtual ancestors for the given types
"""
check(types)
ras = [[] for _ in range(len(dispatch_args))]
for types_ in typemap:
for t, type_, ra in zip(types, types_, ras):
if issubclass(t, type_) and type_ not in t.mro():
append(type_, ra)
return [set(ra) for ra in ras]
def ancestors(*types):
"""
Get a list of virtual MROs, one for each type
"""
check(types)
lists = []
for t, vas in zip(types, vancestors(*types)):
n_vas = len(vas)
if n_vas > 1:
raise RuntimeError(
'Ambiguous dispatch for %s: %s' % (t, vas))
elif n_vas == 1:
va, = vas
mro = type('t', (t, va), {}).mro()[1:]
else:
mro = t.mro()
lists.append(mro[:-1]) # discard t and object
return lists
def register(*types):
"""
Decorator to register an implementation for the given types
"""
check(types)
def dec(f):
check(getfullargspec(f).args, operator.lt, ' in ' + f.__name__)
typemap[types] = f
return f
return dec
def dispatch_info(*types):
"""
        A utility to introspect the dispatch algorithm
"""
check(types)
lst = []
for anc in itertools.product(*ancestors(*types)):
lst.append(tuple(a.__name__ for a in anc))
return lst
def _dispatch(dispatch_args, *args, **kw):
types = tuple(type(arg) for arg in dispatch_args)
try: # fast path
f = typemap[types]
except KeyError:
pass
else:
return f(*args, **kw)
combinations = itertools.product(*ancestors(*types))
next(combinations) # the first one has been already tried
for types_ in combinations:
f = typemap.get(types_)
if f is not None:
return f(*args, **kw)
# else call the default implementation
return func(*args, **kw)
return FunctionMaker.create(
func, 'return _f_(%s, %%(shortsignature)s)' % dispatch_str,
dict(_f_=_dispatch), register=register, default=func,
typemap=typemap, vancestors=vancestors, ancestors=ancestors,
dispatch_info=dispatch_info, __wrapped__=func)
gen_func_dec.__name__ = 'dispatch_on' + dispatch_str
return gen_func_dec
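# --- Illustrative sketch (not part of the original module) -----------------
# Minimal use of decorator() defined above; the wrapper preserves the
# decorated function's signature.
import time

@decorator
def timed(func, *args, **kw):
    t0 = time.time()
    try:
        return func(*args, **kw)
    finally:
        print('%s took %.6fs' % (func.__name__, time.time() - t0))

@timed
def add(x, y):
    return x + y

print(add(1, 2))                  # timing line, then 3
print(getfullargspec(add).args)   # ['x', 'y'] -- signature preserved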

@@ -1 +0,0 @@
import os; var = 'SETUPTOOLS_USE_DISTUTILS'; enabled = os.environ.get(var, 'stdlib') == 'local'; enabled and __import__('_distutils_hack').add_shim();

@@ -1,5 +0,0 @@
"""Run the EasyInstall command"""
if __name__ == '__main__':
from setuptools.command.easy_install import main
main()

@@ -1,40 +0,0 @@
License
=======
NetworkX is distributed with the 3-clause BSD license.
::
Copyright (C) 2004-2020, NetworkX Developers
Aric Hagberg <hagberg@lanl.gov>
Dan Schult <dschult@colgate.edu>
Pieter Swart <swart@lanl.gov>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of the NetworkX Developers nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

@@ -1,142 +0,0 @@
Metadata-Version: 2.1
Name: networkx
Version: 2.5
Summary: Python package for creating and manipulating graphs and networks
Home-page: http://networkx.github.io/
Author: Aric Hagberg
Author-email: hagberg@lanl.gov
Maintainer: NetworkX Developers
Maintainer-email: networkx-discuss@googlegroups.com
License: UNKNOWN
Project-URL: Bug Tracker, https://github.com/networkx/networkx/issues
Project-URL: Documentation, https://networkx.github.io/documentation/stable/
Project-URL: Source Code, https://github.com/networkx/networkx
Keywords: Networks,Graph Theory,Mathematics,network,graph,discrete mathematics,math
Platform: Linux
Platform: Mac OSX
Platform: Windows
Platform: Unix
Classifier: Development Status :: 5 - Production/Stable
Classifier: Intended Audience :: Developers
Classifier: Intended Audience :: Science/Research
Classifier: License :: OSI Approved :: BSD License
Classifier: Operating System :: OS Independent
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.6
Classifier: Programming Language :: Python :: 3.7
Classifier: Programming Language :: Python :: 3.8
Classifier: Programming Language :: Python :: 3 :: Only
Classifier: Topic :: Software Development :: Libraries :: Python Modules
Classifier: Topic :: Scientific/Engineering :: Bio-Informatics
Classifier: Topic :: Scientific/Engineering :: Information Analysis
Classifier: Topic :: Scientific/Engineering :: Mathematics
Classifier: Topic :: Scientific/Engineering :: Physics
Requires-Python: >=3.6
Requires-Dist: decorator (>=4.3.0)
Provides-Extra: all
Requires-Dist: numpy ; extra == 'all'
Requires-Dist: scipy ; extra == 'all'
Requires-Dist: pandas ; extra == 'all'
Requires-Dist: matplotlib ; extra == 'all'
Requires-Dist: pygraphviz ; extra == 'all'
Requires-Dist: pydot ; extra == 'all'
Requires-Dist: pyyaml ; extra == 'all'
Requires-Dist: lxml ; extra == 'all'
Requires-Dist: pytest ; extra == 'all'
Provides-Extra: gdal
Requires-Dist: gdal ; extra == 'gdal'
Provides-Extra: lxml
Requires-Dist: lxml ; extra == 'lxml'
Provides-Extra: matplotlib
Requires-Dist: matplotlib ; extra == 'matplotlib'
Provides-Extra: numpy
Requires-Dist: numpy ; extra == 'numpy'
Provides-Extra: pandas
Requires-Dist: pandas ; extra == 'pandas'
Provides-Extra: pydot
Requires-Dist: pydot ; extra == 'pydot'
Provides-Extra: pygraphviz
Requires-Dist: pygraphviz ; extra == 'pygraphviz'
Provides-Extra: pytest
Requires-Dist: pytest ; extra == 'pytest'
Provides-Extra: pyyaml
Requires-Dist: pyyaml ; extra == 'pyyaml'
Provides-Extra: scipy
Requires-Dist: scipy ; extra == 'scipy'
NetworkX
========
.. image:: https://img.shields.io/pypi/v/networkx.svg
:target: https://pypi.org/project/networkx/
.. image:: https://img.shields.io/pypi/pyversions/networkx.svg
:target: https://pypi.org/project/networkx/
.. image:: https://travis-ci.org/networkx/networkx.svg?branch=master
:target: https://travis-ci.org/networkx/networkx
.. image:: https://ci.appveyor.com/api/projects/status/github/networkx/networkx?branch=master&svg=true
:target: https://ci.appveyor.com/project/dschult/networkx-pqott
.. image:: https://codecov.io/gh/networkx/networkx/branch/master/graph/badge.svg
:target: https://codecov.io/gh/networkx/networkx
NetworkX is a Python package for the creation, manipulation,
and study of the structure, dynamics, and functions
of complex networks.
- **Website (including documentation):** https://networkx.github.io
- **Mailing list:** https://groups.google.com/forum/#!forum/networkx-discuss
- **Source:** https://github.com/networkx/networkx
- **Bug reports:** https://github.com/networkx/networkx/issues
Simple example
--------------
Find the shortest path between two nodes in an undirected graph:
.. code:: python
>>> import networkx as nx
>>> G = nx.Graph()
>>> G.add_edge('A', 'B', weight=4)
>>> G.add_edge('B', 'D', weight=2)
>>> G.add_edge('A', 'C', weight=3)
>>> G.add_edge('C', 'D', weight=4)
>>> nx.shortest_path(G, 'A', 'D', weight='weight')
['A', 'B', 'D']
Install
-------
Install the latest version of NetworkX::
$ pip install networkx
Install with all optional dependencies::
$ pip install networkx[all]
For additional details, please see `INSTALL.rst`.
Bugs
----
Please report any bugs that you find `here <https://github.com/networkx/networkx/issues>`_.
Or, even better, fork the repository on `GitHub <https://github.com/networkx/networkx>`_
and create a pull request (PR). We welcome all changes, big or small, and we
will help you make the PR if you are new to `git` (just ask on the issue and/or
see `CONTRIBUTING.rst`).
License
-------
Released under the 3-Clause BSD license (see `LICENSE.txt`)::
Copyright (C) 2004-2020 NetworkX Developers
Aric Hagberg <hagberg@lanl.gov>
Dan Schult <dschult@colgate.edu>
Pieter Swart <swart@lanl.gov>

File diff suppressed because it is too large

@@ -1,5 +0,0 @@
Wheel-Version: 1.0
Generator: bdist_wheel (0.35.1)
Root-Is-Purelib: true
Tag: py3-none-any

@@ -1,76 +0,0 @@
"""
NetworkX
========
NetworkX is a Python package for the creation, manipulation, and study of the
structure, dynamics, and functions of complex networks.
See https://networkx.github.io for complete documentation.
"""
import sys
if sys.version_info[:2] < (3, 6):
m = "Python 3.6 or later is required for NetworkX (%d.%d detected)."
raise ImportError(m % sys.version_info[:2])
del sys
# Release data
from networkx import release
__author__ = (
f"{release.authors['Hagberg'][0]} <{release.authors['Hagberg'][1]}>\n"
f"{release.authors['Schult'][0]} <{release.authors['Schult'][1]}>\n"
f"{release.authors['Swart'][0]} <{release.authors['Swart'][1]}>"
)
__date__ = release.date
__version__ = release.version
__bibtex__ = """@inproceedings{hagberg-2008-exploring,
author = {Aric A. Hagberg and Daniel A. Schult and Pieter J. Swart},
title = {Exploring network structure, dynamics, and function using {NetworkX}},
year = {2008},
month = Aug,
urlpdf = {http://math.lanl.gov/~hagberg/Papers/hagberg-2008-exploring.pdf},
booktitle = {Proceedings of the 7th Python in Science Conference (SciPy2008)},
editors = {G\"{a}el Varoquaux, Travis Vaught, and Jarrod Millman},
address = {Pasadena, CA USA},
pages = {11--15}
}"""
# These imports are order-dependent
from networkx.exception import *
import networkx.utils
import networkx.classes.filters
import networkx.classes
from networkx.classes import *
import networkx.convert
from networkx.convert import *
import networkx.convert_matrix
from networkx.convert_matrix import *
import networkx.relabel
from networkx.relabel import *
import networkx.generators
from networkx.generators import *
import networkx.readwrite
from networkx.readwrite import *
# Need to test with SciPy, when available
import networkx.algorithms
from networkx.algorithms import *
import networkx.linalg
from networkx.linalg import *
from networkx.testing.test import run as test
import networkx.drawing
from networkx.drawing import *

@@ -1,125 +0,0 @@
from networkx.algorithms.assortativity import *
from networkx.algorithms.asteroidal import *
from networkx.algorithms.boundary import *
from networkx.algorithms.bridges import *
from networkx.algorithms.chains import *
from networkx.algorithms.centrality import *
from networkx.algorithms.chordal import *
from networkx.algorithms.cluster import *
from networkx.algorithms.clique import *
from networkx.algorithms.communicability_alg import *
from networkx.algorithms.components import *
from networkx.algorithms.coloring import *
from networkx.algorithms.core import *
from networkx.algorithms.covering import *
from networkx.algorithms.cycles import *
from networkx.algorithms.cuts import *
from networkx.algorithms.d_separation import *
from networkx.algorithms.dag import *
from networkx.algorithms.distance_measures import *
from networkx.algorithms.distance_regular import *
from networkx.algorithms.dominance import *
from networkx.algorithms.dominating import *
from networkx.algorithms.efficiency_measures import *
from networkx.algorithms.euler import *
from networkx.algorithms.graphical import *
from networkx.algorithms.hierarchy import *
from networkx.algorithms.hybrid import *
from networkx.algorithms.link_analysis import *
from networkx.algorithms.link_prediction import *
from networkx.algorithms.lowest_common_ancestors import *
from networkx.algorithms.isolate import *
from networkx.algorithms.matching import *
from networkx.algorithms.minors import *
from networkx.algorithms.mis import *
from networkx.algorithms.moral import *
from networkx.algorithms.non_randomness import *
from networkx.algorithms.operators import *
from networkx.algorithms.planarity import *
from networkx.algorithms.planar_drawing import *
from networkx.algorithms.reciprocity import *
from networkx.algorithms.regular import *
from networkx.algorithms.richclub import *
from networkx.algorithms.shortest_paths import *
from networkx.algorithms.similarity import *
from networkx.algorithms.graph_hashing import *
from networkx.algorithms.simple_paths import *
from networkx.algorithms.smallworld import *
from networkx.algorithms.smetric import *
from networkx.algorithms.structuralholes import *
from networkx.algorithms.sparsifiers import *
from networkx.algorithms.swap import *
from networkx.algorithms.traversal import *
from networkx.algorithms.triads import *
from networkx.algorithms.vitality import *
from networkx.algorithms.voronoi import *
from networkx.algorithms.wiener import *
# Make certain subpackages available to the user as direct imports from
# the `networkx` namespace.
import networkx.algorithms.assortativity
import networkx.algorithms.bipartite
import networkx.algorithms.node_classification
import networkx.algorithms.centrality
import networkx.algorithms.chordal
import networkx.algorithms.cluster
import networkx.algorithms.clique
import networkx.algorithms.components
import networkx.algorithms.connectivity
import networkx.algorithms.community
import networkx.algorithms.coloring
import networkx.algorithms.flow
import networkx.algorithms.isomorphism
import networkx.algorithms.link_analysis
import networkx.algorithms.lowest_common_ancestors
import networkx.algorithms.operators
import networkx.algorithms.shortest_paths
import networkx.algorithms.tournament
import networkx.algorithms.traversal
import networkx.algorithms.tree
# Make certain functions from some of the previous subpackages available
# to the user as direct imports from the `networkx` namespace.
from networkx.algorithms.bipartite import complete_bipartite_graph
from networkx.algorithms.bipartite import is_bipartite
from networkx.algorithms.bipartite import project
from networkx.algorithms.bipartite import projected_graph
from networkx.algorithms.connectivity import all_pairs_node_connectivity
from networkx.algorithms.connectivity import all_node_cuts
from networkx.algorithms.connectivity import average_node_connectivity
from networkx.algorithms.connectivity import edge_connectivity
from networkx.algorithms.connectivity import edge_disjoint_paths
from networkx.algorithms.connectivity import k_components
from networkx.algorithms.connectivity import k_edge_components
from networkx.algorithms.connectivity import k_edge_subgraphs
from networkx.algorithms.connectivity import k_edge_augmentation
from networkx.algorithms.connectivity import is_k_edge_connected
from networkx.algorithms.connectivity import minimum_edge_cut
from networkx.algorithms.connectivity import minimum_node_cut
from networkx.algorithms.connectivity import node_connectivity
from networkx.algorithms.connectivity import node_disjoint_paths
from networkx.algorithms.connectivity import stoer_wagner
from networkx.algorithms.flow import capacity_scaling
from networkx.algorithms.flow import cost_of_flow
from networkx.algorithms.flow import gomory_hu_tree
from networkx.algorithms.flow import max_flow_min_cost
from networkx.algorithms.flow import maximum_flow
from networkx.algorithms.flow import maximum_flow_value
from networkx.algorithms.flow import min_cost_flow
from networkx.algorithms.flow import min_cost_flow_cost
from networkx.algorithms.flow import minimum_cut
from networkx.algorithms.flow import minimum_cut_value
from networkx.algorithms.flow import network_simplex
from networkx.algorithms.isomorphism import could_be_isomorphic
from networkx.algorithms.isomorphism import fast_could_be_isomorphic
from networkx.algorithms.isomorphism import faster_could_be_isomorphic
from networkx.algorithms.isomorphism import is_isomorphic
from networkx.algorithms.tree.branchings import maximum_branching
from networkx.algorithms.tree.branchings import maximum_spanning_arborescence
from networkx.algorithms.tree.branchings import minimum_branching
from networkx.algorithms.tree.branchings import minimum_spanning_arborescence
from networkx.algorithms.tree.coding import *
from networkx.algorithms.tree.decomposition import *
from networkx.algorithms.tree.mst import *
from networkx.algorithms.tree.operations import *
from networkx.algorithms.tree.recognition import *

@@ -1,22 +0,0 @@
"""Approximations of graph properties and Heuristic functions for optimization
problems.
.. warning:: The approximation submodule is not imported in the top-level
``networkx``.
These functions can be imported with
``from networkx.algorithms import approximation``.
"""
from networkx.algorithms.approximation.clustering_coefficient import *
from networkx.algorithms.approximation.clique import *
from networkx.algorithms.approximation.connectivity import *
from networkx.algorithms.approximation.dominating_set import *
from networkx.algorithms.approximation.kcomponents import *
from networkx.algorithms.approximation.independent_set import *
from networkx.algorithms.approximation.matching import *
from networkx.algorithms.approximation.ramsey import *
from networkx.algorithms.approximation.steinertree import *
from networkx.algorithms.approximation.vertex_cover import *
from networkx.algorithms.approximation.treewidth import *

@@ -1,159 +0,0 @@
"""Functions for computing large cliques."""
import networkx as nx
from networkx.utils import not_implemented_for
from networkx.algorithms.approximation import ramsey
__all__ = ["clique_removal", "max_clique", "large_clique_size"]
def max_clique(G):
r"""Find the Maximum Clique
Finds the $O(|V|/(\log|V|)^2)$ approximation of maximum clique/independent set
in the worst case.
Parameters
----------
G : NetworkX graph
Undirected graph
Returns
-------
clique : set
The approximate maximum clique of the graph
Notes
-----
A clique in an undirected graph G = (V, E) is a subset of the vertex set
`C \subseteq V` such that for every two vertices in C there exists an edge
connecting the two. This is equivalent to saying that the subgraph
induced by C is complete (in some cases, the term clique may also refer
to the subgraph).
A maximum clique is a clique of the largest possible size in a given graph.
The clique number `\omega(G)` of a graph G is the number of
vertices in a maximum clique in G. The intersection number of
G is the smallest number of cliques that together cover all edges of G.
https://en.wikipedia.org/wiki/Maximum_clique
References
----------
.. [1] Boppana, R., & Halldórsson, M. M. (1992).
Approximating maximum independent sets by excluding subgraphs.
BIT Numerical Mathematics, 32(2), 180–196. Springer.
doi:10.1007/BF01994876
"""
if G is None:
raise ValueError("Expected NetworkX graph!")
# finding the maximum clique in a graph is equivalent to finding
# the independent set in the complementary graph
cgraph = nx.complement(G)
iset, _ = clique_removal(cgraph)
return iset
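# A minimal usage sketch (an illustration, assuming networkx is installed):
# the complement of a complete graph has no edges, so the independent set
# found there is the whole vertex set and max_clique returns all of V.
import networkx as nx
from networkx.algorithms.approximation import max_clique
G = nx.complete_graph(5)
assert len(max_clique(G)) == 5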
def clique_removal(G):
r""" Repeatedly remove cliques from the graph.
Results in a $O(|V|/(\log |V|)^2)$ approximation of maximum clique
and independent set. Returns the largest independent set found, along
with found maximal cliques.
Parameters
----------
G : NetworkX graph
Undirected graph
Returns
-------
max_ind_cliques : (set, list) tuple
2-tuple of Maximal Independent Set and list of maximal cliques (sets).
References
----------
.. [1] Boppana, R., & Halldórsson, M. M. (1992).
Approximating maximum independent sets by excluding subgraphs.
BIT Numerical Mathematics, 32(2), 180–196. Springer.
"""
graph = G.copy()
c_i, i_i = ramsey.ramsey_R2(graph)
cliques = [c_i]
isets = [i_i]
while graph:
graph.remove_nodes_from(c_i)
c_i, i_i = ramsey.ramsey_R2(graph)
if c_i:
cliques.append(c_i)
if i_i:
isets.append(i_i)
# Determine the largest independent set as measured by cardinality.
maxiset = max(isets, key=len)
return maxiset, cliques
@not_implemented_for("directed")
@not_implemented_for("multigraph")
def large_clique_size(G):
"""Find the size of a large clique in a graph.
A *clique* is a subset of nodes in which each pair of nodes is
adjacent. This function is a heuristic for finding the size of a
large clique in the graph.
Parameters
----------
G : NetworkX graph
Returns
-------
int
The size of a large clique in the graph.
Notes
-----
This implementation is from [1]_. Its worst case time complexity is
:math:`O(n d^2)`, where *n* is the number of nodes in the graph and
*d* is the maximum degree.
This function is a heuristic, which means it may work well in
practice, but there is no rigorous mathematical guarantee on the
ratio between the returned number and the actual largest clique size
in the graph.
References
----------
.. [1] Pattabiraman, Bharath, et al.
"Fast Algorithms for the Maximum Clique Problem on Massive Graphs
with Applications to Overlapping Community Detection."
*Internet Mathematics* 11.4-5 (2015): 421--448.
<https://doi.org/10.1080/15427951.2014.986778>
See also
--------
:func:`networkx.algorithms.approximation.clique.max_clique`
A function that returns an approximate maximum clique with a
guarantee on the approximation ratio.
:mod:`networkx.algorithms.clique`
Functions for finding the exact maximum clique in a graph.
"""
degrees = G.degree
def _clique_heuristic(G, U, size, best_size):
if not U:
return max(best_size, size)
u = max(U, key=degrees)
U.remove(u)
N_prime = {v for v in G[u] if degrees[v] >= best_size}
return _clique_heuristic(G, U & N_prime, size + 1, best_size)
best_size = 0
nodes = (u for u in G if degrees[u] >= best_size)
for u in nodes:
neighbors = {v for v in G[u] if degrees[v] >= best_size}
best_size = _clique_heuristic(G, neighbors, 1, best_size)
return best_size
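# A hedged usage sketch, assuming networkx is installed: the degree-based
# heuristic recovers the planted 6-clique in a lollipop graph (a K6 with a
# short tail attached).
import networkx as nx
from networkx.algorithms.approximation import large_clique_size
G = nx.lollipop_graph(6, 4)  # complete graph on 6 nodes plus a 4-node path
assert large_clique_size(G) == 6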

@@ -1,58 +0,0 @@
from networkx.utils import not_implemented_for
from networkx.utils import py_random_state
__all__ = ["average_clustering"]
@py_random_state(2)
@not_implemented_for("directed")
def average_clustering(G, trials=1000, seed=None):
r"""Estimates the average clustering coefficient of G.
The local clustering of each node in `G` is the fraction of triangles
that actually exist over all possible triangles in its neighborhood.
The average clustering coefficient of a graph `G` is the mean of
local clusterings.
This function finds an approximate average clustering coefficient
for G by repeating `n` times (defined in `trials`) the following
experiment: choose a node at random, choose two of its neighbors
at random, and check if they are connected. The approximate
coefficient is the fraction of triangles found over the number
of trials [1]_.
Parameters
----------
G : NetworkX graph
trials : integer
Number of trials to perform (default 1000).
seed : integer, random_state, or None (default)
Indicator of random number generation state.
See :ref:`Randomness<randomness>`.
Returns
-------
c : float
Approximated average clustering coefficient.
References
----------
.. [1] Schank, Thomas, and Dorothea Wagner. Approximating clustering
coefficient and transitivity. Universität Karlsruhe, Fakultät für
Informatik, 2004.
http://www.emis.ams.org/journals/JGAA/accepted/2005/SchankWagner2005.9.2.pdf
"""
n = len(G)
triangles = 0
nodes = list(G)
for i in [int(seed.random() * n) for i in range(trials)]:
nbrs = list(G[nodes[i]])
if len(nbrs) < 2:
continue
u, v = seed.sample(nbrs, 2)
if u in G[v]:
triangles += 1
return triangles / float(trials)
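# A minimal sketch, assuming networkx is installed: on a complete graph every
# sampled pair of neighbors is connected, so the estimate is exactly 1.0.
import networkx as nx
from networkx.algorithms.approximation import average_clustering
G = nx.complete_graph(10)
assert average_clustering(G, trials=100, seed=42) == 1.0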

@@ -1,403 +0,0 @@
""" Fast approximation for node connectivity
"""
import itertools
from operator import itemgetter
import networkx as nx
__all__ = [
"local_node_connectivity",
"node_connectivity",
"all_pairs_node_connectivity",
]
INF = float("inf")
def local_node_connectivity(G, source, target, cutoff=None):
"""Compute node connectivity between source and target.
Pairwise or local node connectivity between two distinct and nonadjacent
nodes is the minimum number of nodes that must be removed (minimum
separating cutset) to disconnect them. By Menger's theorem, this is equal
to the number of node independent paths (paths that share no nodes other
than source and target), which is what this function computes.
This algorithm is a fast approximation that gives a strict lower
bound on the actual number of node independent paths between two nodes [1]_.
It works for both directed and undirected graphs.
Parameters
----------
G : NetworkX graph
source : node
Starting node for node connectivity
target : node
Ending node for node connectivity
cutoff : integer
Maximum node connectivity to consider. If None, the minimum degree
of source or target is used as a cutoff. Default value None.
Returns
-------
k: integer
pairwise node connectivity
Examples
--------
>>> # Platonic octahedral graph has node connectivity 4
>>> # for each non adjacent node pair
>>> from networkx.algorithms import approximation as approx
>>> G = nx.octahedral_graph()
>>> approx.local_node_connectivity(G, 0, 5)
4
Notes
-----
This algorithm [1]_ finds node independent paths between two nodes by
computing their shortest path using BFS, marking the nodes of the path
found as 'used' and then searching other shortest paths excluding the
nodes marked as used until no more paths exist. It is not exact because
a shortest path could use nodes that, if the path were longer, may belong
to two different node independent paths. Thus it only guarantees a
strict lower bound on node connectivity.
Note that the authors propose a further refinement, losing accuracy and
gaining speed, which is not implemented yet.
See also
--------
all_pairs_node_connectivity
node_connectivity
References
----------
.. [1] White, Douglas R., and Mark Newman. 2001. A Fast Algorithm for
Node-Independent Paths. Santa Fe Institute Working Paper #01-07-035
http://eclectic.ss.uci.edu/~drwhite/working.pdf
"""
if target == source:
raise nx.NetworkXError("source and target have to be different nodes.")
# Maximum possible node independent paths
if G.is_directed():
possible = min(G.out_degree(source), G.in_degree(target))
else:
possible = min(G.degree(source), G.degree(target))
K = 0
if not possible:
return K
if cutoff is None:
cutoff = INF
exclude = set()
for i in range(min(possible, cutoff)):
try:
path = _bidirectional_shortest_path(G, source, target, exclude)
exclude.update(set(path))
K += 1
except nx.NetworkXNoPath:
break
return K
def node_connectivity(G, s=None, t=None):
r"""Returns an approximation for node connectivity for a graph or digraph G.
Node connectivity is equal to the minimum number of nodes that
must be removed to disconnect G or render it trivial. By Menger's theorem,
this is equal to the number of node independent paths (paths that
share no nodes other than source and target).
If source and target nodes are provided, this function returns the
local node connectivity: the minimum number of nodes that must be
removed to break all paths from source to target in G.
This algorithm is based on a fast approximation that gives a strict lower
bound on the actual number of node independent paths between two nodes [1]_.
It works for both directed and undirected graphs.
Parameters
----------
G : NetworkX graph
Undirected graph
s : node
Source node. Optional. Default value: None.
t : node
Target node. Optional. Default value: None.
Returns
-------
K : integer
Node connectivity of G, or local node connectivity if source
and target are provided.
Examples
--------
>>> # Platonic octahedral graph is 4-node-connected
>>> from networkx.algorithms import approximation as approx
>>> G = nx.octahedral_graph()
>>> approx.node_connectivity(G)
4
Notes
-----
This algorithm [1]_ finds node independent paths between two nodes by
computing their shortest path using BFS, marking the nodes of the path
found as 'used' and then searching other shortest paths excluding the
nodes marked as used until no more paths exist. It is not exact because
a shortest path could use nodes that, if the path were longer, may belong
to two different node independent paths. Thus it only guarantees a
strict lower bound on node connectivity.
See also
--------
all_pairs_node_connectivity
local_node_connectivity
References
----------
.. [1] White, Douglas R., and Mark Newman. 2001. A Fast Algorithm for
Node-Independent Paths. Santa Fe Institute Working Paper #01-07-035
http://eclectic.ss.uci.edu/~drwhite/working.pdf
"""
if (s is not None and t is None) or (s is None and t is not None):
raise nx.NetworkXError("Both source and target must be specified.")
# Local node connectivity
if s is not None and t is not None:
if s not in G:
raise nx.NetworkXError(f"node {s} not in graph")
if t not in G:
raise nx.NetworkXError(f"node {t} not in graph")
return local_node_connectivity(G, s, t)
# Global node connectivity
if G.is_directed():
connected_func = nx.is_weakly_connected
iter_func = itertools.permutations
def neighbors(v):
return itertools.chain(G.predecessors(v), G.successors(v))
else:
connected_func = nx.is_connected
iter_func = itertools.combinations
neighbors = G.neighbors
if not connected_func(G):
return 0
# Choose a node with minimum degree
v, minimum_degree = min(G.degree(), key=itemgetter(1))
# Node connectivity is bounded by minimum degree
K = minimum_degree
# compute local node connectivity with all non-neighbors nodes
# and store the minimum
for w in set(G) - set(neighbors(v)) - {v}:
K = min(K, local_node_connectivity(G, v, w, cutoff=K))
# Same for non adjacent pairs of neighbors of v
for x, y in iter_func(neighbors(v), 2):
if y not in G[x] and x != y:
K = min(K, local_node_connectivity(G, x, y, cutoff=K))
return K
def all_pairs_node_connectivity(G, nbunch=None, cutoff=None):
""" Compute node connectivity between all pairs of nodes.
Pairwise or local node connectivity between two distinct and nonadjacent
nodes is the minimum number of nodes that must be removed (minimum
separating cutset) to disconnect them. By Menger's theorem, this is equal
to the number of node independent paths (paths that share no nodes other
than source and target), which is what this function computes.
This algorithm is a fast approximation that gives a strict lower
bound on the actual number of node independent paths between two nodes [1]_.
It works for both directed and undirected graphs.
Parameters
----------
G : NetworkX graph
nbunch: container
Container of nodes. If provided node connectivity will be computed
only over pairs of nodes in nbunch.
cutoff : integer
Maximum node connectivity to consider. If None, the minimum degree
of source or target is used as a cutoff in each pair of nodes.
Default value None.
Returns
-------
K : dictionary
Dictionary, keyed by source and target, of pairwise node connectivity
See Also
--------
local_node_connectivity
node_connectivity
References
----------
.. [1] White, Douglas R., and Mark Newman. 2001. A Fast Algorithm for
Node-Independent Paths. Santa Fe Institute Working Paper #01-07-035
http://eclectic.ss.uci.edu/~drwhite/working.pdf
"""
if nbunch is None:
nbunch = G
else:
nbunch = set(nbunch)
directed = G.is_directed()
if directed:
iter_func = itertools.permutations
else:
iter_func = itertools.combinations
all_pairs = {n: {} for n in nbunch}
for u, v in iter_func(nbunch, 2):
k = local_node_connectivity(G, u, v, cutoff=cutoff)
all_pairs[u][v] = k
if not directed:
all_pairs[v][u] = k
return all_pairs
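# A minimal sketch, assuming networkx is installed: in a cycle there are
# exactly two node-independent paths between every pair of nodes, and the
# approximation is exact here.
import networkx as nx
from networkx.algorithms import approximation as approx
G = nx.cycle_graph(5)
K = approx.all_pairs_node_connectivity(G)
assert all(k == 2 for d in K.values() for k in d.values())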
def _bidirectional_shortest_path(G, source, target, exclude):
"""Returns shortest path between source and target ignoring nodes in the
container 'exclude'.
Parameters
----------
G : NetworkX graph
source : node
Starting node for path
target : node
Ending node for path
exclude: container
Container for nodes to exclude from the search for shortest paths
Returns
-------
path: list
Shortest path between source and target ignoring nodes in 'exclude'
Raises
------
NetworkXNoPath
If there is no path or if nodes are adjacent and have only one path
between them
Notes
-----
This function and its helper are originally from
networkx.algorithms.shortest_paths.unweighted and are modified to
accept the extra parameter 'exclude', which is a container for nodes
already used in other paths that should be ignored.
References
----------
.. [1] White, Douglas R., and Mark Newman. 2001. A Fast Algorithm for
Node-Independent Paths. Santa Fe Institute Working Paper #01-07-035
http://eclectic.ss.uci.edu/~drwhite/working.pdf
"""
# call helper to do the real work
results = _bidirectional_pred_succ(G, source, target, exclude)
pred, succ, w = results
# build path from pred+w+succ
path = []
# from source to w
while w is not None:
path.append(w)
w = pred[w]
path.reverse()
# from w to target
w = succ[path[-1]]
while w is not None:
path.append(w)
w = succ[w]
return path
def _bidirectional_pred_succ(G, source, target, exclude):
# does BFS from both source and target and meets in the middle
# excludes nodes in the container "exclude" from the search
if source is None or target is None:
raise nx.NetworkXException(
"Bidirectional shortest path called without source or target"
)
if target == source:
return ({target: None}, {source: None}, source)
# handle either directed or undirected
if G.is_directed():
Gpred = G.predecessors
Gsucc = G.successors
else:
Gpred = G.neighbors
Gsucc = G.neighbors
# predecessor and successors in search
pred = {source: None}
succ = {target: None}
# initialize fringes, start with forward
forward_fringe = [source]
reverse_fringe = [target]
level = 0
while forward_fringe and reverse_fringe:
# Make sure that we iterate one step forward and one step backwards;
# thus source and target will only trigger "found path" when they are
# adjacent, and then they can be safely included in the container 'exclude'
level += 1
if level % 2 != 0:
this_level = forward_fringe
forward_fringe = []
for v in this_level:
for w in Gsucc(v):
if w in exclude:
continue
if w not in pred:
forward_fringe.append(w)
pred[w] = v
if w in succ:
return pred, succ, w # found path
else:
this_level = reverse_fringe
reverse_fringe = []
for v in this_level:
for w in Gpred(v):
if w in exclude:
continue
if w not in succ:
succ[w] = v
reverse_fringe.append(w)
if w in pred:
return pred, succ, w # found path
raise nx.NetworkXNoPath(f"No path between {source} and {target}.")

@@ -1,123 +0,0 @@
"""Functions for finding node and edge dominating sets.
A `dominating set`_ for an undirected graph *G* with vertex set *V*
and edge set *E* is a subset *D* of *V* such that every vertex not in
*D* is adjacent to at least one member of *D*. An `edge dominating set`_
is a subset *F* of *E* such that every edge not in *F* is
incident to an endpoint of at least one edge in *F*.
.. _dominating set: https://en.wikipedia.org/wiki/Dominating_set
.. _edge dominating set: https://en.wikipedia.org/wiki/Edge_dominating_set
"""
from ..matching import maximal_matching
from ...utils import not_implemented_for
__all__ = ["min_weighted_dominating_set", "min_edge_dominating_set"]
# TODO Why doesn't this algorithm work for directed graphs?
@not_implemented_for("directed")
def min_weighted_dominating_set(G, weight=None):
r"""Returns a dominating set that approximates the minimum weight node
dominating set.
Parameters
----------
G : NetworkX graph
Undirected graph.
weight : string
The node attribute storing the weight of a node. If provided,
the node attribute with this key must be a number for each
node. If not provided, each node is assumed to have weight one.
Returns
-------
min_weight_dominating_set : set
A set of nodes, the sum of whose weights is no more than `(\log
w(V)) w(V^*)`, where `w(V)` denotes the sum of the weights of
each node in the graph and `w(V^*)` denotes the sum of the
weights of each node in the minimum weight dominating set.
Notes
-----
This algorithm computes an approximate minimum weighted dominating
set for the graph `G`. The returned solution has weight `(\log
w(V)) w(V^*)`, where `w(V)` denotes the sum of the weights of each
node in the graph and `w(V^*)` denotes the sum of the weights of
each node in the minimum weight dominating set for the graph.
This implementation of the algorithm runs in $O(m)$ time, where $m$
is the number of edges in the graph.
References
----------
.. [1] Vazirani, Vijay V.
*Approximation Algorithms*.
Springer Science & Business Media, 2001.
"""
# The unique dominating set for the null graph is the empty set.
if len(G) == 0:
return set()
# This is the dominating set that will eventually be returned.
dom_set = set()
def _cost(node_and_neighborhood):
"""Returns the cost-effectiveness of greedily choosing the given
node.
`node_and_neighborhood` is a two-tuple comprising a node and its
closed neighborhood.
"""
v, neighborhood = node_and_neighborhood
return G.nodes[v].get(weight, 1) / len(neighborhood - dom_set)
# This is a set of all vertices not already covered by the
# dominating set.
vertices = set(G)
# This is a dictionary mapping each node to the closed neighborhood
# of that node.
neighborhoods = {v: {v} | set(G[v]) for v in G}
# Continue until all vertices are adjacent to some node in the
# dominating set.
while vertices:
# Find the most cost-effective node to add, along with its
# closed neighborhood.
dom_node, min_set = min(neighborhoods.items(), key=_cost)
# Add the node to the dominating set and reduce the remaining
# set of nodes to cover.
dom_set.add(dom_node)
del neighborhoods[dom_node]
vertices -= min_set
return dom_set
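# A minimal sketch, assuming networkx is installed: in a star graph the hub
# covers every node at cost 1, so the greedy rule selects it immediately.
import networkx as nx
from networkx.algorithms.approximation import min_weighted_dominating_set
G = nx.star_graph(10)  # hub 0 plus ten leaves
assert min_weighted_dominating_set(G) == {0}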
def min_edge_dominating_set(G):
r"""Returns minimum cardinality edge dominating set.
Parameters
----------
G : NetworkX graph
Undirected graph
Returns
-------
min_edge_dominating_set : set
Returns a set of dominating edges whose size is no more than 2 * OPT.
Notes
-----
The algorithm computes an approximate solution to the edge dominating set
problem. The result is no more than 2 * OPT in terms of size of the set.
Runtime of the algorithm is $O(|E|)$.
"""
if not G:
raise ValueError("Expected non-empty NetworkX graph!")
return maximal_matching(G)
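# A minimal sketch, assuming networkx is installed: any maximal matching of a
# 5-node path dominates every edge; here it consists of two edges.
import networkx as nx
from networkx.algorithms.approximation import min_edge_dominating_set
G = nx.path_graph(5)  # edges (0, 1), (1, 2), (2, 3), (3, 4)
assert len(min_edge_dominating_set(G)) == 2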

@@ -1,58 +0,0 @@
r"""
Independent Set
An independent set, or stable set, is a set of vertices in a graph, no two of
which are adjacent. That is, it is a set I of vertices such that for every
two vertices in I, there is no edge connecting the two. Equivalently, each
edge in the graph has at most one endpoint in I. The size of an independent
set is the number of vertices it contains.
A maximum independent set is a largest independent set for a given graph G
and its size is denoted $\alpha(G)$. The problem of finding such a set is called
the maximum independent set problem and is an NP-hard optimization problem.
As such, it is unlikely that there exists an efficient algorithm for finding
a maximum independent set of a graph.
`Wikipedia: Independent set <https://en.wikipedia.org/wiki/Independent_set_(graph_theory)>`_
The independent set algorithm is based on the following paper:
$O(|V|/(\log|V|)^2)$ approximation of maximum clique/independent set.
Boppana, R., & Halldórsson, M. M. (1992).
Approximating maximum independent sets by excluding subgraphs.
BIT Numerical Mathematics, 32(2), 180–196. Springer.
doi:10.1007/BF01994876
"""
from networkx.algorithms.approximation import clique_removal
__all__ = ["maximum_independent_set"]
def maximum_independent_set(G):
"""Returns an approximate maximum independent set.
Parameters
----------
G : NetworkX graph
Undirected graph
Returns
-------
iset : Set
The approximate maximum independent set
Notes
-----
Finds the $O(|V|/(\log|V|)^2)$ approximation of independent set in the worst case.
References
----------
.. [1] Boppana, R., & Halldórsson, M. M. (1992).
Approximating maximum independent sets by excluding subgraphs.
BIT Numerical Mathematics, 32(2), 180–196. Springer.
"""
iset, _ = clique_removal(G)
return iset
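# A minimal sketch, assuming networkx is installed: in a complete bipartite
# graph the larger side is independent, and the heuristic finds it here.
import networkx as nx
from networkx.algorithms.approximation import maximum_independent_set
G = nx.complete_bipartite_graph(2, 5)
print(len(maximum_independent_set(G)))  # -> 5 on this instance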

@@ -1,368 +0,0 @@
""" Fast approximation for k-component structure
"""
import itertools
from collections import defaultdict
from collections.abc import Mapping
import networkx as nx
from networkx.exception import NetworkXError
from networkx.utils import not_implemented_for
from networkx.algorithms.approximation import local_node_connectivity
__all__ = ["k_components"]
@not_implemented_for("directed")
def k_components(G, min_density=0.95):
r"""Returns the approximate k-component structure of a graph G.
A `k`-component is a maximal subgraph of a graph G that has, at least,
node connectivity `k`: we need to remove at least `k` nodes to break it
into more components. `k`-components have an inherent hierarchical
structure because they are nested in terms of connectivity: a connected
graph can contain several 2-components, each of which can contain
one or more 3-components, and so forth.
This implementation is based on the fast heuristics to approximate
the `k`-component structure of a graph [1]_, which, in turn, is based on
a fast approximation algorithm for finding good lower bounds on the number
of node independent paths between two nodes [2]_.
Parameters
----------
G : NetworkX graph
Undirected graph
min_density : float
Density relaxation threshold. Default value 0.95
Returns
-------
k_components : dict
Dictionary with connectivity level `k` as key and a list of
sets of nodes that form a k-component of level `k` as values.
Examples
--------
>>> # Petersen graph has 10 nodes and it is triconnected, thus all
>>> # nodes are in a single component on all three connectivity levels
>>> from networkx.algorithms import approximation as apxa
>>> G = nx.petersen_graph()
>>> k_components = apxa.k_components(G)
Notes
-----
The logic of the approximation algorithm for computing the `k`-component
structure [1]_ is based on repeatedly applying simple and fast algorithms
for `k`-cores and biconnected components in order to narrow down the
number of pairs of nodes over which we have to compute White and Newman's
approximation algorithm for finding node independent paths [2]_. More
formally, this algorithm is based on Whitney's theorem, which states
an inclusion relation among node connectivity, edge connectivity, and
minimum degree for any graph G. This theorem implies that every
`k`-component is nested inside a `k`-edge-component, which in turn,
is contained in a `k`-core. Thus, this algorithm computes node independent
paths among pairs of nodes in each biconnected part of each `k`-core,
and repeats this procedure for each `k` from 3 to the maximal core number
of a node in the input graph.
Because, in practice, many nodes of the core of level `k` inside a
bicomponent actually are part of a component of level k, the auxiliary
graph needed for the algorithm is likely to be very dense. Thus, we use
a complement graph data structure (see `AntiGraph`) to save memory.
AntiGraph only stores information of the edges that are *not* present
in the actual auxiliary graph. When applying algorithms to this
complement graph data structure, it behaves as if it were the dense
version.
See also
--------
k_components
References
----------
.. [1] Torrents, J. and F. Ferraro (2015) Structural Cohesion:
Visualization and Heuristics for Fast Computation.
https://arxiv.org/pdf/1503.04476v1
.. [2] White, Douglas R., and Mark Newman (2001) A Fast Algorithm for
Node-Independent Paths. Santa Fe Institute Working Paper #01-07-035
http://eclectic.ss.uci.edu/~drwhite/working.pdf
.. [3] Moody, J. and D. White (2003). Social cohesion and embeddedness:
A hierarchical conception of social groups.
American Sociological Review 68(1), 103--28.
http://www2.asanet.org/journals/ASRFeb03MoodyWhite.pdf
"""
# Dictionary with connectivity level (k) as keys and a list of
# sets of nodes that form a k-component as values
k_components = defaultdict(list)
# make a few functions local for speed
node_connectivity = local_node_connectivity
k_core = nx.k_core
core_number = nx.core_number
biconnected_components = nx.biconnected_components
density = nx.density
combinations = itertools.combinations
# Exact solution for k = {1,2}
# There is a linear time algorithm for triconnectivity, if we had an
# implementation available we could start from k = 4.
for component in nx.connected_components(G):
# isolated nodes have connectivity 0
comp = set(component)
if len(comp) > 1:
k_components[1].append(comp)
for bicomponent in nx.biconnected_components(G):
# avoid considering dyads as bicomponents
bicomp = set(bicomponent)
if len(bicomp) > 2:
k_components[2].append(bicomp)
# There is no k-component of k > maximum core number
# \kappa(G) <= \lambda(G) <= \delta(G)
g_cnumber = core_number(G)
max_core = max(g_cnumber.values())
for k in range(3, max_core + 1):
C = k_core(G, k, core_number=g_cnumber)
for nodes in biconnected_components(C):
# Build a subgraph SG induced by the nodes that are part of
# each biconnected component of the k-core subgraph C.
if len(nodes) < k:
continue
SG = G.subgraph(nodes)
# Build auxiliary graph
H = _AntiGraph()
H.add_nodes_from(SG.nodes())
for u, v in combinations(SG, 2):
K = node_connectivity(SG, u, v, cutoff=k)
if k > K:
H.add_edge(u, v)
for h_nodes in biconnected_components(H):
if len(h_nodes) <= k:
continue
SH = H.subgraph(h_nodes)
for Gc in _cliques_heuristic(SG, SH, k, min_density):
for k_nodes in biconnected_components(Gc):
Gk = nx.k_core(SG.subgraph(k_nodes), k)
if len(Gk) <= k:
continue
k_components[k].append(set(Gk))
return k_components
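# A minimal sketch, assuming networkx is installed: the Petersen graph is
# 3-connected, so a single 10-node component shows up at levels k = 1, 2, 3.
import networkx as nx
from networkx.algorithms import approximation as apxa
G = nx.petersen_graph()
result = apxa.k_components(G)
print(sorted(result))  # -> [1, 2, 3], one component of all 10 nodes per level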
def _cliques_heuristic(G, H, k, min_density):
h_cnumber = nx.core_number(H)
for i, c_value in enumerate(sorted(set(h_cnumber.values()), reverse=True)):
cands = {n for n, c in h_cnumber.items() if c == c_value}
# Skip checking for overlap for the highest core value
if i == 0:
overlap = False
else:
overlap = set.intersection(
*[{x for x in H[n] if x not in cands} for n in cands]
)
if overlap and len(overlap) < k:
SH = H.subgraph(cands | overlap)
else:
SH = H.subgraph(cands)
sh_cnumber = nx.core_number(SH)
SG = nx.k_core(G.subgraph(SH), k)
while not (_same(sh_cnumber) and nx.density(SH) >= min_density):
# This subgraph must be writable => .copy()
SH = H.subgraph(SG).copy()
if len(SH) <= k:
break
sh_cnumber = nx.core_number(SH)
sh_deg = dict(SH.degree())
min_deg = min(sh_deg.values())
SH.remove_nodes_from(n for n, d in sh_deg.items() if d == min_deg)
SG = nx.k_core(G.subgraph(SH), k)
else:
yield SG
def _same(measure, tol=0):
vals = set(measure.values())
return (max(vals) - min(vals)) <= tol
class _AntiGraph(nx.Graph):
"""
Class for complement graphs.
The main goal is to be able to work with big and dense graphs with
a low memory footprint.
In this class you add the edges that *do not exist* in the dense graph,
the report methods of the class return the neighbors, the edges and
the degree as if it were the dense graph. Thus it's possible to use
an instance of this class with some of NetworkX functions. In this
case we only use k-core, connected_components, and biconnected_components.
"""
all_edge_dict = {"weight": 1}
def single_edge_dict(self):
return self.all_edge_dict
edge_attr_dict_factory = single_edge_dict
def __getitem__(self, n):
"""Returns a dict of neighbors of node n in the dense graph.
Parameters
----------
n : node
A node in the graph.
Returns
-------
adj_dict : dictionary
The adjacency dictionary for nodes connected to n.
"""
all_edge_dict = self.all_edge_dict
return {
node: all_edge_dict for node in set(self._adj) - set(self._adj[n]) - {n}
}
def neighbors(self, n):
"""Returns an iterator over all neighbors of node n in the
dense graph.
"""
try:
return iter(set(self._adj) - set(self._adj[n]) - {n})
except KeyError as e:
raise NetworkXError(f"The node {n} is not in the graph.") from e
class AntiAtlasView(Mapping):
"""An adjacency inner dict for AntiGraph"""
def __init__(self, graph, node):
self._graph = graph
self._atlas = graph._adj[node]
self._node = node
def __len__(self):
return len(self._graph) - len(self._atlas) - 1
def __iter__(self):
return (n for n in self._graph if n not in self._atlas and n != self._node)
def __getitem__(self, nbr):
nbrs = set(self._graph._adj) - set(self._atlas) - {self._node}
if nbr in nbrs:
return self._graph.all_edge_dict
raise KeyError(nbr)
class AntiAdjacencyView(AntiAtlasView):
"""An adjacency outer dict for AntiGraph"""
def __init__(self, graph):
self._graph = graph
self._atlas = graph._adj
def __len__(self):
return len(self._atlas)
def __iter__(self):
return iter(self._graph)
def __getitem__(self, node):
if node not in self._graph:
raise KeyError(node)
return self._graph.AntiAtlasView(self._graph, node)
@property
def adj(self):
return self.AntiAdjacencyView(self)
def subgraph(self, nodes):
"""This subgraph method returns a full AntiGraph. Not a View"""
nodes = set(nodes)
G = _AntiGraph()
G.add_nodes_from(nodes)
for n in G:
Gnbrs = G.adjlist_inner_dict_factory()
G._adj[n] = Gnbrs
for nbr, d in self._adj[n].items():
if nbr in G._adj:
Gnbrs[nbr] = d
G._adj[nbr][n] = d
G.graph = self.graph
return G
class AntiDegreeView(nx.reportviews.DegreeView):
def __iter__(self):
all_nodes = set(self._succ)
for n in self._nodes:
nbrs = all_nodes - set(self._succ[n]) - {n}
yield (n, len(nbrs))
def __getitem__(self, n):
nbrs = set(self._succ) - set(self._succ[n]) - {n}
# AntiGraph is a ThinGraph so all edges have weight 1
return len(nbrs) + (n in nbrs)
@property
def degree(self):
"""Returns an iterator for (node, degree) and degree for single node.
The node degree is the number of edges adjacent to the node.
Parameters
----------
nbunch : iterable container, optional (default=all nodes)
A container of nodes. The container will be iterated
through once.
weight : string or None, optional (default=None)
The edge attribute that holds the numerical value used
as a weight. If None, then each edge has weight 1.
The degree is the sum of the edge weights adjacent to the node.
Returns
-------
deg:
Degree of the node, if a single node is passed as argument.
nd_iter : an iterator
The iterator returns two-tuples of (node, degree).
See Also
--------
degree
Examples
--------
>>> G = nx.path_graph(4)
>>> G.degree(0) # node 0 with degree 1
1
>>> list(G.degree([0, 1]))
[(0, 1), (1, 2)]
"""
return self.AntiDegreeView(self)
def adjacency(self):
"""Returns an iterator of (node, adjacency set) tuples for all nodes
in the dense graph.
This is the fastest way to look at every edge.
For directed graphs, only outgoing adjacencies are included.
Returns
-------
adj_iter : iterator
An iterator of (node, adjacency set) for all nodes in
the graph.
"""
for n in self._adj:
yield (n, set(self._adj) - set(self._adj[n]) - {n})
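# A minimal sketch, assuming networkx is installed: an _AntiGraph built from
# the edges *missing* in a dense graph reports the dense graph's degrees.
import networkx as nx
from networkx.algorithms.approximation.kcomponents import _AntiGraph
dense = nx.complete_graph(4)
anti = _AntiGraph()
anti.add_nodes_from(dense)  # no anti-edges stored: behaves like K4
assert dict(anti.degree) == dict(dense.degree)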

@@ -1,42 +0,0 @@
"""
**************
Graph Matching
**************
Given a graph G = (V,E), a matching M in G is a set of pairwise non-adjacent
edges; that is, no two edges share a common vertex.
`Wikipedia: Matching <https://en.wikipedia.org/wiki/Matching_(graph_theory)>`_
"""
import networkx as nx
__all__ = ["min_maximal_matching"]
def min_maximal_matching(G):
r"""Returns the minimum maximal matching of G. That is, out of all maximal
matchings of the graph G, the smallest is returned.
Parameters
----------
G : NetworkX graph
Undirected graph
Returns
-------
min_maximal_matching : set
Returns a set of edges such that no two edges share a common endpoint
and every edge not in the set shares some common endpoint in the set.
Cardinality will be 2*OPT in the worst case.
Notes
-----
The algorithm computes an approximate solution to the minimum maximal
cardinality matching problem. The solution is no more than 2 * OPT in size.
Runtime is $O(|E|)$.
References
----------
.. [1] Vazirani, Vijay V. *Approximation Algorithms*. Springer Science & Business Media, 2001.
"""
return nx.maximal_matching(G)

@@ -1,42 +0,0 @@
"""
Ramsey numbers.
"""
import networkx as nx
from ...utils import arbitrary_element
__all__ = ["ramsey_R2"]
def ramsey_R2(G):
r"""Compute the largest clique and largest independent set in `G`.
This can be used to estimate bounds for the 2-color
Ramsey number `R(2;s,t)` for `G`.
This is a recursive implementation, which may exceed the recursion limit
on large graphs. Note that self-loop edges are ignored.
Parameters
----------
G : NetworkX graph
Undirected graph
Returns
-------
max_pair : (set, set) tuple
Maximum clique, Maximum independent set.
"""
if not G:
return set(), set()
node = arbitrary_element(G)
nbrs = (nbr for nbr in nx.all_neighbors(G, node) if nbr != node)
nnbrs = nx.non_neighbors(G, node)
c_1, i_1 = ramsey_R2(G.subgraph(nbrs).copy())
c_2, i_2 = ramsey_R2(G.subgraph(nnbrs).copy())
c_1.add(node)
i_2.add(node)
# Choose the larger of the two cliques and the larger of the two
# independent sets, according to cardinality.
return max(c_1, c_2, key=len), max(i_1, i_2, key=len)
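# A minimal sketch, assuming networkx is installed: in a 5-cycle the largest
# clique is an edge and a largest independent set is a non-adjacent pair.
import networkx as nx
from networkx.algorithms.approximation import ramsey_R2
clique, indep = ramsey_R2(nx.cycle_graph(5))
print(len(clique), len(indep))  # -> 2 2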

@@ -1,104 +0,0 @@
from itertools import chain
from networkx.utils import pairwise, not_implemented_for
import networkx as nx
__all__ = ["metric_closure", "steiner_tree"]
@not_implemented_for("directed")
def metric_closure(G, weight="weight"):
""" Return the metric closure of a graph.
The metric closure of a graph *G* is the complete graph in which each edge
is weighted by the shortest path distance between the nodes in *G*.
Parameters
----------
G : NetworkX graph
Returns
-------
NetworkX graph
Metric closure of the graph `G`.
"""
M = nx.Graph()
Gnodes = set(G)
# check for connected graph while processing first node
all_paths_iter = nx.all_pairs_dijkstra(G, weight=weight)
u, (distance, path) = next(all_paths_iter)
if Gnodes - set(distance):
msg = "G is not a connected graph. metric_closure is not defined."
raise nx.NetworkXError(msg)
Gnodes.remove(u)
for v in Gnodes:
M.add_edge(u, v, distance=distance[v], path=path[v])
# first node done -- now process the rest
for u, (distance, path) in all_paths_iter:
Gnodes.remove(u)
for v in Gnodes:
M.add_edge(u, v, distance=distance[v], path=path[v])
return M
@not_implemented_for("directed")
def steiner_tree(G, terminal_nodes, weight="weight"):
""" Return an approximation to the minimum Steiner tree of a graph.
The minimum Steiner tree of `G` w.r.t. a set of `terminal_nodes`
is a tree within `G` that spans those nodes and has minimum size
(sum of edge weights) among all such trees.
The minimum Steiner tree can be approximated by computing the minimum
spanning tree of the subgraph of the metric closure of *G* induced by the
terminal nodes, where the metric closure of *G* is the complete graph in
which each edge is weighted by the shortest path distance between the
nodes in *G*.
This algorithm produces a tree whose weight is within a (2 - (2 / t))
factor of the weight of the optimal Steiner tree, where *t* is the
number of terminal nodes.
Parameters
----------
G : NetworkX graph
terminal_nodes : list
A list of terminal nodes for which the minimum Steiner tree is
to be found.
Returns
-------
NetworkX graph
Approximation to the minimum Steiner tree of `G` induced by
`terminal_nodes`.
Notes
-----
For multigraphs, the edge between two nodes with minimum weight is the
edge put into the Steiner tree.
References
----------
.. [1] Steiner_tree_problem on Wikipedia.
https://en.wikipedia.org/wiki/Steiner_tree_problem
"""
# H is the subgraph induced by terminal_nodes in the metric closure M of G.
M = metric_closure(G, weight=weight)
H = M.subgraph(terminal_nodes)
# Use the 'distance' attribute of each edge provided by M.
mst_edges = nx.minimum_spanning_edges(H, weight="distance", data=True)
# Create an iterator over each edge in each shortest path; repeats are okay
edges = chain.from_iterable(pairwise(d["path"]) for u, v, d in mst_edges)
# For multigraph we should add the minimal weight edge keys
if G.is_multigraph():
edges = (
(u, v, min(G[u][v], key=lambda k: G[u][v][k][weight])) for u, v in edges
)
T = G.edge_subgraph(edges)
return T
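# A minimal sketch, assuming networkx is installed: on a path graph the
# approximate Steiner tree for the two endpoints is the whole path itself.
import networkx as nx
from networkx.algorithms.approximation import steiner_tree
G = nx.path_graph(7)
T = steiner_tree(G, [0, 6])
assert sorted(T.edges()) == sorted(G.edges())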

@@ -1,43 +0,0 @@
import networkx as nx
from networkx.algorithms.approximation import average_clustering
# This approximation has to be exact in regular graphs
# with no triangles or with all possible triangles.
def test_petersen():
# Actual coefficient is 0
G = nx.petersen_graph()
assert average_clustering(G, trials=int(len(G) / 2)) == nx.average_clustering(G)
def test_petersen_seed():
# Actual coefficient is 0
G = nx.petersen_graph()
assert average_clustering(
G, trials=int(len(G) / 2), seed=1
) == nx.average_clustering(G)
def test_tetrahedral():
# Actual coefficient is 1
G = nx.tetrahedral_graph()
assert average_clustering(G, trials=int(len(G) / 2)) == nx.average_clustering(G)
def test_dodecahedral():
# Actual coefficient is 0
G = nx.dodecahedral_graph()
assert average_clustering(G, trials=int(len(G) / 2)) == nx.average_clustering(G)
def test_empty():
G = nx.empty_graph(5)
assert average_clustering(G, trials=int(len(G) / 2)) == 0
def test_complete():
G = nx.complete_graph(5)
assert average_clustering(G, trials=int(len(G) / 2)) == 1
G = nx.complete_graph(7)
assert average_clustering(G, trials=int(len(G) / 2)) == 1

@@ -1,107 +0,0 @@
"""Unit tests for the :mod:`networkx.algorithms.approximation.clique`
module.
"""
import networkx as nx
from networkx.algorithms.approximation import max_clique
from networkx.algorithms.approximation import clique_removal
from networkx.algorithms.approximation import large_clique_size
def is_independent_set(G, nodes):
"""Returns True if and only if `nodes` is a clique in `G`.
`G` is a NetworkX graph. `nodes` is an iterable of nodes in
`G`.
"""
return G.subgraph(nodes).number_of_edges() == 0
def is_clique(G, nodes):
"""Returns True if and only if `nodes` is an independent set
in `G`.
`G` is an undirected simple graph. `nodes` is an iterable of
nodes in `G`.
"""
H = G.subgraph(nodes)
n = len(H)
return H.number_of_edges() == n * (n - 1) // 2
class TestCliqueRemoval:
"""Unit tests for the
:func:`~networkx.algorithms.approximation.clique_removal` function.
"""
def test_trivial_graph(self):
G = nx.trivial_graph()
independent_set, cliques = clique_removal(G)
assert is_independent_set(G, independent_set)
assert all(is_clique(G, clique) for clique in cliques)
# In fact, we should only have 1-cliques, that is, singleton nodes.
assert all(len(clique) == 1 for clique in cliques)
def test_complete_graph(self):
G = nx.complete_graph(10)
independent_set, cliques = clique_removal(G)
assert is_independent_set(G, independent_set)
assert all(is_clique(G, clique) for clique in cliques)
def test_barbell_graph(self):
G = nx.barbell_graph(10, 5)
independent_set, cliques = clique_removal(G)
assert is_independent_set(G, independent_set)
assert all(is_clique(G, clique) for clique in cliques)
class TestMaxClique:
"""Unit tests for the :func:`networkx.algorithms.approximation.max_clique`
function.
"""
def test_null_graph(self):
G = nx.null_graph()
assert len(max_clique(G)) == 0
def test_complete_graph(self):
graph = nx.complete_graph(30)
# this should return the entire graph
mc = max_clique(graph)
assert 30 == len(mc)
def test_maximal_by_cardinality(self):
"""Tests that the maximal clique is computed according to maximum
cardinality of the sets.
For more information, see pull request #1531.
"""
G = nx.complete_graph(5)
G.add_edge(4, 5)
clique = max_clique(G)
assert len(clique) > 1
G = nx.lollipop_graph(30, 2)
clique = max_clique(G)
assert len(clique) > 2
def test_large_clique_size():
G = nx.complete_graph(9)
nx.add_cycle(G, [9, 10, 11])
G.add_edge(8, 9)
G.add_edge(1, 12)
G.add_node(13)
assert large_clique_size(G) == 9
G.remove_node(5)
assert large_clique_size(G) == 8
G.remove_edge(2, 3)
assert large_clique_size(G) == 7

@@ -1,199 +0,0 @@
import pytest
import networkx as nx
from networkx.algorithms import approximation as approx
def test_global_node_connectivity():
# Figure 1 chapter on Connectivity
G = nx.Graph()
G.add_edges_from(
[
(1, 2),
(1, 3),
(1, 4),
(1, 5),
(2, 3),
(2, 6),
(3, 4),
(3, 6),
(4, 6),
(4, 7),
(5, 7),
(6, 8),
(6, 9),
(7, 8),
(7, 10),
(8, 11),
(9, 10),
(9, 11),
(10, 11),
]
)
assert 2 == approx.local_node_connectivity(G, 1, 11)
assert 2 == approx.node_connectivity(G)
assert 2 == approx.node_connectivity(G, 1, 11)
def test_white_harary1():
# Figure 1b, White and Harary (2001)
# A graph with high adhesion (edge connectivity) and low cohesion
# (node connectivity)
G = nx.disjoint_union(nx.complete_graph(4), nx.complete_graph(4))
G.remove_node(7)
for i in range(4, 7):
G.add_edge(0, i)
G = nx.disjoint_union(G, nx.complete_graph(4))
G.remove_node(G.order() - 1)
for i in range(7, 10):
G.add_edge(0, i)
assert 1 == approx.node_connectivity(G)
def test_complete_graphs():
for n in range(5, 25, 5):
G = nx.complete_graph(n)
assert n - 1 == approx.node_connectivity(G)
assert n - 1 == approx.node_connectivity(G, 0, 3)
def test_empty_graphs():
for k in range(5, 25, 5):
G = nx.empty_graph(k)
assert 0 == approx.node_connectivity(G)
assert 0 == approx.node_connectivity(G, 0, 3)
def test_petersen():
G = nx.petersen_graph()
assert 3 == approx.node_connectivity(G)
assert 3 == approx.node_connectivity(G, 0, 5)
# Approximation fails with tutte graph
# def test_tutte():
# G = nx.tutte_graph()
# assert_equal(3, approx.node_connectivity(G))
def test_dodecahedral():
G = nx.dodecahedral_graph()
assert 3 == approx.node_connectivity(G)
assert 3 == approx.node_connectivity(G, 0, 5)
def test_octahedral():
G = nx.octahedral_graph()
assert 4 == approx.node_connectivity(G)
assert 4 == approx.node_connectivity(G, 0, 5)
# Approximation can fail with icosahedral graph depending
# on iteration order.
# def test_icosahedral():
# G=nx.icosahedral_graph()
# assert_equal(5, approx.node_connectivity(G))
# assert_equal(5, approx.node_connectivity(G, 0, 5))
def test_only_source():
G = nx.complete_graph(5)
pytest.raises(nx.NetworkXError, approx.node_connectivity, G, s=0)
def test_only_target():
G = nx.complete_graph(5)
pytest.raises(nx.NetworkXError, approx.node_connectivity, G, t=0)
def test_missing_source():
G = nx.path_graph(4)
pytest.raises(nx.NetworkXError, approx.node_connectivity, G, 10, 1)
def test_missing_target():
G = nx.path_graph(4)
pytest.raises(nx.NetworkXError, approx.node_connectivity, G, 1, 10)
def test_source_equals_target():
G = nx.complete_graph(5)
pytest.raises(nx.NetworkXError, approx.local_node_connectivity, G, 0, 0)
def test_directed_node_connectivity():
G = nx.cycle_graph(10, create_using=nx.DiGraph()) # only one direction
D = nx.cycle_graph(10).to_directed() # 2 reciprocal edges
assert 1 == approx.node_connectivity(G)
assert 1 == approx.node_connectivity(G, 1, 4)
assert 2 == approx.node_connectivity(D)
assert 2 == approx.node_connectivity(D, 1, 4)
class TestAllPairsNodeConnectivityApprox:
@classmethod
def setup_class(cls):
cls.path = nx.path_graph(7)
cls.directed_path = nx.path_graph(7, create_using=nx.DiGraph())
cls.cycle = nx.cycle_graph(7)
cls.directed_cycle = nx.cycle_graph(7, create_using=nx.DiGraph())
cls.gnp = nx.gnp_random_graph(30, 0.1)
cls.directed_gnp = nx.gnp_random_graph(30, 0.1, directed=True)
cls.K20 = nx.complete_graph(20)
cls.K10 = nx.complete_graph(10)
cls.K5 = nx.complete_graph(5)
cls.G_list = [
cls.path,
cls.directed_path,
cls.cycle,
cls.directed_cycle,
cls.gnp,
cls.directed_gnp,
cls.K10,
cls.K5,
cls.K20,
]
def test_cycles(self):
K_undir = approx.all_pairs_node_connectivity(self.cycle)
for source in K_undir:
for target, k in K_undir[source].items():
assert k == 2
K_dir = approx.all_pairs_node_connectivity(self.directed_cycle)
for source in K_dir:
for target, k in K_dir[source].items():
assert k == 1
def test_complete(self):
for G in [self.K10, self.K5, self.K20]:
K = approx.all_pairs_node_connectivity(G)
for source in K:
for target, k in K[source].items():
assert k == len(G) - 1
def test_paths(self):
K_undir = approx.all_pairs_node_connectivity(self.path)
for source in K_undir:
for target, k in K_undir[source].items():
assert k == 1
K_dir = approx.all_pairs_node_connectivity(self.directed_path)
for source in K_dir:
for target, k in K_dir[source].items():
if source < target:
assert k == 1
else:
assert k == 0
def test_cutoff(self):
for G in [self.K10, self.K5, self.K20]:
for mp in [2, 3, 4]:
paths = approx.all_pairs_node_connectivity(G, cutoff=mp)
for source in paths:
for target, K in paths[source].items():
assert K == mp
def test_all_pairs_connectivity_nbunch(self):
G = nx.complete_graph(5)
nbunch = [0, 2, 3]
C = approx.all_pairs_node_connectivity(G, nbunch=nbunch)
assert len(C) == len(nbunch)

@ -1,65 +0,0 @@
import networkx as nx
from networkx.algorithms.approximation import min_weighted_dominating_set
from networkx.algorithms.approximation import min_edge_dominating_set
class TestMinWeightDominatingSet:
def test_min_weighted_dominating_set(self):
graph = nx.Graph()
graph.add_edge(1, 2)
graph.add_edge(1, 5)
graph.add_edge(2, 3)
graph.add_edge(2, 5)
graph.add_edge(3, 4)
graph.add_edge(3, 6)
graph.add_edge(5, 6)
vertices = {1, 2, 3, 4, 5, 6}
        # due to ties, testing tight bounds is hard
dom_set = min_weighted_dominating_set(graph)
for vertex in vertices - dom_set:
neighbors = set(graph.neighbors(vertex))
assert len(neighbors & dom_set) > 0, "Non dominating set found!"
def test_star_graph(self):
"""Tests that an approximate dominating set for the star graph,
even when the center node does not have the smallest integer
label, gives just the center node.
For more information, see #1527.
"""
# Create a star graph in which the center node has the highest
# label instead of the lowest.
G = nx.star_graph(10)
G = nx.relabel_nodes(G, {0: 9, 9: 0})
assert min_weighted_dominating_set(G) == {9}
def test_min_edge_dominating_set(self):
graph = nx.path_graph(5)
dom_set = min_edge_dominating_set(graph)
# this is a crappy way to test, but good enough for now.
for edge in graph.edges():
if edge in dom_set:
continue
else:
                u, v = edge
                found = False
                for dom_edge in dom_set:
                    found |= u in dom_edge or v in dom_edge
                assert found, "Non-adjacent edge found!"
graph = nx.complete_graph(10)
dom_set = min_edge_dominating_set(graph)
# this is a crappy way to test, but good enough for now.
for edge in graph.edges():
if edge in dom_set:
continue
else:
                u, v = edge
                found = False
                for dom_edge in dom_set:
                    found |= u in dom_edge or v in dom_edge
                assert found, "Non-adjacent edge found!"

@ -1,8 +0,0 @@
import networkx as nx
import networkx.algorithms.approximation as a
def test_independent_set():
# smoke test
G = nx.Graph()
assert len(a.maximum_independent_set(G)) == 0

@ -1,300 +0,0 @@
# Test for approximation to k-components algorithm
import pytest
import networkx as nx
from networkx.algorithms.approximation import k_components
from networkx.algorithms.approximation.kcomponents import _AntiGraph, _same
def build_k_number_dict(k_components):
k_num = {}
for k, comps in sorted(k_components.items()):
for comp in comps:
for node in comp:
k_num[node] = k
return k_num
##
# Some nice synthetic graphs
##
def graph_example_1():
G = nx.convert_node_labels_to_integers(
nx.grid_graph([5, 5]), label_attribute="labels"
)
rlabels = nx.get_node_attributes(G, "labels")
labels = {v: k for k, v in rlabels.items()}
for nodes in [
(labels[(0, 0)], labels[(1, 0)]),
(labels[(0, 4)], labels[(1, 4)]),
(labels[(3, 0)], labels[(4, 0)]),
(labels[(3, 4)], labels[(4, 4)]),
]:
new_node = G.order() + 1
# Petersen graph is triconnected
P = nx.petersen_graph()
G = nx.disjoint_union(G, P)
# Add two edges between the grid and P
G.add_edge(new_node + 1, nodes[0])
G.add_edge(new_node, nodes[1])
# K5 is 4-connected
K = nx.complete_graph(5)
G = nx.disjoint_union(G, K)
# Add three edges between P and K5
G.add_edge(new_node + 2, new_node + 11)
G.add_edge(new_node + 3, new_node + 12)
G.add_edge(new_node + 4, new_node + 13)
# Add another K5 sharing a node
G = nx.disjoint_union(G, K)
nbrs = G[new_node + 10]
G.remove_node(new_node + 10)
for nbr in nbrs:
G.add_edge(new_node + 17, nbr)
G.add_edge(new_node + 16, new_node + 5)
return G
def torrents_and_ferraro_graph():
G = nx.convert_node_labels_to_integers(
nx.grid_graph([5, 5]), label_attribute="labels"
)
rlabels = nx.get_node_attributes(G, "labels")
labels = {v: k for k, v in rlabels.items()}
for nodes in [(labels[(0, 4)], labels[(1, 4)]), (labels[(3, 4)], labels[(4, 4)])]:
new_node = G.order() + 1
# Petersen graph is triconnected
P = nx.petersen_graph()
G = nx.disjoint_union(G, P)
# Add two edges between the grid and P
G.add_edge(new_node + 1, nodes[0])
G.add_edge(new_node, nodes[1])
# K5 is 4-connected
K = nx.complete_graph(5)
G = nx.disjoint_union(G, K)
# Add three edges between P and K5
G.add_edge(new_node + 2, new_node + 11)
G.add_edge(new_node + 3, new_node + 12)
G.add_edge(new_node + 4, new_node + 13)
# Add another K5 sharing a node
G = nx.disjoint_union(G, K)
nbrs = G[new_node + 10]
G.remove_node(new_node + 10)
for nbr in nbrs:
G.add_edge(new_node + 17, nbr)
# Commenting this makes the graph not biconnected !!
        # This stupid mistake made one reviewer very angry :P
G.add_edge(new_node + 16, new_node + 8)
for nodes in [(labels[(0, 0)], labels[(1, 0)]), (labels[(3, 0)], labels[(4, 0)])]:
new_node = G.order() + 1
# Petersen graph is triconnected
P = nx.petersen_graph()
G = nx.disjoint_union(G, P)
# Add two edges between the grid and P
G.add_edge(new_node + 1, nodes[0])
G.add_edge(new_node, nodes[1])
# K5 is 4-connected
K = nx.complete_graph(5)
G = nx.disjoint_union(G, K)
# Add three edges between P and K5
G.add_edge(new_node + 2, new_node + 11)
G.add_edge(new_node + 3, new_node + 12)
G.add_edge(new_node + 4, new_node + 13)
# Add another K5 sharing two nodes
G = nx.disjoint_union(G, K)
nbrs = G[new_node + 10]
G.remove_node(new_node + 10)
for nbr in nbrs:
G.add_edge(new_node + 17, nbr)
nbrs2 = G[new_node + 9]
G.remove_node(new_node + 9)
for nbr in nbrs2:
G.add_edge(new_node + 18, nbr)
return G
# Helper function
def _check_connectivity(G):
result = k_components(G)
for k, components in result.items():
if k < 3:
continue
for component in components:
C = G.subgraph(component)
K = nx.node_connectivity(C)
assert K >= k
def test_torrents_and_ferraro_graph():
G = torrents_and_ferraro_graph()
_check_connectivity(G)
def test_example_1():
G = graph_example_1()
_check_connectivity(G)
def test_karate_0():
G = nx.karate_club_graph()
_check_connectivity(G)
def test_karate_1():
karate_k_num = {
0: 4,
1: 4,
2: 4,
3: 4,
4: 3,
5: 3,
6: 3,
7: 4,
8: 4,
9: 2,
10: 3,
11: 1,
12: 2,
13: 4,
14: 2,
15: 2,
16: 2,
17: 2,
18: 2,
19: 3,
20: 2,
21: 2,
22: 2,
23: 3,
24: 3,
25: 3,
26: 2,
27: 3,
28: 3,
29: 3,
30: 4,
31: 3,
32: 4,
33: 4,
}
approx_karate_k_num = karate_k_num.copy()
approx_karate_k_num[24] = 2
approx_karate_k_num[25] = 2
G = nx.karate_club_graph()
k_comps = k_components(G)
k_num = build_k_number_dict(k_comps)
assert k_num in (karate_k_num, approx_karate_k_num)
def test_example_1_detail_3_and_4():
G = graph_example_1()
result = k_components(G)
# In this example graph there are 8 3-components, 4 with 15 nodes
# and 4 with 5 nodes.
assert len(result[3]) == 8
assert len([c for c in result[3] if len(c) == 15]) == 4
assert len([c for c in result[3] if len(c) == 5]) == 4
# There are also 8 4-components all with 5 nodes.
assert len(result[4]) == 8
assert all(len(c) == 5 for c in result[4])
    # Finally check that the detected k-components actually have node
    # connectivity >= k.
for k, components in result.items():
if k < 3:
continue
for component in components:
K = nx.node_connectivity(G.subgraph(component))
assert K >= k
def test_directed():
with pytest.raises(nx.NetworkXNotImplemented):
G = nx.gnp_random_graph(10, 0.4, directed=True)
kc = k_components(G)
def test_same():
equal = {"A": 2, "B": 2, "C": 2}
slightly_different = {"A": 2, "B": 1, "C": 2}
different = {"A": 2, "B": 8, "C": 18}
assert _same(equal)
assert not _same(slightly_different)
assert _same(slightly_different, tol=1)
assert not _same(different)
assert not _same(different, tol=4)
class TestAntiGraph:
@classmethod
def setup_class(cls):
cls.Gnp = nx.gnp_random_graph(20, 0.8)
cls.Anp = _AntiGraph(nx.complement(cls.Gnp))
cls.Gd = nx.davis_southern_women_graph()
cls.Ad = _AntiGraph(nx.complement(cls.Gd))
cls.Gk = nx.karate_club_graph()
cls.Ak = _AntiGraph(nx.complement(cls.Gk))
cls.GA = [(cls.Gnp, cls.Anp), (cls.Gd, cls.Ad), (cls.Gk, cls.Ak)]
def test_size(self):
for G, A in self.GA:
n = G.order()
s = len(list(G.edges())) + len(list(A.edges()))
assert s == (n * (n - 1)) / 2
def test_degree(self):
for G, A in self.GA:
assert sorted(G.degree()) == sorted(A.degree())
def test_core_number(self):
for G, A in self.GA:
assert nx.core_number(G) == nx.core_number(A)
def test_connected_components(self):
for G, A in self.GA:
gc = [set(c) for c in nx.connected_components(G)]
ac = [set(c) for c in nx.connected_components(A)]
for comp in ac:
assert comp in gc
def test_adj(self):
for G, A in self.GA:
for n, nbrs in G.adj.items():
a_adj = sorted((n, sorted(ad)) for n, ad in A.adj.items())
g_adj = sorted((n, sorted(ad)) for n, ad in G.adj.items())
assert a_adj == g_adj
def test_adjacency(self):
for G, A in self.GA:
a_adj = list(A.adjacency())
for n, nbrs in G.adjacency():
assert (n, set(nbrs)) in a_adj
def test_neighbors(self):
for G, A in self.GA:
node = list(G.nodes())[0]
assert set(G.neighbors(node)) == set(A.neighbors(node))
def test_node_not_in_graph(self):
for G, A in self.GA:
node = "non_existent_node"
pytest.raises(nx.NetworkXError, A.neighbors, node)
pytest.raises(nx.NetworkXError, G.neighbors, node)
def test_degree_thingraph(self):
for G, A in self.GA:
node = list(G.nodes())[0]
nodes = list(G.nodes())[1:4]
assert G.degree(node) == A.degree(node)
assert sum(d for n, d in G.degree()) == sum(d for n, d in A.degree())
# AntiGraph is a ThinGraph, so all the weights are 1
assert sum(d for n, d in A.degree()) == sum(
d for n, d in A.degree(weight="weight")
)
assert sum(d for n, d in G.degree(nodes)) == sum(
d for n, d in A.degree(nodes)
)

@ -1,8 +0,0 @@
import networkx as nx
import networkx.algorithms.approximation as a
def test_min_maximal_matching():
# smoke test
G = nx.Graph()
assert len(a.min_maximal_matching(G)) == 0

@ -1,31 +0,0 @@
import networkx as nx
import networkx.algorithms.approximation as apxa
def test_ramsey():
# this should only find the complete graph
graph = nx.complete_graph(10)
c, i = apxa.ramsey_R2(graph)
cdens = nx.density(graph.subgraph(c))
assert cdens == 1.0, "clique not correctly found by ramsey!"
idens = nx.density(graph.subgraph(i))
assert idens == 0.0, "i-set not correctly found by ramsey!"
    # this trivial graph has no cliques; it should just find i-sets
graph = nx.trivial_graph()
c, i = apxa.ramsey_R2(graph)
assert c == {0}, "clique not correctly found by ramsey!"
assert i == {0}, "i-set not correctly found by ramsey!"
graph = nx.barbell_graph(10, 5, nx.Graph())
c, i = apxa.ramsey_R2(graph)
cdens = nx.density(graph.subgraph(c))
assert cdens == 1.0, "clique not correctly found by ramsey!"
idens = nx.density(graph.subgraph(i))
assert idens == 0.0, "i-set not correctly found by ramsey!"
# add self-loops and test again
graph.add_edges_from([(n, n) for n in range(0, len(graph), 2)])
cc, ii = apxa.ramsey_R2(graph)
assert cc == c
assert ii == i

@ -1,83 +0,0 @@
import pytest
import networkx as nx
from networkx.algorithms.approximation.steinertree import metric_closure
from networkx.algorithms.approximation.steinertree import steiner_tree
from networkx.testing.utils import assert_edges_equal
class TestSteinerTree:
@classmethod
def setup_class(cls):
G = nx.Graph()
G.add_edge(1, 2, weight=10)
G.add_edge(2, 3, weight=10)
G.add_edge(3, 4, weight=10)
G.add_edge(4, 5, weight=10)
G.add_edge(5, 6, weight=10)
G.add_edge(2, 7, weight=1)
G.add_edge(7, 5, weight=1)
cls.G = G
cls.term_nodes = [1, 2, 3, 4, 5]
def test_connected_metric_closure(self):
G = self.G.copy()
G.add_node(100)
pytest.raises(nx.NetworkXError, metric_closure, G)
def test_metric_closure(self):
M = metric_closure(self.G)
mc = [
(1, 2, {"distance": 10, "path": [1, 2]}),
(1, 3, {"distance": 20, "path": [1, 2, 3]}),
(1, 4, {"distance": 22, "path": [1, 2, 7, 5, 4]}),
(1, 5, {"distance": 12, "path": [1, 2, 7, 5]}),
(1, 6, {"distance": 22, "path": [1, 2, 7, 5, 6]}),
(1, 7, {"distance": 11, "path": [1, 2, 7]}),
(2, 3, {"distance": 10, "path": [2, 3]}),
(2, 4, {"distance": 12, "path": [2, 7, 5, 4]}),
(2, 5, {"distance": 2, "path": [2, 7, 5]}),
(2, 6, {"distance": 12, "path": [2, 7, 5, 6]}),
(2, 7, {"distance": 1, "path": [2, 7]}),
(3, 4, {"distance": 10, "path": [3, 4]}),
(3, 5, {"distance": 12, "path": [3, 2, 7, 5]}),
(3, 6, {"distance": 22, "path": [3, 2, 7, 5, 6]}),
(3, 7, {"distance": 11, "path": [3, 2, 7]}),
(4, 5, {"distance": 10, "path": [4, 5]}),
(4, 6, {"distance": 20, "path": [4, 5, 6]}),
(4, 7, {"distance": 11, "path": [4, 5, 7]}),
(5, 6, {"distance": 10, "path": [5, 6]}),
(5, 7, {"distance": 1, "path": [5, 7]}),
(6, 7, {"distance": 11, "path": [6, 5, 7]}),
]
assert_edges_equal(list(M.edges(data=True)), mc)
def test_steiner_tree(self):
S = steiner_tree(self.G, self.term_nodes)
expected_steiner_tree = [
(1, 2, {"weight": 10}),
(2, 3, {"weight": 10}),
(2, 7, {"weight": 1}),
(3, 4, {"weight": 10}),
(5, 7, {"weight": 1}),
]
assert_edges_equal(list(S.edges(data=True)), expected_steiner_tree)
def test_multigraph_steiner_tree(self):
G = nx.MultiGraph()
G.add_edges_from(
[
(1, 2, 0, {"weight": 1}),
(2, 3, 0, {"weight": 999}),
(2, 3, 1, {"weight": 1}),
(3, 4, 0, {"weight": 1}),
(3, 5, 0, {"weight": 1}),
]
)
terminal_nodes = [2, 4, 5]
expected_edges = [
(2, 3, 1, {"weight": 1}), # edge with key 1 has lower weight
(3, 4, 0, {"weight": 1}),
(3, 5, 0, {"weight": 1}),
]
T = steiner_tree(G, terminal_nodes)
assert_edges_equal(T.edges(data=True, keys=True), expected_edges)

@ -1,269 +0,0 @@
import networkx as nx
from networkx.algorithms.approximation import treewidth_min_degree
from networkx.algorithms.approximation import treewidth_min_fill_in
from networkx.algorithms.approximation.treewidth import min_fill_in_heuristic
from networkx.algorithms.approximation.treewidth import MinDegreeHeuristic
import itertools
def is_tree_decomp(graph, decomp):
"""Check if the given tree decomposition is valid."""
for x in graph.nodes():
appear_once = False
for bag in decomp.nodes():
if x in bag:
appear_once = True
break
assert appear_once
    # Check that each pair of adjacent nodes appears together in at least one bag
for (x, y) in graph.edges():
appear_together = False
for bag in decomp.nodes():
if x in bag and y in bag:
appear_together = True
break
assert appear_together
    # Check that the bags containing each vertex v form a connected subgraph of the decomposition
for v in graph.nodes():
subset = []
for bag in decomp.nodes():
if v in bag:
subset.append(bag)
sub_graph = decomp.subgraph(subset)
assert nx.is_connected(sub_graph)
class TestTreewidthMinDegree:
"""Unit tests for the min_degree function"""
@classmethod
def setup_class(cls):
"""Setup for different kinds of trees"""
cls.complete = nx.Graph()
cls.complete.add_edge(1, 2)
cls.complete.add_edge(2, 3)
cls.complete.add_edge(1, 3)
cls.small_tree = nx.Graph()
cls.small_tree.add_edge(1, 3)
cls.small_tree.add_edge(4, 3)
cls.small_tree.add_edge(2, 3)
cls.small_tree.add_edge(3, 5)
cls.small_tree.add_edge(5, 6)
cls.small_tree.add_edge(5, 7)
cls.small_tree.add_edge(6, 7)
cls.deterministic_graph = nx.Graph()
cls.deterministic_graph.add_edge(0, 1) # deg(0) = 1
cls.deterministic_graph.add_edge(1, 2) # deg(1) = 2
cls.deterministic_graph.add_edge(2, 3)
cls.deterministic_graph.add_edge(2, 4) # deg(2) = 3
cls.deterministic_graph.add_edge(3, 4)
cls.deterministic_graph.add_edge(3, 5)
cls.deterministic_graph.add_edge(3, 6) # deg(3) = 4
cls.deterministic_graph.add_edge(4, 5)
cls.deterministic_graph.add_edge(4, 6)
cls.deterministic_graph.add_edge(4, 7) # deg(4) = 5
cls.deterministic_graph.add_edge(5, 6)
cls.deterministic_graph.add_edge(5, 7)
cls.deterministic_graph.add_edge(5, 8)
cls.deterministic_graph.add_edge(5, 9) # deg(5) = 6
cls.deterministic_graph.add_edge(6, 7)
cls.deterministic_graph.add_edge(6, 8)
cls.deterministic_graph.add_edge(6, 9) # deg(6) = 6
cls.deterministic_graph.add_edge(7, 8)
cls.deterministic_graph.add_edge(7, 9) # deg(7) = 5
cls.deterministic_graph.add_edge(8, 9) # deg(8) = 4
def test_petersen_graph(self):
"""Test Petersen graph tree decomposition result"""
G = nx.petersen_graph()
_, decomp = treewidth_min_degree(G)
is_tree_decomp(G, decomp)
def test_small_tree_treewidth(self):
"""Test small tree
Test if the computed treewidth of the known self.small_tree is 2.
As we know which value we can expect from our heuristic, values other
than two are regressions
"""
G = self.small_tree
# the order of removal should be [1,2,4]3[5,6,7]
# (with [] denoting any order of the containing nodes)
# resulting in treewidth 2 for the heuristic
        treewidth, _ = treewidth_min_degree(G)
assert treewidth == 2
def test_heuristic_abort(self):
"""Test heuristic abort condition for fully connected graph"""
graph = {}
for u in self.complete:
graph[u] = set()
for v in self.complete[u]:
if u != v: # ignore self-loop
graph[u].add(v)
        deg_heuristic = MinDegreeHeuristic(graph)
        assert deg_heuristic.best_node(graph) is None
def test_empty_graph(self):
"""Test empty graph"""
G = nx.Graph()
_, _ = treewidth_min_degree(G)
def test_two_component_graph(self):
"""Test empty graph"""
G = nx.Graph()
G.add_node(1)
G.add_node(2)
treewidth, _ = treewidth_min_degree(G)
assert treewidth == 0
def test_heuristic_first_steps(self):
"""Test first steps of min_degree heuristic"""
graph = {
n: set(self.deterministic_graph[n]) - {n} for n in self.deterministic_graph
}
deg_heuristic = MinDegreeHeuristic(graph)
elim_node = deg_heuristic.best_node(graph)
print(f"Graph {graph}:")
steps = []
while elim_node is not None:
print(f"Removing {elim_node}:")
steps.append(elim_node)
nbrs = graph[elim_node]
for u, v in itertools.permutations(nbrs, 2):
if v not in graph[u]:
graph[u].add(v)
for u in graph:
if elim_node in graph[u]:
graph[u].remove(elim_node)
del graph[elim_node]
print(f"Graph {graph}:")
elim_node = deg_heuristic.best_node(graph)
# check only the first 5 elements for equality
assert steps[:5] == [0, 1, 2, 3, 4]
class TestTreewidthMinFillIn:
"""Unit tests for the treewidth_min_fill_in function."""
@classmethod
def setup_class(cls):
"""Setup for different kinds of trees"""
cls.complete = nx.Graph()
cls.complete.add_edge(1, 2)
cls.complete.add_edge(2, 3)
cls.complete.add_edge(1, 3)
cls.small_tree = nx.Graph()
cls.small_tree.add_edge(1, 2)
cls.small_tree.add_edge(2, 3)
cls.small_tree.add_edge(3, 4)
cls.small_tree.add_edge(1, 4)
cls.small_tree.add_edge(2, 4)
cls.small_tree.add_edge(4, 5)
cls.small_tree.add_edge(5, 6)
cls.small_tree.add_edge(5, 7)
cls.small_tree.add_edge(6, 7)
cls.deterministic_graph = nx.Graph()
cls.deterministic_graph.add_edge(1, 2)
cls.deterministic_graph.add_edge(1, 3)
cls.deterministic_graph.add_edge(3, 4)
cls.deterministic_graph.add_edge(2, 4)
cls.deterministic_graph.add_edge(3, 5)
cls.deterministic_graph.add_edge(4, 5)
cls.deterministic_graph.add_edge(3, 6)
cls.deterministic_graph.add_edge(5, 6)
def test_petersen_graph(self):
"""Test Petersen graph tree decomposition result"""
G = nx.petersen_graph()
_, decomp = treewidth_min_fill_in(G)
is_tree_decomp(G, decomp)
def test_small_tree_treewidth(self):
"""Test if the computed treewidth of the known self.small_tree is 2"""
G = self.small_tree
# the order of removal should be [1,2,4]3[5,6,7]
# (with [] denoting any order of the containing nodes)
# resulting in treewidth 2 for the heuristic
treewidth, _ = treewidth_min_fill_in(G)
assert treewidth == 2
def test_heuristic_abort(self):
"""Test if min_fill_in returns None for fully connected graph"""
graph = {}
for u in self.complete:
graph[u] = set()
for v in self.complete[u]:
if u != v: # ignore self-loop
graph[u].add(v)
        next_node = min_fill_in_heuristic(graph)
        assert next_node is None
def test_empty_graph(self):
"""Test empty graph"""
G = nx.Graph()
_, _ = treewidth_min_fill_in(G)
def test_two_component_graph(self):
"""Test empty graph"""
G = nx.Graph()
G.add_node(1)
G.add_node(2)
treewidth, _ = treewidth_min_fill_in(G)
assert treewidth == 0
def test_heuristic_first_steps(self):
"""Test first steps of min_fill_in heuristic"""
graph = {
n: set(self.deterministic_graph[n]) - {n} for n in self.deterministic_graph
}
print(f"Graph {graph}:")
elim_node = min_fill_in_heuristic(graph)
steps = []
while elim_node is not None:
print(f"Removing {elim_node}:")
steps.append(elim_node)
nbrs = graph[elim_node]
for u, v in itertools.permutations(nbrs, 2):
if v not in graph[u]:
graph[u].add(v)
for u in graph:
if elim_node in graph[u]:
graph[u].remove(elim_node)
del graph[elim_node]
print(f"Graph {graph}:")
elim_node = min_fill_in_heuristic(graph)
# check only the first 2 elements for equality
assert steps[:2] == [6, 5]

@ -1,55 +0,0 @@
import networkx as nx
from networkx.algorithms.approximation import min_weighted_vertex_cover
def is_cover(G, node_cover):
return all({u, v} & node_cover for u, v in G.edges())
class TestMWVC:
"""Unit tests for the approximate minimum weighted vertex cover
function,
:func:`~networkx.algorithms.approximation.vertex_cover.min_weighted_vertex_cover`.
"""
def test_unweighted_directed(self):
# Create a star graph in which half the nodes are directed in
# and half are directed out.
G = nx.DiGraph()
G.add_edges_from((0, v) for v in range(1, 26))
G.add_edges_from((v, 0) for v in range(26, 51))
cover = min_weighted_vertex_cover(G)
assert 2 == len(cover)
assert is_cover(G, cover)
def test_unweighted_undirected(self):
# create a simple star graph
size = 50
sg = nx.star_graph(size)
cover = min_weighted_vertex_cover(sg)
assert 2 == len(cover)
assert is_cover(sg, cover)
def test_weighted(self):
wg = nx.Graph()
wg.add_node(0, weight=10)
wg.add_node(1, weight=1)
wg.add_node(2, weight=1)
wg.add_node(3, weight=1)
wg.add_node(4, weight=1)
wg.add_edge(0, 1)
wg.add_edge(0, 2)
wg.add_edge(0, 3)
wg.add_edge(0, 4)
wg.add_edge(1, 2)
wg.add_edge(2, 3)
wg.add_edge(3, 4)
wg.add_edge(4, 1)
cover = min_weighted_vertex_cover(wg, weight="weight")
csum = sum(wg.nodes[node]["weight"] for node in cover)
assert 4 == csum
assert is_cover(wg, cover)

@ -1,249 +0,0 @@
"""Functions for computing treewidth decomposition.
Treewidth of an undirected graph is a number associated with the graph.
It can be defined as the size of the largest vertex set (bag) in a tree
decomposition of the graph minus one.
`Wikipedia: Treewidth <https://en.wikipedia.org/wiki/Treewidth>`_
The notions of treewidth and tree decomposition have gained attention
partly because many graph and network problems that are
intractable (e.g., NP-hard) on arbitrary graphs become efficiently
solvable (e.g., with a linear time algorithm) when the treewidth of the
input graphs is bounded by a constant [1]_ [2]_.
There are two different functions for computing a tree decomposition:
:func:`treewidth_min_degree` and :func:`treewidth_min_fill_in`.
.. [1] Hans L. Bodlaender and Arie M. C. A. Koster. 2010. "Treewidth
   computations I. Upper bounds". Inf. Comput. 208, 3 (March 2010), 259-275.
   http://dx.doi.org/10.1016/j.ic.2009.03.008
.. [2] Hans L. Bodlaender. "Discovering Treewidth". Institute of Information
and Computing Sciences, Utrecht University.
Technical Report UU-CS-2005-018.
http://www.cs.uu.nl
.. [3] K. Wang, Z. Lu, and J. Hicks *Treewidth*.
http://web.eecs.utk.edu/~cphillip/cs594_spring2015_projects/treewidth.pdf
"""
import sys
import networkx as nx
from networkx.utils import not_implemented_for
from heapq import heappush, heappop, heapify
import itertools
__all__ = ["treewidth_min_degree", "treewidth_min_fill_in"]
@not_implemented_for("directed")
@not_implemented_for("multigraph")
def treewidth_min_degree(G):
""" Returns a treewidth decomposition using the Minimum Degree heuristic.
The heuristic chooses the nodes according to their degree, i.e., first
the node with the lowest degree is chosen, then the graph is updated
and the corresponding node is removed. Next, a new node with the lowest
degree is chosen, and so on.
Parameters
----------
G : NetworkX graph
Returns
-------
Treewidth decomposition : (int, Graph) tuple
2-tuple with treewidth and the corresponding decomposed tree.
"""
deg_heuristic = MinDegreeHeuristic(G)
return treewidth_decomp(G, lambda graph: deg_heuristic.best_node(graph))
@not_implemented_for("directed")
@not_implemented_for("multigraph")
def treewidth_min_fill_in(G):
""" Returns a treewidth decomposition using the Minimum Fill-in heuristic.
    The heuristic chooses a node from the graph, where the number of edges
    added when turning the neighbourhood of the chosen node into a clique
    is as small as possible.
Parameters
----------
G : NetworkX graph
Returns
-------
Treewidth decomposition : (int, Graph) tuple
2-tuple with treewidth and the corresponding decomposed tree.
"""
return treewidth_decomp(G, min_fill_in_heuristic)
class MinDegreeHeuristic:
""" Implements the Minimum Degree heuristic.
The heuristic chooses the nodes according to their degree
(number of neighbours), i.e., first the node with the lowest degree is
chosen, then the graph is updated and the corresponding node is
removed. Next, a new node with the lowest degree is chosen, and so on.
"""
def __init__(self, graph):
self._graph = graph
# nodes that have to be updated in the heap before each iteration
self._update_nodes = []
self._degreeq = [] # a heapq with 2-tuples (degree,node)
# build heap with initial degrees
for n in graph:
self._degreeq.append((len(graph[n]), n))
heapify(self._degreeq)
def best_node(self, graph):
# update nodes in self._update_nodes
for n in self._update_nodes:
# insert changed degrees into degreeq
heappush(self._degreeq, (len(graph[n]), n))
# get the next valid (minimum degree) node
while self._degreeq:
(min_degree, elim_node) = heappop(self._degreeq)
if elim_node not in graph or len(graph[elim_node]) != min_degree:
# outdated entry in degreeq
continue
elif min_degree == len(graph) - 1:
# fully connected: abort condition
return None
# remember to update nodes in the heap before getting the next node
self._update_nodes = graph[elim_node]
return elim_node
# the heap is empty: abort
return None
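# Illustrative sketch (added here for clarity; not part of the original
# module): the heuristic is queried with the same dict-of-sets structure
# that treewidth_decomp builds, and yields a minimum-degree node.
def _demo_min_degree_heuristic():
    graph = {0: {1}, 1: {0, 2}, 2: {1}}  # the path 0-1-2 as dict-of-sets
    heuristic = MinDegreeHeuristic(graph)
    # node 0 has degree 1 and the smallest label, so the heap yields it first
    assert heuristic.best_node(graph) == 0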
def min_fill_in_heuristic(graph):
""" Implements the Minimum Degree heuristic.
Returns the node from the graph, where the number of edges added when
turning the neighbourhood of the chosen node into clique is as small as
possible. This algorithm chooses the nodes using the Minimum Fill-In
heuristic. The running time of the algorithm is :math:`O(V^3)` and it uses
additional constant memory."""
if len(graph) == 0:
return None
min_fill_in_node = None
min_fill_in = sys.maxsize
# create sorted list of (degree, node)
degree_list = [(len(graph[node]), node) for node in graph]
degree_list.sort()
# abort condition
min_degree = degree_list[0][0]
if min_degree == len(graph) - 1:
return None
for (_, node) in degree_list:
num_fill_in = 0
nbrs = graph[node]
for nbr in nbrs:
# count how many nodes in nbrs current nbr is not connected to
# subtract 1 for the node itself
num_fill_in += len(nbrs - graph[nbr]) - 1
if num_fill_in >= 2 * min_fill_in:
break
num_fill_in /= 2 # divide by 2 because of double counting
if num_fill_in < min_fill_in: # update min-fill-in node
if num_fill_in == 0:
return node
min_fill_in = num_fill_in
min_fill_in_node = node
return min_fill_in_node
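# Illustrative sketch (added for clarity; not part of the original module):
# on the path 0-1-2 the endpoints need no fill-in edges, so the heuristic
# returns one of them immediately (node 0, since ties break by sorted order).
def _demo_min_fill_in_heuristic():
    path = {0: {1}, 1: {0, 2}, 2: {1}}
    assert min_fill_in_heuristic(path) == 0
    # a triangle is fully connected, which triggers the abort condition
    assert min_fill_in_heuristic({0: {1, 2}, 1: {0, 2}, 2: {0, 1}}) is None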
def treewidth_decomp(G, heuristic=min_fill_in_heuristic):
""" Returns a treewidth decomposition using the passed heuristic.
Parameters
----------
G : NetworkX graph
heuristic : heuristic function
Returns
-------
Treewidth decomposition : (int, Graph) tuple
2-tuple with treewidth and the corresponding decomposed tree.
"""
# make dict-of-sets structure
graph = {n: set(G[n]) - {n} for n in G}
# stack containing nodes and neighbors in the order from the heuristic
node_stack = []
# get first node from heuristic
elim_node = heuristic(graph)
while elim_node is not None:
# connect all neighbours with each other
nbrs = graph[elim_node]
for u, v in itertools.permutations(nbrs, 2):
if v not in graph[u]:
graph[u].add(v)
# push node and its current neighbors on stack
node_stack.append((elim_node, nbrs))
# remove node from graph
for u in graph[elim_node]:
graph[u].remove(elim_node)
del graph[elim_node]
elim_node = heuristic(graph)
# the abort condition is met; put all remaining nodes into one bag
decomp = nx.Graph()
first_bag = frozenset(graph.keys())
decomp.add_node(first_bag)
treewidth = len(first_bag) - 1
while node_stack:
# get node and its neighbors from the stack
(curr_node, nbrs) = node_stack.pop()
# find a bag all neighbors are in
old_bag = None
for bag in decomp.nodes:
if nbrs <= bag:
old_bag = bag
break
if old_bag is None:
# no old_bag was found: just connect to the first_bag
old_bag = first_bag
# create new node for decomposition
nbrs.add(curr_node)
new_bag = frozenset(nbrs)
# update treewidth
treewidth = max(treewidth, len(new_bag) - 1)
# add edge to decomposition (implicitly also adds the new node)
decomp.add_edge(old_bag, new_bag)
return treewidth, decomp
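# Usage sketch (added for clarity; not part of the original module): both
# public heuristics return a (treewidth, decomposition) pair, and any tree
# has treewidth 1 because every elimination bag holds a leaf and its parent.
def _demo_treewidth_decomp():
    G = nx.balanced_tree(2, 3)  # binary tree with 15 nodes
    width, decomp = treewidth_min_degree(G)
    assert width == 1
    assert nx.is_tree(decomp)  # the bags themselves form a tree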

@ -1,74 +0,0 @@
"""Functions for computing an approximate minimum weight vertex cover.
A |vertex cover|_ is a subset of nodes such that each edge in the graph
is incident to at least one node in the subset.
.. _vertex cover: https://en.wikipedia.org/wiki/Vertex_cover
.. |vertex cover| replace:: *vertex cover*
"""
__all__ = ["min_weighted_vertex_cover"]
def min_weighted_vertex_cover(G, weight=None):
r"""Returns an approximate minimum weighted vertex cover.
The set of nodes returned by this function is guaranteed to be a
vertex cover, and the total weight of the set is guaranteed to be at
most twice the total weight of the minimum weight vertex cover. In
other words,
.. math::
w(S) \leq 2 * w(S^*),
where $S$ is the vertex cover returned by this function,
$S^*$ is the vertex cover of minimum weight out of all vertex
covers of the graph, and $w$ is the function that computes the
sum of the weights of each node in that given set.
Parameters
----------
G : NetworkX graph
weight : string, optional (default = None)
If None, every node has weight 1. If a string, use this node
attribute as the node weight. A node without this attribute is
assumed to have weight 1.
Returns
-------
min_weighted_cover : set
Returns a set of nodes whose weight sum is no more than twice
the weight sum of the minimum weight vertex cover.
Notes
-----
For a directed graph, a vertex cover has the same definition: a set
of nodes such that each edge in the graph is incident to at least
one node in the set. Whether the node is the head or tail of the
directed edge is ignored.
This is the local-ratio algorithm for computing an approximate
vertex cover. The algorithm greedily reduces the costs over edges,
iteratively building a cover. The worst-case runtime of this
implementation is $O(m \log n)$, where $n$ is the number
of nodes and $m$ the number of edges in the graph.
References
----------
.. [1] Bar-Yehuda, R., and Even, S. (1985). "A local-ratio theorem for
approximating the weighted vertex cover problem."
       *Annals of Discrete Mathematics*, 25, 27-46
<http://www.cs.technion.ac.il/~reuven/PDF/vc_lr.pdf>
"""
cost = dict(G.nodes(data=weight, default=1))
    # For each edge, deduct the smaller remaining endpoint cost from both
    # endpoints; nodes whose cost is driven to zero form the cover.
for u, v in G.edges():
min_cost = min(cost[u], cost[v])
cost[u] -= min_cost
cost[v] -= min_cost
return {u for u, c in cost.items() if c == 0}
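# Usage sketch (added for clarity; not part of the original module): on a
# star graph the optimal cover is the hub alone, and the local-ratio cover
# stays within the factor-2 guarantee stated above.
def _demo_min_weighted_vertex_cover():
    import networkx as nx  # used only by this illustrative demo
    G = nx.star_graph(5)
    cover = min_weighted_vertex_cover(G)
    assert all(u in cover or v in cover for u, v in G.edges())  # valid cover
    assert len(cover) <= 2  # at most twice the optimum, which is {0}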

@ -1,5 +0,0 @@
from networkx.algorithms.assortativity.connectivity import *
from networkx.algorithms.assortativity.correlation import *
from networkx.algorithms.assortativity.mixing import *
from networkx.algorithms.assortativity.neighbor_degree import *
from networkx.algorithms.assortativity.pairs import *

@ -1,128 +0,0 @@
from collections import defaultdict
__all__ = ["average_degree_connectivity", "k_nearest_neighbors"]
def average_degree_connectivity(
G, source="in+out", target="in+out", nodes=None, weight=None
):
r"""Compute the average degree connectivity of graph.
The average degree connectivity is the average nearest neighbor degree of
nodes with degree k. For weighted graphs, an analogous measure can
be computed using the weighted average neighbors degree defined in
[1]_, for a node `i`, as
.. math::
k_{nn,i}^{w} = \frac{1}{s_i} \sum_{j \in N(i)} w_{ij} k_j
where `s_i` is the weighted degree of node `i`,
`w_{ij}` is the weight of the edge that links `i` and `j`,
and `N(i)` are the neighbors of node `i`.
Parameters
----------
G : NetworkX graph
source : "in"|"out"|"in+out" (default:"in+out")
Directed graphs only. Use "in"- or "out"-degree for source node.
target : "in"|"out"|"in+out" (default:"in+out"
Directed graphs only. Use "in"- or "out"-degree for target node.
nodes : list or iterable (optional)
Compute neighbor connectivity for these nodes. The default is all
nodes.
weight : string or None, optional (default=None)
The edge attribute that holds the numerical value used as a weight.
If None, then each edge has weight 1.
Returns
-------
d : dict
A dictionary keyed by degree k with the value of average connectivity.
Raises
------
ValueError
If either `source` or `target` are not one of 'in',
'out', or 'in+out'.
Examples
--------
>>> G = nx.path_graph(4)
>>> G.edges[1, 2]["weight"] = 3
>>> nx.k_nearest_neighbors(G)
{1: 2.0, 2: 1.5}
>>> nx.k_nearest_neighbors(G, weight="weight")
{1: 2.0, 2: 1.75}
See also
--------
    average_neighbor_degree
Notes
-----
This algorithm is sometimes called "k nearest neighbors" and is also
available as `k_nearest_neighbors`.
References
----------
.. [1] A. Barrat, M. Barthélemy, R. Pastor-Satorras, and A. Vespignani,
"The architecture of complex weighted networks".
       PNAS 101 (11): 3747-3752 (2004).
"""
# First, determine the type of neighbors and the type of degree to use.
if G.is_directed():
if source not in ("in", "out", "in+out"):
raise ValueError('source must be one of "in", "out", or "in+out"')
if target not in ("in", "out", "in+out"):
raise ValueError('target must be one of "in", "out", or "in+out"')
direction = {"out": G.out_degree, "in": G.in_degree, "in+out": G.degree}
neighbor_funcs = {
"out": G.successors,
"in": G.predecessors,
"in+out": G.neighbors,
}
source_degree = direction[source]
target_degree = direction[target]
neighbors = neighbor_funcs[source]
# `reverse` indicates whether to look at the in-edge when
# computing the weight of an edge.
reverse = source == "in"
else:
source_degree = G.degree
target_degree = G.degree
neighbors = G.neighbors
reverse = False
dsum = defaultdict(int)
dnorm = defaultdict(int)
    # Check if `nodes` is actually a single node in the graph.
source_nodes = source_degree(nodes)
if nodes in G:
source_nodes = [(nodes, source_degree(nodes))]
for n, k in source_nodes:
nbrdeg = target_degree(neighbors(n))
if weight is None:
s = sum(d for n, d in nbrdeg)
else: # weight nbr degree by weight of (n,nbr) edge
if reverse:
s = sum(G[nbr][n].get(weight, 1) * d for nbr, d in nbrdeg)
else:
s = sum(G[n][nbr].get(weight, 1) * d for nbr, d in nbrdeg)
dnorm[k] += source_degree(n, weight=weight)
dsum[k] += s
# normalize
dc = {}
for k, avg in dsum.items():
dc[k] = avg
norm = dnorm[k]
if avg > 0 and norm > 0:
dc[k] /= norm
return dc
k_nearest_neighbors = average_degree_connectivity
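# Worked check (added for clarity; not part of the original module): in the
# weighted docstring example, node 1 has weighted degree s_1 = 1 + 3 = 4 and
# neighbour degrees k_0 = 1, k_2 = 2, so k_nn^w = (1*1 + 3*2) / 4 = 1.75,
# matching the value reported for degree 2.
def _demo_weighted_average_degree_connectivity():
    import networkx as nx  # used only by this illustrative demo
    G = nx.path_graph(4)
    G.edges[1, 2]["weight"] = 3
    assert average_degree_connectivity(G, weight="weight")[2] == 1.75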

@ -1,287 +0,0 @@
"""Node assortativity coefficients and correlation measures.
"""
from networkx.algorithms.assortativity.mixing import (
degree_mixing_matrix,
attribute_mixing_matrix,
numeric_mixing_matrix,
)
from networkx.algorithms.assortativity.pairs import node_degree_xy
__all__ = [
"degree_pearson_correlation_coefficient",
"degree_assortativity_coefficient",
"attribute_assortativity_coefficient",
"numeric_assortativity_coefficient",
]
def degree_assortativity_coefficient(G, x="out", y="in", weight=None, nodes=None):
"""Compute degree assortativity of graph.
Assortativity measures the similarity of connections
in the graph with respect to the node degree.
Parameters
----------
G : NetworkX graph
x: string ('in','out')
The degree type for source node (directed graphs only).
y: string ('in','out')
The degree type for target node (directed graphs only).
weight: string or None, optional (default=None)
The edge attribute that holds the numerical value used
as a weight. If None, then each edge has weight 1.
The degree is the sum of the edge weights adjacent to the node.
nodes: list or iterable (optional)
Compute degree assortativity only for nodes in container.
The default is all nodes.
Returns
-------
r : float
Assortativity of graph by degree.
Examples
--------
>>> G = nx.path_graph(4)
>>> r = nx.degree_assortativity_coefficient(G)
>>> print(f"{r:3.1f}")
-0.5
See Also
--------
attribute_assortativity_coefficient
numeric_assortativity_coefficient
neighbor_connectivity
degree_mixing_dict
degree_mixing_matrix
Notes
-----
This computes Eq. (21) in Ref. [1]_ , where e is the joint
probability distribution (mixing matrix) of the degrees. If G is
    directed then the matrix e is the joint probability of the
user-specified degree type for the source and target.
References
----------
.. [1] M. E. J. Newman, Mixing patterns in networks,
Physical Review E, 67 026126, 2003
.. [2] Foster, J.G., Foster, D.V., Grassberger, P. & Paczuski, M.
Edge direction and the structure of networks, PNAS 107, 10815-20 (2010).
"""
M = degree_mixing_matrix(G, x=x, y=y, nodes=nodes, weight=weight)
return numeric_ac(M)
def degree_pearson_correlation_coefficient(G, x="out", y="in", weight=None, nodes=None):
"""Compute degree assortativity of graph.
Assortativity measures the similarity of connections
in the graph with respect to the node degree.
This is the same as degree_assortativity_coefficient but uses the
potentially faster scipy.stats.pearsonr function.
Parameters
----------
G : NetworkX graph
x: string ('in','out')
The degree type for source node (directed graphs only).
y: string ('in','out')
The degree type for target node (directed graphs only).
weight: string or None, optional (default=None)
The edge attribute that holds the numerical value used
as a weight. If None, then each edge has weight 1.
The degree is the sum of the edge weights adjacent to the node.
nodes: list or iterable (optional)
        Compute Pearson correlation of degrees only for specified nodes.
The default is all nodes.
Returns
-------
r : float
Assortativity of graph by degree.
Examples
--------
>>> G = nx.path_graph(4)
>>> r = nx.degree_pearson_correlation_coefficient(G)
>>> print(f"{r:3.1f}")
-0.5
Notes
-----
This calls scipy.stats.pearsonr.
References
----------
.. [1] M. E. J. Newman, Mixing patterns in networks
Physical Review E, 67 026126, 2003
.. [2] Foster, J.G., Foster, D.V., Grassberger, P. & Paczuski, M.
Edge direction and the structure of networks, PNAS 107, 10815-20 (2010).
"""
try:
import scipy.stats as stats
except ImportError as e:
raise ImportError("Assortativity requires SciPy:" "http://scipy.org/ ") from e
xy = node_degree_xy(G, x=x, y=y, nodes=nodes, weight=weight)
x, y = zip(*xy)
return stats.pearsonr(x, y)[0]
def attribute_assortativity_coefficient(G, attribute, nodes=None):
"""Compute assortativity for node attributes.
Assortativity measures the similarity of connections
in the graph with respect to the given attribute.
Parameters
----------
G : NetworkX graph
attribute : string
Node attribute key
nodes: list or iterable (optional)
Compute attribute assortativity for nodes in container.
The default is all nodes.
Returns
-------
r: float
Assortativity of graph for given attribute
Examples
--------
>>> G = nx.Graph()
>>> G.add_nodes_from([0, 1], color="red")
>>> G.add_nodes_from([2, 3], color="blue")
>>> G.add_edges_from([(0, 1), (2, 3)])
>>> print(nx.attribute_assortativity_coefficient(G, "color"))
1.0
Notes
-----
This computes Eq. (2) in Ref. [1]_ , (trace(M)-sum(M^2))/(1-sum(M^2)),
where M is the joint probability distribution (mixing matrix)
of the specified attribute.
References
----------
.. [1] M. E. J. Newman, Mixing patterns in networks,
Physical Review E, 67 026126, 2003
"""
M = attribute_mixing_matrix(G, attribute, nodes)
return attribute_ac(M)
def numeric_assortativity_coefficient(G, attribute, nodes=None):
"""Compute assortativity for numerical node attributes.
Assortativity measures the similarity of connections
in the graph with respect to the given numeric attribute.
The numeric attribute must be an integer.
Parameters
----------
G : NetworkX graph
attribute : string
Node attribute key. The corresponding attribute value must be an
integer.
nodes: list or iterable (optional)
Compute numeric assortativity only for attributes of nodes in
container. The default is all nodes.
Returns
-------
r: float
Assortativity of graph for given attribute
Examples
--------
>>> G = nx.Graph()
>>> G.add_nodes_from([0, 1], size=2)
>>> G.add_nodes_from([2, 3], size=3)
>>> G.add_edges_from([(0, 1), (2, 3)])
>>> print(nx.numeric_assortativity_coefficient(G, "size"))
1.0
Notes
-----
    This computes Eq. (21) in Ref. [1]_ , for the mixing matrix of
    the specified attribute.
References
----------
.. [1] M. E. J. Newman, Mixing patterns in networks
Physical Review E, 67 026126, 2003
"""
a = numeric_mixing_matrix(G, attribute, nodes)
return numeric_ac(a)
def attribute_ac(M):
"""Compute assortativity for attribute matrix M.
Parameters
----------
M : numpy.ndarray
2D ndarray representing the attribute mixing matrix.
Notes
-----
This computes Eq. (2) in Ref. [1]_ , (trace(e)-sum(e^2))/(1-sum(e^2)),
where e is the joint probability distribution (mixing matrix)
of the specified attribute.
References
----------
.. [1] M. E. J. Newman, Mixing patterns in networks,
Physical Review E, 67 026126, 2003
"""
try:
import numpy
except ImportError as e:
raise ImportError(
"attribute_assortativity requires " "NumPy: http://scipy.org/"
) from e
if M.sum() != 1.0:
M = M / M.sum()
s = (M @ M).sum()
t = M.trace()
r = (t - s) / (1 - s)
return r
def numeric_ac(M):
# M is a numpy matrix or array
# numeric assortativity coefficient, pearsonr
try:
import numpy
except ImportError as e:
raise ImportError(
"numeric_assortativity requires " "NumPy: http://scipy.org/"
) from e
if M.sum() != 1.0:
M = M / float(M.sum())
nx, ny = M.shape # nx=ny
x = numpy.arange(nx)
y = numpy.arange(ny)
a = M.sum(axis=0)
b = M.sum(axis=1)
vara = (a * x ** 2).sum() - ((a * x).sum()) ** 2
varb = (b * x ** 2).sum() - ((b * x).sum()) ** 2
xy = numpy.outer(x, y)
ab = numpy.outer(a, b)
return (xy * (M - ab)).sum() / numpy.sqrt(vara * varb)
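# Worked check (added for clarity; not part of the original module): a
# diagonal mixing matrix means every edge joins equal values, so both
# assortativity helpers return exactly 1.0.
def _demo_perfect_assortativity():
    import numpy  # used only by this illustrative demo
    M = numpy.array([[0.5, 0.0], [0.0, 0.5]])
    assert attribute_ac(M) == 1.0  # (trace(M) - sum(M^2)) / (1 - sum(M^2))
    assert numeric_ac(M) == 1.0  # Pearson correlation of the margins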

@ -1,233 +0,0 @@
"""
Mixing matrices for node attributes and degree.
"""
from networkx.utils import dict_to_numpy_array
from networkx.algorithms.assortativity.pairs import node_degree_xy, node_attribute_xy
__all__ = [
"attribute_mixing_matrix",
"attribute_mixing_dict",
"degree_mixing_matrix",
"degree_mixing_dict",
"numeric_mixing_matrix",
"mixing_dict",
]
def attribute_mixing_dict(G, attribute, nodes=None, normalized=False):
"""Returns dictionary representation of mixing matrix for attribute.
Parameters
----------
G : graph
NetworkX graph object.
attribute : string
Node attribute key.
nodes: list or iterable (optional)
        Use nodes in container to build the dict. The default is all nodes.
normalized : bool (default=False)
Return counts if False or probabilities if True.
Examples
--------
>>> G = nx.Graph()
>>> G.add_nodes_from([0, 1], color="red")
>>> G.add_nodes_from([2, 3], color="blue")
>>> G.add_edge(1, 3)
>>> d = nx.attribute_mixing_dict(G, "color")
>>> print(d["red"]["blue"])
1
>>> print(d["blue"]["red"]) # d symmetric for undirected graphs
1
Returns
-------
d : dictionary
Counts or joint probability of occurrence of attribute pairs.
"""
xy_iter = node_attribute_xy(G, attribute, nodes)
return mixing_dict(xy_iter, normalized=normalized)
def attribute_mixing_matrix(G, attribute, nodes=None, mapping=None, normalized=True):
"""Returns mixing matrix for attribute.
Parameters
----------
G : graph
NetworkX graph object.
attribute : string
Node attribute key.
nodes: list or iterable (optional)
Use only nodes in container to build the matrix. The default is
all nodes.
mapping : dictionary, optional
Mapping from node attribute to integer index in matrix.
If not specified, an arbitrary ordering will be used.
normalized : bool (default=True)
Return counts if False or probabilities if True.
Returns
-------
m: numpy array
Counts or joint probability of occurrence of attribute pairs.
"""
d = attribute_mixing_dict(G, attribute, nodes)
a = dict_to_numpy_array(d, mapping=mapping)
if normalized:
a = a / a.sum()
return a
def degree_mixing_dict(G, x="out", y="in", weight=None, nodes=None, normalized=False):
"""Returns dictionary representation of mixing matrix for degree.
Parameters
----------
G : graph
NetworkX graph object.
x: string ('in','out')
The degree type for source node (directed graphs only).
y: string ('in','out')
The degree type for target node (directed graphs only).
weight: string or None, optional (default=None)
The edge attribute that holds the numerical value used
as a weight. If None, then each edge has weight 1.
The degree is the sum of the edge weights adjacent to the node.
normalized : bool (default=False)
Return counts if False or probabilities if True.
Returns
-------
d: dictionary
Counts or joint probability of occurrence of degree pairs.
"""
xy_iter = node_degree_xy(G, x=x, y=y, nodes=nodes, weight=weight)
return mixing_dict(xy_iter, normalized=normalized)
def degree_mixing_matrix(G, x="out", y="in", weight=None, nodes=None, normalized=True):
"""Returns mixing matrix for attribute.
Parameters
----------
G : graph
NetworkX graph object.
x: string ('in','out')
The degree type for source node (directed graphs only).
y: string ('in','out')
The degree type for target node (directed graphs only).
nodes: list or iterable (optional)
Build the matrix using only nodes in container.
The default is all nodes.
weight: string or None, optional (default=None)
The edge attribute that holds the numerical value used
as a weight. If None, then each edge has weight 1.
The degree is the sum of the edge weights adjacent to the node.
normalized : bool (default=True)
Return counts if False or probabilities if True.
Returns
-------
m: numpy array
        Counts, or joint probability, of occurrence of node degree pairs.
"""
d = degree_mixing_dict(G, x=x, y=y, nodes=nodes, weight=weight)
s = set(d.keys())
for k, v in d.items():
s.update(v.keys())
m = max(s)
mapping = {x: x for x in range(m + 1)}
a = dict_to_numpy_array(d, mapping=mapping)
if normalized:
a = a / a.sum()
return a
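# Usage sketch (added for clarity; not part of the original module): for a
# path graph the matrix is indexed by degree, and it is symmetric because
# every undirected edge is counted once in each direction.
def _demo_degree_mixing_matrix():
    import networkx as nx  # used only by this illustrative demo
    a = degree_mixing_matrix(nx.path_graph(4))
    assert a.shape == (3, 3)  # rows/columns indexed by degree 0..2
    assert a[1, 2] == a[2, 1]  # symmetric for undirected graphs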
def numeric_mixing_matrix(G, attribute, nodes=None, normalized=True):
"""Returns numeric mixing matrix for attribute.
The attribute must be an integer.
Parameters
----------
G : graph
NetworkX graph object.
attribute : string
Node attribute key. The corresponding attribute must be an integer.
nodes: list or iterable (optional)
Build the matrix only with nodes in container. The default is all nodes.
normalized : bool (default=True)
Return counts if False or probabilities if True.
Returns
-------
m: numpy array
        Counts, or joint probability, of occurrence of node attribute pairs.
"""
d = attribute_mixing_dict(G, attribute, nodes)
s = set(d.keys())
for k, v in d.items():
s.update(v.keys())
m = max(s)
mapping = {x: x for x in range(m + 1)}
a = dict_to_numpy_array(d, mapping=mapping)
if normalized:
a = a / a.sum()
return a
def mixing_dict(xy, normalized=False):
"""Returns a dictionary representation of mixing matrix.
Parameters
----------
xy : list or container of two-tuples
Pairs of (x,y) items.
normalized : bool (default=False)
Return counts if False or probabilities if True.
Returns
-------
d: dictionary
        Counts or joint probability of occurrence of values in xy.
"""
d = {}
psum = 0.0
for x, y in xy:
if x not in d:
d[x] = {}
if y not in d:
d[y] = {}
v = d[x].get(y, 0)
d[x][y] = v + 1
psum += 1
if normalized:
for k, jdict in d.items():
for j in jdict:
jdict[j] /= psum
return d
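# Usage sketch (added for clarity; not part of the original module):
# mixing_dict works on any iterable of (x, y) pairs, with no graph needed.
def _demo_mixing_dict():
    d = mixing_dict([("red", "blue"), ("blue", "red")], normalized=True)
    assert d["red"]["blue"] == 0.5
    assert d["blue"]["red"] == 0.5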

@ -1,122 +0,0 @@
__all__ = ["average_neighbor_degree"]
def _average_nbr_deg(G, source_degree, target_degree, nodes=None, weight=None):
# average degree of neighbors
avg = {}
for n, deg in source_degree(nodes, weight=weight):
        # normalize, but avoid dividing by a zero degree
if deg == 0:
deg = 1
nbrdeg = target_degree(G[n])
if weight is None:
avg[n] = sum(d for n, d in nbrdeg) / float(deg)
else:
avg[n] = sum((G[n][nbr].get(weight, 1) * d for nbr, d in nbrdeg)) / float(
deg
)
return avg
def average_neighbor_degree(G, source="out", target="out", nodes=None, weight=None):
r"""Returns the average degree of the neighborhood of each node.
The average neighborhood degree of a node `i` is
.. math::
k_{nn,i} = \frac{1}{|N(i)|} \sum_{j \in N(i)} k_j
where `N(i)` are the neighbors of node `i` and `k_j` is
the degree of node `j` which belongs to `N(i)`. For weighted
graphs, an analogous measure can be defined [1]_,
.. math::
k_{nn,i}^{w} = \frac{1}{s_i} \sum_{j \in N(i)} w_{ij} k_j
where `s_i` is the weighted degree of node `i`, `w_{ij}`
is the weight of the edge that links `i` and `j` and
`N(i)` are the neighbors of node `i`.
Parameters
----------
G : NetworkX graph
source : string ("in"|"out")
Directed graphs only.
Use "in"- or "out"-degree for source node.
target : string ("in"|"out")
Directed graphs only.
Use "in"- or "out"-degree for target node.
nodes : list or iterable, optional
Compute neighbor degree for specified nodes. The default is
all nodes in the graph.
weight : string or None, optional (default=None)
The edge attribute that holds the numerical value used as a weight.
If None, then each edge has weight 1.
Returns
-------
d: dict
A dictionary keyed by node with average neighbors degree value.
Examples
--------
>>> G = nx.path_graph(4)
>>> G.edges[0, 1]["weight"] = 5
>>> G.edges[2, 3]["weight"] = 3
>>> nx.average_neighbor_degree(G)
{0: 2.0, 1: 1.5, 2: 1.5, 3: 2.0}
>>> nx.average_neighbor_degree(G, weight="weight")
{0: 2.0, 1: 1.1666666666666667, 2: 1.25, 3: 2.0}
>>> G = nx.DiGraph()
>>> nx.add_path(G, [0, 1, 2, 3])
>>> nx.average_neighbor_degree(G, source="in", target="in")
{0: 1.0, 1: 1.0, 2: 1.0, 3: 0.0}
>>> nx.average_neighbor_degree(G, source="out", target="out")
{0: 1.0, 1: 1.0, 2: 0.0, 3: 0.0}
Notes
-----
For directed graphs you can also specify in-degree or out-degree
by passing keyword arguments.
See Also
--------
average_degree_connectivity
References
----------
.. [1] A. Barrat, M. Barthélemy, R. Pastor-Satorras, and A. Vespignani,
"The architecture of complex weighted networks".
       PNAS 101 (11): 3747-3752 (2004).
"""
source_degree = G.degree
target_degree = G.degree
if G.is_directed():
direction = {"out": G.out_degree, "in": G.in_degree}
source_degree = direction[source]
target_degree = direction[target]
return _average_nbr_deg(G, source_degree, target_degree, nodes=nodes, weight=weight)
# obsolete
# def average_neighbor_in_degree(G, nodes=None, weight=None):
# if not G.is_directed():
# raise nx.NetworkXError("Not defined for undirected graphs.")
# return _average_nbr_deg(G, G.in_degree, G.in_degree, nodes, weight)
# average_neighbor_in_degree.__doc__=average_neighbor_degree.__doc__
# def average_neighbor_out_degree(G, nodes=None, weight=None):
# if not G.is_directed():
# raise nx.NetworkXError("Not defined for undirected graphs.")
# return _average_nbr_deg(G, G.out_degree, G.out_degree, nodes, weight)
# average_neighbor_out_degree.__doc__=average_neighbor_degree.__doc__

@ -1,116 +0,0 @@
"""Generators of x-y pairs of node data."""
__all__ = ["node_attribute_xy", "node_degree_xy"]
def node_attribute_xy(G, attribute, nodes=None):
"""Returns iterator of node-attribute pairs for all edges in G.
Parameters
----------
G: NetworkX graph
attribute: key
The node attribute key.
nodes: list or iterable (optional)
        Use only edges that are adjacent to specified nodes.
The default is all nodes.
Returns
-------
(x,y): 2-tuple
Generates 2-tuple of (attribute,attribute) values.
Examples
--------
>>> G = nx.DiGraph()
>>> G.add_node(1, color="red")
>>> G.add_node(2, color="blue")
>>> G.add_edge(1, 2)
>>> list(nx.node_attribute_xy(G, "color"))
[('red', 'blue')]
Notes
-----
For undirected graphs each edge is produced twice, once for each edge
representation (u,v) and (v,u), with the exception of self-loop edges
which only appear once.
"""
if nodes is None:
nodes = set(G)
else:
nodes = set(nodes)
Gnodes = G.nodes
for u, nbrsdict in G.adjacency():
if u not in nodes:
continue
uattr = Gnodes[u].get(attribute, None)
if G.is_multigraph():
for v, keys in nbrsdict.items():
vattr = Gnodes[v].get(attribute, None)
for k, d in keys.items():
yield (uattr, vattr)
else:
for v, eattr in nbrsdict.items():
vattr = Gnodes[v].get(attribute, None)
yield (uattr, vattr)
def node_degree_xy(G, x="out", y="in", weight=None, nodes=None):
"""Generate node degree-degree pairs for edges in G.
Parameters
----------
G: NetworkX graph
x: string ('in','out')
The degree type for source node (directed graphs only).
y: string ('in','out')
The degree type for target node (directed graphs only).
weight: string or None, optional (default=None)
The edge attribute that holds the numerical value used
as a weight. If None, then each edge has weight 1.
The degree is the sum of the edge weights adjacent to the node.
nodes: list or iterable (optional)
        Use only edges that are adjacent to specified nodes.
The default is all nodes.
Returns
-------
(x,y): 2-tuple
Generates 2-tuple of (degree,degree) values.
Examples
--------
>>> G = nx.DiGraph()
>>> G.add_edge(1, 2)
>>> list(nx.node_degree_xy(G, x="out", y="in"))
[(1, 1)]
>>> list(nx.node_degree_xy(G, x="in", y="out"))
[(0, 0)]
Notes
-----
For undirected graphs each edge is produced twice, once for each edge
representation (u,v) and (v,u), with the exception of self-loop edges
which only appear once.
"""
if nodes is None:
nodes = set(G)
else:
nodes = set(nodes)
xdeg = G.degree
ydeg = G.degree
if G.is_directed():
direction = {"out": G.out_degree, "in": G.in_degree}
xdeg = direction[x]
ydeg = direction[y]
for u, degu in xdeg(nodes, weight=weight):
neighbors = (nbr for _, nbr in G.edges(u) if nbr in nodes)
for v, degv in ydeg(neighbors, weight=weight):
yield degu, degv

@ -1,51 +0,0 @@
import networkx as nx
class BaseTestAttributeMixing:
@classmethod
def setup_class(cls):
G = nx.Graph()
G.add_nodes_from([0, 1], fish="one")
G.add_nodes_from([2, 3], fish="two")
G.add_nodes_from([4], fish="red")
G.add_nodes_from([5], fish="blue")
G.add_edges_from([(0, 1), (2, 3), (0, 4), (2, 5)])
cls.G = G
D = nx.DiGraph()
D.add_nodes_from([0, 1], fish="one")
D.add_nodes_from([2, 3], fish="two")
D.add_nodes_from([4], fish="red")
D.add_nodes_from([5], fish="blue")
D.add_edges_from([(0, 1), (2, 3), (0, 4), (2, 5)])
cls.D = D
M = nx.MultiGraph()
M.add_nodes_from([0, 1], fish="one")
M.add_nodes_from([2, 3], fish="two")
M.add_nodes_from([4], fish="red")
M.add_nodes_from([5], fish="blue")
M.add_edges_from([(0, 1), (0, 1), (2, 3)])
cls.M = M
S = nx.Graph()
S.add_nodes_from([0, 1], fish="one")
S.add_nodes_from([2, 3], fish="two")
S.add_nodes_from([4], fish="red")
S.add_nodes_from([5], fish="blue")
S.add_edge(0, 0)
S.add_edge(2, 2)
cls.S = S
class BaseTestDegreeMixing:
@classmethod
def setup_class(cls):
cls.P4 = nx.path_graph(4)
cls.D = nx.DiGraph()
cls.D.add_edges_from([(0, 2), (0, 3), (1, 3), (2, 3)])
cls.M = nx.MultiGraph()
nx.add_path(cls.M, range(4))
cls.M.add_edge(0, 1)
cls.S = nx.Graph()
cls.S.add_edges_from([(0, 0), (1, 1)])

@ -1,139 +0,0 @@
from itertools import permutations
import pytest
import networkx as nx
from networkx.testing import almost_equal
class TestNeighborConnectivity:
def test_degree_p4(self):
G = nx.path_graph(4)
answer = {1: 2.0, 2: 1.5}
nd = nx.average_degree_connectivity(G)
assert nd == answer
D = G.to_directed()
answer = {2: 2.0, 4: 1.5}
nd = nx.average_degree_connectivity(D)
assert nd == answer
answer = {1: 2.0, 2: 1.5}
D = G.to_directed()
nd = nx.average_degree_connectivity(D, source="in", target="in")
assert nd == answer
D = G.to_directed()
nd = nx.average_degree_connectivity(D, source="in", target="in")
assert nd == answer
def test_degree_p4_weighted(self):
G = nx.path_graph(4)
G[1][2]["weight"] = 4
answer = {1: 2.0, 2: 1.8}
nd = nx.average_degree_connectivity(G, weight="weight")
assert nd == answer
answer = {1: 2.0, 2: 1.5}
nd = nx.average_degree_connectivity(G)
assert nd == answer
D = G.to_directed()
answer = {2: 2.0, 4: 1.8}
nd = nx.average_degree_connectivity(D, weight="weight")
assert nd == answer
answer = {1: 2.0, 2: 1.8}
D = G.to_directed()
nd = nx.average_degree_connectivity(
D, weight="weight", source="in", target="in"
)
assert nd == answer
D = G.to_directed()
nd = nx.average_degree_connectivity(
D, source="in", target="out", weight="weight"
)
assert nd == answer
def test_weight_keyword(self):
G = nx.path_graph(4)
G[1][2]["other"] = 4
answer = {1: 2.0, 2: 1.8}
nd = nx.average_degree_connectivity(G, weight="other")
assert nd == answer
answer = {1: 2.0, 2: 1.5}
nd = nx.average_degree_connectivity(G, weight=None)
assert nd == answer
D = G.to_directed()
answer = {2: 2.0, 4: 1.8}
nd = nx.average_degree_connectivity(D, weight="other")
assert nd == answer
answer = {1: 2.0, 2: 1.8}
D = G.to_directed()
nd = nx.average_degree_connectivity(D, weight="other", source="in", target="in")
assert nd == answer
D = G.to_directed()
nd = nx.average_degree_connectivity(D, weight="other", source="in", target="in")
assert nd == answer
def test_degree_barrat(self):
G = nx.star_graph(5)
G.add_edges_from([(5, 6), (5, 7), (5, 8), (5, 9)])
G[0][5]["weight"] = 5
nd = nx.average_degree_connectivity(G)[5]
assert nd == 1.8
nd = nx.average_degree_connectivity(G, weight="weight")[5]
assert almost_equal(nd, 3.222222, places=5)
nd = nx.k_nearest_neighbors(G, weight="weight")[5]
assert almost_equal(nd, 3.222222, places=5)
def test_zero_deg(self):
G = nx.DiGraph()
G.add_edge(1, 2)
G.add_edge(1, 3)
G.add_edge(1, 4)
c = nx.average_degree_connectivity(G)
assert c == {1: 0, 3: 1}
c = nx.average_degree_connectivity(G, source="in", target="in")
assert c == {0: 0, 1: 0}
c = nx.average_degree_connectivity(G, source="in", target="out")
assert c == {0: 0, 1: 3}
c = nx.average_degree_connectivity(G, source="in", target="in+out")
assert c == {0: 0, 1: 3}
c = nx.average_degree_connectivity(G, source="out", target="out")
assert c == {0: 0, 3: 0}
c = nx.average_degree_connectivity(G, source="out", target="in")
assert c == {0: 0, 3: 1}
c = nx.average_degree_connectivity(G, source="out", target="in+out")
assert c == {0: 0, 3: 1}
def test_in_out_weight(self):
G = nx.DiGraph()
G.add_edge(1, 2, weight=1)
G.add_edge(1, 3, weight=1)
G.add_edge(3, 1, weight=1)
for s, t in permutations(["in", "out", "in+out"], 2):
c = nx.average_degree_connectivity(G, source=s, target=t)
cw = nx.average_degree_connectivity(G, source=s, target=t, weight="weight")
assert c == cw
def test_invalid_source(self):
with pytest.raises(ValueError):
G = nx.DiGraph()
nx.average_degree_connectivity(G, source="bogus")
def test_invalid_target(self):
with pytest.raises(ValueError):
G = nx.DiGraph()
nx.average_degree_connectivity(G, target="bogus")
def test_single_node(self):
# TODO Is this really the intended behavior for providing a
# single node as the argument `nodes`? Shouldn't the function
# just return the connectivity value itself?
G = nx.trivial_graph()
conn = nx.average_degree_connectivity(G, nodes=0)
assert conn == {0: 0}

@@ -1,76 +0,0 @@
import pytest
np = pytest.importorskip("numpy")
npt = pytest.importorskip("numpy.testing")
scipy = pytest.importorskip("scipy")
import networkx as nx
from .base_test import BaseTestAttributeMixing, BaseTestDegreeMixing
from networkx.algorithms.assortativity.correlation import attribute_ac
class TestDegreeMixingCorrelation(BaseTestDegreeMixing):
def test_degree_assortativity_undirected(self):
r = nx.degree_assortativity_coefficient(self.P4)
npt.assert_almost_equal(r, -1.0 / 2, decimal=4)
def test_degree_assortativity_directed(self):
r = nx.degree_assortativity_coefficient(self.D)
npt.assert_almost_equal(r, -0.57735, decimal=4)
def test_degree_assortativity_multigraph(self):
r = nx.degree_assortativity_coefficient(self.M)
npt.assert_almost_equal(r, -1.0 / 7.0, decimal=4)
def test_degree_pearson_assortativity_undirected(self):
r = nx.degree_pearson_correlation_coefficient(self.P4)
npt.assert_almost_equal(r, -1.0 / 2, decimal=4)
def test_degree_pearson_assortativity_directed(self):
r = nx.degree_pearson_correlation_coefficient(self.D)
npt.assert_almost_equal(r, -0.57735, decimal=4)
def test_degree_pearson_assortativity_multigraph(self):
r = nx.degree_pearson_correlation_coefficient(self.M)
npt.assert_almost_equal(r, -1.0 / 7.0, decimal=4)
class TestAttributeMixingCorrelation(BaseTestAttributeMixing):
def test_attribute_assortativity_undirected(self):
r = nx.attribute_assortativity_coefficient(self.G, "fish")
assert r == 6.0 / 22.0
def test_attribute_assortativity_directed(self):
r = nx.attribute_assortativity_coefficient(self.D, "fish")
assert r == 1.0 / 3.0
def test_attribute_assortativity_multigraph(self):
r = nx.attribute_assortativity_coefficient(self.M, "fish")
assert r == 1.0
def test_attribute_assortativity_coefficient(self):
# from "Mixing patterns in networks"
# fmt: off
a = np.array([[0.258, 0.016, 0.035, 0.013],
[0.012, 0.157, 0.058, 0.019],
[0.013, 0.023, 0.306, 0.035],
[0.005, 0.007, 0.024, 0.016]])
# fmt: on
r = attribute_ac(a)
npt.assert_almost_equal(r, 0.623, decimal=3)
def test_attribute_assortativity_coefficient2(self):
# fmt: off
a = np.array([[0.18, 0.02, 0.01, 0.03],
[0.02, 0.20, 0.03, 0.02],
[0.01, 0.03, 0.16, 0.01],
[0.03, 0.02, 0.01, 0.22]])
# fmt: on
r = attribute_ac(a)
npt.assert_almost_equal(r, 0.68, decimal=2)
def test_attribute_assortativity(self):
a = np.array([[50, 50, 0], [50, 50, 0], [0, 0, 2]])
r = attribute_ac(a)
npt.assert_almost_equal(r, 0.029, decimal=3)

@@ -1,142 +0,0 @@
import pytest
np = pytest.importorskip("numpy")
npt = pytest.importorskip("numpy.testing")
import networkx as nx
from .base_test import BaseTestAttributeMixing, BaseTestDegreeMixing
class TestDegreeMixingDict(BaseTestDegreeMixing):
def test_degree_mixing_dict_undirected(self):
d = nx.degree_mixing_dict(self.P4)
d_result = {1: {2: 2}, 2: {1: 2, 2: 2}}
assert d == d_result
def test_degree_mixing_dict_undirected_normalized(self):
d = nx.degree_mixing_dict(self.P4, normalized=True)
d_result = {1: {2: 1.0 / 3}, 2: {1: 1.0 / 3, 2: 1.0 / 3}}
assert d == d_result
def test_degree_mixing_dict_directed(self):
d = nx.degree_mixing_dict(self.D)
d_result = {1: {3: 2}, 2: {1: 1, 3: 1}, 3: {}}
assert d == d_result
def test_degree_mixing_dict_multigraph(self):
d = nx.degree_mixing_dict(self.M)
d_result = {1: {2: 1}, 2: {1: 1, 3: 3}, 3: {2: 3}}
assert d == d_result
class TestDegreeMixingMatrix(BaseTestDegreeMixing):
def test_degree_mixing_matrix_undirected(self):
# fmt: off
a_result = np.array([[0, 0, 0],
[0, 0, 2],
[0, 2, 2]]
)
# fmt: on
a = nx.degree_mixing_matrix(self.P4, normalized=False)
npt.assert_equal(a, a_result)
a = nx.degree_mixing_matrix(self.P4)
npt.assert_equal(a, a_result / float(a_result.sum()))
def test_degree_mixing_matrix_directed(self):
# fmt: off
a_result = np.array([[0, 0, 0, 0],
[0, 0, 0, 2],
[0, 1, 0, 1],
[0, 0, 0, 0]]
)
# fmt: on
a = nx.degree_mixing_matrix(self.D, normalized=False)
npt.assert_equal(a, a_result)
a = nx.degree_mixing_matrix(self.D)
npt.assert_equal(a, a_result / float(a_result.sum()))
def test_degree_mixing_matrix_multigraph(self):
# fmt: off
a_result = np.array([[0, 0, 0, 0],
[0, 0, 1, 0],
[0, 1, 0, 3],
[0, 0, 3, 0]]
)
# fmt: on
a = nx.degree_mixing_matrix(self.M, normalized=False)
npt.assert_equal(a, a_result)
a = nx.degree_mixing_matrix(self.M)
npt.assert_equal(a, a_result / float(a_result.sum()))
def test_degree_mixing_matrix_selfloop(self):
# fmt: off
a_result = np.array([[0, 0, 0],
[0, 0, 0],
[0, 0, 2]]
)
# fmt: on
a = nx.degree_mixing_matrix(self.S, normalized=False)
npt.assert_equal(a, a_result)
a = nx.degree_mixing_matrix(self.S)
npt.assert_equal(a, a_result / float(a_result.sum()))
class TestAttributeMixingDict(BaseTestAttributeMixing):
def test_attribute_mixing_dict_undirected(self):
d = nx.attribute_mixing_dict(self.G, "fish")
d_result = {
"one": {"one": 2, "red": 1},
"two": {"two": 2, "blue": 1},
"red": {"one": 1},
"blue": {"two": 1},
}
assert d == d_result
def test_attribute_mixing_dict_directed(self):
d = nx.attribute_mixing_dict(self.D, "fish")
d_result = {
"one": {"one": 1, "red": 1},
"two": {"two": 1, "blue": 1},
"red": {},
"blue": {},
}
assert d == d_result
def test_attribute_mixing_dict_multigraph(self):
d = nx.attribute_mixing_dict(self.M, "fish")
d_result = {"one": {"one": 4}, "two": {"two": 2}}
assert d == d_result
class TestAttributeMixingMatrix(BaseTestAttributeMixing):
def test_attribute_mixing_matrix_undirected(self):
mapping = {"one": 0, "two": 1, "red": 2, "blue": 3}
a_result = np.array([[2, 0, 1, 0], [0, 2, 0, 1], [1, 0, 0, 0], [0, 1, 0, 0]])
a = nx.attribute_mixing_matrix(
self.G, "fish", mapping=mapping, normalized=False
)
npt.assert_equal(a, a_result)
a = nx.attribute_mixing_matrix(self.G, "fish", mapping=mapping)
npt.assert_equal(a, a_result / float(a_result.sum()))
def test_attribute_mixing_matrix_directed(self):
mapping = {"one": 0, "two": 1, "red": 2, "blue": 3}
a_result = np.array([[1, 0, 1, 0], [0, 1, 0, 1], [0, 0, 0, 0], [0, 0, 0, 0]])
a = nx.attribute_mixing_matrix(
self.D, "fish", mapping=mapping, normalized=False
)
npt.assert_equal(a, a_result)
a = nx.attribute_mixing_matrix(self.D, "fish", mapping=mapping)
npt.assert_equal(a, a_result / float(a_result.sum()))
def test_attribute_mixing_matrix_multigraph(self):
mapping = {"one": 0, "two": 1, "red": 2, "blue": 3}
a_result = np.array([[4, 0, 0, 0], [0, 2, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]])
a = nx.attribute_mixing_matrix(
self.M, "fish", mapping=mapping, normalized=False
)
npt.assert_equal(a, a_result)
a = nx.attribute_mixing_matrix(self.M, "fish", mapping=mapping)
npt.assert_equal(a, a_result / float(a_result.sum()))

@@ -1,76 +0,0 @@
import networkx as nx
from networkx.testing import almost_equal
class TestAverageNeighbor:
def test_degree_p4(self):
G = nx.path_graph(4)
answer = {0: 2, 1: 1.5, 2: 1.5, 3: 2}
nd = nx.average_neighbor_degree(G)
assert nd == answer
D = G.to_directed()
nd = nx.average_neighbor_degree(D)
assert nd == answer
D = G.to_directed()
nd = nx.average_neighbor_degree(D)
assert nd == answer
D = G.to_directed()
nd = nx.average_neighbor_degree(D, source="in", target="in")
assert nd == answer
def test_degree_p4_weighted(self):
G = nx.path_graph(4)
G[1][2]["weight"] = 4
answer = {0: 2, 1: 1.8, 2: 1.8, 3: 2}
nd = nx.average_neighbor_degree(G, weight="weight")
assert nd == answer
D = G.to_directed()
nd = nx.average_neighbor_degree(D, weight="weight")
assert nd == answer
D = G.to_directed()
nd = nx.average_neighbor_degree(D, weight="weight")
assert nd == answer
nd = nx.average_neighbor_degree(D, source="out", target="out", weight="weight")
assert nd == answer
D = G.to_directed()
nd = nx.average_neighbor_degree(D, source="in", target="in", weight="weight")
assert nd == answer
def test_degree_k4(self):
G = nx.complete_graph(4)
answer = {0: 3, 1: 3, 2: 3, 3: 3}
nd = nx.average_neighbor_degree(G)
assert nd == answer
D = G.to_directed()
nd = nx.average_neighbor_degree(D)
assert nd == answer
D = G.to_directed()
nd = nx.average_neighbor_degree(D)
assert nd == answer
D = G.to_directed()
nd = nx.average_neighbor_degree(D, source="in", target="in")
assert nd == answer
def test_degree_k4_nodes(self):
G = nx.complete_graph(4)
answer = {1: 3.0, 2: 3.0}
nd = nx.average_neighbor_degree(G, nodes=[1, 2])
assert nd == answer
def test_degree_barrat(self):
G = nx.star_graph(5)
G.add_edges_from([(5, 6), (5, 7), (5, 8), (5, 9)])
G[0][5]["weight"] = 5
nd = nx.average_neighbor_degree(G)[5]
assert nd == 1.8
nd = nx.average_neighbor_degree(G, weight="weight")[5]
assert almost_equal(nd, 3.222222, places=5)

@@ -1,86 +0,0 @@
import networkx as nx
from .base_test import BaseTestAttributeMixing, BaseTestDegreeMixing
class TestAttributeMixingXY(BaseTestAttributeMixing):
def test_node_attribute_xy_undirected(self):
attrxy = sorted(nx.node_attribute_xy(self.G, "fish"))
attrxy_result = sorted(
[
("one", "one"),
("one", "one"),
("two", "two"),
("two", "two"),
("one", "red"),
("red", "one"),
("blue", "two"),
("two", "blue"),
]
)
assert attrxy == attrxy_result
def test_node_attribute_xy_undirected_nodes(self):
attrxy = sorted(nx.node_attribute_xy(self.G, "fish", nodes=["one", "yellow"]))
attrxy_result = sorted([])
assert attrxy == attrxy_result
def test_node_attribute_xy_directed(self):
attrxy = sorted(nx.node_attribute_xy(self.D, "fish"))
attrxy_result = sorted(
[("one", "one"), ("two", "two"), ("one", "red"), ("two", "blue")]
)
assert attrxy == attrxy_result
def test_node_attribute_xy_multigraph(self):
attrxy = sorted(nx.node_attribute_xy(self.M, "fish"))
attrxy_result = [
("one", "one"),
("one", "one"),
("one", "one"),
("one", "one"),
("two", "two"),
("two", "two"),
]
assert attrxy == attrxy_result
def test_node_attribute_xy_selfloop(self):
attrxy = sorted(nx.node_attribute_xy(self.S, "fish"))
attrxy_result = [("one", "one"), ("two", "two")]
assert attrxy == attrxy_result
class TestDegreeMixingXY(BaseTestDegreeMixing):
def test_node_degree_xy_undirected(self):
xy = sorted(nx.node_degree_xy(self.P4))
xy_result = sorted([(1, 2), (2, 1), (2, 2), (2, 2), (1, 2), (2, 1)])
assert xy == xy_result
def test_node_degree_xy_undirected_nodes(self):
xy = sorted(nx.node_degree_xy(self.P4, nodes=[0, 1, -1]))
xy_result = sorted([(1, 2), (2, 1)])
assert xy == xy_result
def test_node_degree_xy_directed(self):
xy = sorted(nx.node_degree_xy(self.D))
xy_result = sorted([(2, 1), (2, 3), (1, 3), (1, 3)])
assert xy == xy_result
def test_node_degree_xy_multigraph(self):
xy = sorted(nx.node_degree_xy(self.M))
xy_result = sorted(
[(2, 3), (2, 3), (3, 2), (3, 2), (2, 3), (3, 2), (1, 2), (2, 1)]
)
assert xy == xy_result
def test_node_degree_xy_selfloop(self):
xy = sorted(nx.node_degree_xy(self.S))
xy_result = sorted([(2, 2), (2, 2)])
assert xy == xy_result
def test_node_degree_xy_weighted(self):
G = nx.Graph()
G.add_edge(1, 2, weight=7)
G.add_edge(2, 3, weight=10)
xy = sorted(nx.node_degree_xy(G, weight="weight"))
xy_result = sorted([(7, 17), (17, 10), (17, 7), (10, 17)])
assert xy == xy_result

@@ -1,168 +0,0 @@
"""
Algorithms for asteroidal triples and asteroidal numbers in graphs.
An asteroidal triple in a graph G is a set of three non-adjacent vertices
u, v and w such that there exists a path between any two of them that avoids
the closed neighborhood of the third. More formally, v_j and v_k belong to the same
connected component of G - N[v_i], where N[v_i] denotes the closed neighborhood
of v_i. A graph which does not contain any asteroidal triples is called
an AT-free graph. The class of AT-free graphs is a graph class for which
many NP-complete problems are solvable in polynomial time, among them
independent set and coloring.
"""
import networkx as nx
from networkx.utils import not_implemented_for
__all__ = ["is_at_free", "find_asteroidal_triple"]
@not_implemented_for("directed")
@not_implemented_for("multigraph")
def find_asteroidal_triple(G):
r"""Find an asteroidal triple in the given graph.
An asteroidal triple is a triple of non-adjacent vertices such that
there exists a path between any two of them which avoids the closed
neighborhood of the third. The function checks all independent triples of
vertices and decides whether they form an asteroidal triple. This is done with the
help of a data structure called a component structure.
A component structure encodes information about which vertices belong to
the same connected component when the closed neighborhood of a given vertex
is removed from the graph. The algorithm used here is the trivial
one, outlined in [1]_, which has a runtime of
:math:`O(|V||\overline{E}| + |V||E|)`, where the second term is the
creation of the component structure.
Parameters
----------
G : NetworkX Graph
The graph to check whether it is AT-free or not.
Returns
-------
list or None
An asteroidal triple is returned as a list of nodes. If no asteroidal
triple exists, i.e. the graph is AT-free, then None is returned.
Notes
-----
The component structure and the algorithm are described in [1]_. The current
implementation uses the trivial algorithm for simple graphs.
References
----------
.. [1] Ekkehard Köhler,
"Recognizing Graphs without asteroidal triples",
Journal of Discrete Algorithms 2, pages 439-452, 2004.
https://www.sciencedirect.com/science/article/pii/S157086670400019X
"""
V = set(G.nodes)
if len(V) < 6:
# An asteroidal triple cannot exist in a graph with fewer than six vertices.
return None
component_structure = create_component_structure(G)
E_complement = set(nx.complement(G).edges)
for e in E_complement:
u = e[0]
v = e[1]
u_neighborhood = set(G[u]).union([u])
v_neighborhood = set(G[v]).union([v])
union_of_neighborhoods = u_neighborhood.union(v_neighborhood)
for w in V - union_of_neighborhoods:
"""Check for each pair of vertices whether they belong to the
same connected component when the closed neighborhood of the
third is removed."""
if (
component_structure[u][v] == component_structure[u][w]
and component_structure[v][u] == component_structure[v][w]
and component_structure[w][u] == component_structure[w][v]
):
return [u, v, w]
return None
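# Illustrative sketch (added for exposition; not part of the original module):
# the 6-cycle contains an asteroidal triple (three pairwise non-adjacent
# vertices such as 0, 2 and 4), while the path on six vertices is AT-free.
if __name__ == "__main__":  # pragma: no cover - example only
    import networkx as nx

    assert nx.find_asteroidal_triple(nx.cycle_graph(6)) is not None
    assert nx.find_asteroidal_triple(nx.path_graph(6)) is None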
@not_implemented_for("directed")
@not_implemented_for("multigraph")
def is_at_free(G):
"""Check if a graph is AT-free.
The method uses the `find_asteroidal_triple` method to recognize
an AT-free graph. If no asteroidal triple is found the graph is
AT-free and True is returned. If at least one asteroidal triple is
found the graph is not AT-free and False is returned.
Parameters
----------
G : NetworkX Graph
The graph to check whether it is AT-free or not.
Returns
-------
bool
True if G is AT-free and False otherwise.
Examples
--------
>>> G = nx.Graph([(0, 1), (0, 2), (1, 2), (1, 3), (1, 4), (4, 5)])
>>> nx.is_at_free(G)
True
>>> G = nx.cycle_graph(6)
>>> nx.is_at_free(G)
False
"""
return find_asteroidal_triple(G) is None
@not_implemented_for("directed")
@not_implemented_for("multigraph")
def create_component_structure(G):
r"""Create component structure for G.
A *component structure* is an `n \times n` array, denoted `c`, where `n` is
the number of vertices and each row and column corresponds to a vertex.
.. math::
c_{uv} = \begin{cases} 0, & \text{if } v \in N[u] \\
k, & \text{if } v \text{ belongs to component } k \text{ of } G \setminus N[u] \end{cases}
where `k` is an arbitrary label for each component. The structure is used
to simplify the detection of asteroidal triples.
Parameters
----------
G : NetworkX Graph
Undirected, simple graph.
Returns
-------
component_structure : dictionary
A dictionary of dictionaries, keyed by pairs of vertices.
"""
V = set(G.nodes)
component_structure = {}
for v in V:
label = 0
closed_neighborhood = set(G[v]).union({v})
row_dict = {}
for u in closed_neighborhood:
row_dict[u] = 0
G_reduced = G.subgraph(set(G.nodes) - closed_neighborhood)
for cc in nx.connected_components(G_reduced):
label += 1
for u in cc:
row_dict[u] = label
component_structure[v] = row_dict
return component_structure
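# Illustrative sketch (added for exposition; not part of the original module):
# for the path 0-1-2-3, removing the closed neighborhood N[0] = {0, 1} leaves
# the single component {2, 3}, which receives the (arbitrary) label 1.
if __name__ == "__main__":  # pragma: no cover - example only
    import networkx as nx

    cs = create_component_structure(nx.path_graph(4))
    assert cs[0] == {0: 0, 1: 0, 2: 1, 3: 1}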

@@ -1,86 +0,0 @@
r""" This module provides functions and operations for bipartite
graphs. Bipartite graphs `B = (U, V, E)` have two node sets `U,V` and edges in
`E` that only connect nodes from opposite sets. It is common in the literature
to use a spatial analogy, referring to the two node sets as top and bottom nodes.
The bipartite algorithms are not imported into the networkx namespace
at the top level so the easiest way to use them is with:
>>> from networkx.algorithms import bipartite
NetworkX does not have a custom bipartite graph class but the Graph()
or DiGraph() classes can be used to represent bipartite graphs. However,
you have to keep track of which set each node belongs to, and make
sure that there is no edge between nodes of the same set. The convention used
in NetworkX is to use a node attribute named `bipartite` with values 0 or 1 to
identify the sets each node belongs to. This convention is not enforced in
the source code of the bipartite functions; it is only a recommendation.
For example:
>>> B = nx.Graph()
>>> # Add nodes with the node attribute "bipartite"
>>> B.add_nodes_from([1, 2, 3, 4], bipartite=0)
>>> B.add_nodes_from(["a", "b", "c"], bipartite=1)
>>> # Add edges only between nodes of opposite node sets
>>> B.add_edges_from([(1, "a"), (1, "b"), (2, "b"), (2, "c"), (3, "c"), (4, "a")])
Many algorithms of the bipartite module of NetworkX require, as an argument, a
container with all the nodes that belong to one set, in addition to the bipartite
graph `B`. The functions in the bipartite package do not check that the node set
is actually correct nor that the input graph is actually bipartite.
If `B` is connected, you can find the two node sets using a two-coloring
algorithm:
>>> nx.is_connected(B)
True
>>> bottom_nodes, top_nodes = bipartite.sets(B)
However, if the input graph is not connected, more than one valid coloration
is possible. This is why we require the user to pass a container
with all nodes of one bipartite node set as an argument to most bipartite
functions. In the face of ambiguity, we refuse the temptation to guess and
raise an :exc:`AmbiguousSolution <networkx.AmbiguousSolution>`
Exception if the input graph for
:func:`bipartite.sets <networkx.algorithms.bipartite.basic.sets>`
is disconnected.
Using the `bipartite` node attribute, you can easily get the two node sets:
>>> top_nodes = {n for n, d in B.nodes(data=True) if d["bipartite"] == 0}
>>> bottom_nodes = set(B) - top_nodes
So you can easily use the bipartite algorithms that require, as an argument, a
container with all nodes that belong to one node set:
>>> print(round(bipartite.density(B, bottom_nodes), 2))
0.5
>>> G = bipartite.projected_graph(B, top_nodes)
All bipartite graph generators in NetworkX build bipartite graphs with the
`bipartite` node attribute. Thus, you can use the same approach:
>>> RB = bipartite.random_graph(5, 7, 0.2)
>>> RB_top = {n for n, d in RB.nodes(data=True) if d["bipartite"] == 0}
>>> RB_bottom = set(RB) - RB_top
>>> list(RB_top)
[0, 1, 2, 3, 4]
>>> list(RB_bottom)
[5, 6, 7, 8, 9, 10, 11]
For other bipartite graph generators see
:mod:`Generators <networkx.algorithms.bipartite.generators>`.
"""
from networkx.algorithms.bipartite.basic import *
from networkx.algorithms.bipartite.centrality import *
from networkx.algorithms.bipartite.cluster import *
from networkx.algorithms.bipartite.covering import *
from networkx.algorithms.bipartite.edgelist import *
from networkx.algorithms.bipartite.matching import *
from networkx.algorithms.bipartite.matrix import *
from networkx.algorithms.bipartite.projection import *
from networkx.algorithms.bipartite.redundancy import *
from networkx.algorithms.bipartite.spectral import *
from networkx.algorithms.bipartite.generators import *

@@ -1,303 +0,0 @@
"""
==========================
Bipartite Graph Algorithms
==========================
"""
import networkx as nx
from networkx.algorithms.components import connected_components
__all__ = [
"is_bipartite",
"is_bipartite_node_set",
"color",
"sets",
"density",
"degrees",
]
def color(G):
"""Returns a two-coloring of the graph.
Raises an exception if the graph is not bipartite.
Parameters
----------
G : NetworkX graph
Returns
-------
color : dictionary
A dictionary keyed by node with a 1 or 0 as data for each node color.
Raises
------
NetworkXError
If the graph is not two-colorable.
Examples
--------
>>> from networkx.algorithms import bipartite
>>> G = nx.path_graph(4)
>>> c = bipartite.color(G)
>>> print(c)
{0: 1, 1: 0, 2: 1, 3: 0}
You can use this to set a node attribute indicating the bipartite set:
>>> nx.set_node_attributes(G, c, "bipartite")
>>> print(G.nodes[0]["bipartite"])
1
>>> print(G.nodes[1]["bipartite"])
0
"""
if G.is_directed():
import itertools
def neighbors(v):
return itertools.chain.from_iterable([G.predecessors(v), G.successors(v)])
else:
neighbors = G.neighbors
color = {}
for n in G: # handle disconnected graphs
if n in color or len(G[n]) == 0: # skip isolates
continue
queue = [n]
color[n] = 1 # nodes seen with color (1 or 0)
while queue:
v = queue.pop()
c = 1 - color[v] # opposite color of node v
for w in neighbors(v):
if w in color:
if color[w] == color[v]:
raise nx.NetworkXError("Graph is not bipartite.")
else:
color[w] = c
queue.append(w)
# color isolates with 0
color.update(dict.fromkeys(nx.isolates(G), 0))
return color
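# Illustrative sketch (added for exposition; not part of the original module):
# disconnected graphs are colored component by component, and isolated nodes
# are assigned color 0 at the end.
if __name__ == "__main__":  # pragma: no cover - example only
    import networkx as nx
    from networkx.algorithms import bipartite

    G = nx.Graph([(0, 1)])
    G.add_node(2)  # isolate
    assert bipartite.color(G) == {0: 1, 1: 0, 2: 0}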
def is_bipartite(G):
""" Returns True if graph G is bipartite, False if not.
Parameters
----------
G : NetworkX graph
Examples
--------
>>> from networkx.algorithms import bipartite
>>> G = nx.path_graph(4)
>>> print(bipartite.is_bipartite(G))
True
See Also
--------
color, is_bipartite_node_set
"""
try:
color(G)
return True
except nx.NetworkXError:
return False
def is_bipartite_node_set(G, nodes):
"""Returns True if nodes and G/nodes are a bipartition of G.
Parameters
----------
G : NetworkX graph
nodes: list or container
Check whether these nodes form one of the two bipartite node sets.
Examples
--------
>>> from networkx.algorithms import bipartite
>>> G = nx.path_graph(4)
>>> X = set([1, 3])
>>> bipartite.is_bipartite_node_set(G, X)
True
Notes
-----
For connected graphs the bipartite sets are unique. This function handles
disconnected graphs.
"""
S = set(nodes)
for CC in (G.subgraph(c).copy() for c in connected_components(G)):
X, Y = sets(CC)
if not (
(X.issubset(S) and Y.isdisjoint(S)) or (Y.issubset(S) and X.isdisjoint(S))
):
return False
return True
def sets(G, top_nodes=None):
"""Returns bipartite node sets of graph G.
Raises an exception if the graph is not bipartite or if the input
graph is disconnected and thus more than one valid solution exists.
See :mod:`bipartite documentation <networkx.algorithms.bipartite>`
for further details on how bipartite graphs are handled in NetworkX.
Parameters
----------
G : NetworkX graph
top_nodes : container, optional
Container with all nodes in one bipartite node set. If not supplied
it will be computed. But if more than one solution exists an exception
will be raised.
Returns
-------
X : set
Nodes from one side of the bipartite graph.
Y : set
Nodes from the other side.
Raises
------
AmbiguousSolution
Raised if the input bipartite graph is disconnected and no container
with all nodes in one bipartite set is provided. When determining
the nodes in each bipartite set more than one valid solution is
possible if the input graph is disconnected.
NetworkXError
Raised if the input graph is not bipartite.
Examples
--------
>>> from networkx.algorithms import bipartite
>>> G = nx.path_graph(4)
>>> X, Y = bipartite.sets(G)
>>> list(X)
[0, 2]
>>> list(Y)
[1, 3]
See Also
--------
color
"""
if G.is_directed():
is_connected = nx.is_weakly_connected
else:
is_connected = nx.is_connected
if top_nodes is not None:
X = set(top_nodes)
Y = set(G) - X
else:
if not is_connected(G):
msg = "Disconnected graph: Ambiguous solution for bipartite sets."
raise nx.AmbiguousSolution(msg)
c = color(G)
X = {n for n, is_top in c.items() if is_top}
Y = {n for n, is_top in c.items() if not is_top}
return (X, Y)
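# Illustrative sketch (added for exposition; not part of the original module):
# on a disconnected graph the two-coloring is ambiguous, so sets() raises
# unless one node set is supplied explicitly via top_nodes.
if __name__ == "__main__":  # pragma: no cover - example only
    import networkx as nx
    from networkx.algorithms import bipartite

    G = nx.Graph([(0, 1), (2, 3)])
    try:
        bipartite.sets(G)
    except nx.AmbiguousSolution:
        pass
    X, Y = bipartite.sets(G, top_nodes=[0, 2])
    assert (X, Y) == ({0, 2}, {1, 3})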
def density(B, nodes):
"""Returns density of bipartite graph B.
Parameters
----------
B : NetworkX graph
nodes: list or container
Nodes in one node set of the bipartite graph.
Returns
-------
d : float
The bipartite density
Examples
--------
>>> from networkx.algorithms import bipartite
>>> G = nx.complete_bipartite_graph(3, 2)
>>> X = set([0, 1, 2])
>>> bipartite.density(G, X)
1.0
>>> Y = set([3, 4])
>>> bipartite.density(G, Y)
1.0
Notes
-----
The container of nodes passed as argument must contain all nodes
in one of the two bipartite node sets to avoid ambiguity in the
case of disconnected graphs.
See :mod:`bipartite documentation <networkx.algorithms.bipartite>`
for further details on how bipartite graphs are handled in NetworkX.
See Also
--------
color
"""
n = len(B)
m = nx.number_of_edges(B)
nb = len(nodes)
nt = n - nb
if m == 0: # includes cases n==0 and n==1
d = 0.0
else:
if B.is_directed():
d = m / (2.0 * float(nb * nt))
else:
d = m / float(nb * nt)
return d
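# Illustrative hand check (added for exposition; not part of the original
# module): K_{3,2} has m = 6 edges, nb = 3 nodes in one set and nt = 2 in the
# other, so the undirected bipartite density is 6 / (3 * 2) = 1.0.
if __name__ == "__main__":  # pragma: no cover - example only
    import networkx as nx
    from networkx.algorithms import bipartite

    G = nx.complete_bipartite_graph(3, 2)
    assert bipartite.density(G, {0, 1, 2}) == 1.0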
def degrees(B, nodes, weight=None):
"""Returns the degrees of the two node sets in the bipartite graph B.
Parameters
----------
B : NetworkX graph
nodes: list or container
Nodes in one node set of the bipartite graph.
weight : string or None, optional (default=None)
The edge attribute that holds the numerical value used as a weight.
If None, then each edge has weight 1.
The degree is the sum of the edge weights adjacent to the node.
Returns
-------
(degX,degY) : tuple of dictionaries
The degrees of the two bipartite sets as dictionaries keyed by node.
Examples
--------
>>> from networkx.algorithms import bipartite
>>> G = nx.complete_bipartite_graph(3, 2)
>>> Y = set([3, 4])
>>> degX, degY = bipartite.degrees(G, Y)
>>> dict(degX)
{0: 2, 1: 2, 2: 2}
Notes
-----
The container of nodes passed as argument must contain all nodes
in one of the two bipartite node sets to avoid ambiguity in the
case of disconnected graphs.
See :mod:`bipartite documentation <networkx.algorithms.bipartite>`
for further details on how bipartite graphs are handled in NetworkX.
See Also
--------
color, density
"""
bottom = set(nodes)
top = set(B) - bottom
return (B.degree(top, weight), B.degree(bottom, weight))

@@ -1,268 +0,0 @@
import networkx as nx
__all__ = ["degree_centrality", "betweenness_centrality", "closeness_centrality"]
def degree_centrality(G, nodes):
r"""Compute the degree centrality for nodes in a bipartite network.
The degree centrality for a node `v` is the fraction of nodes
connected to it.
Parameters
----------
G : graph
A bipartite network
nodes : list or container
Container with all nodes in one bipartite node set.
Returns
-------
centrality : dictionary
Dictionary keyed by node with bipartite degree centrality as the value.
See Also
--------
betweenness_centrality,
closeness_centrality,
sets,
is_bipartite
Notes
-----
The nodes input parameter must contain all nodes in one bipartite node set,
but the dictionary returned contains all nodes from both bipartite node
sets. See :mod:`bipartite documentation <networkx.algorithms.bipartite>`
for further details on how bipartite graphs are handled in NetworkX.
For unipartite networks, the degree centrality values are
normalized by dividing by the maximum possible degree (which is
`n-1` where `n` is the number of nodes in G).
In the bipartite case, the maximum possible degree of a node in a
bipartite node set is the number of nodes in the opposite node set
[1]_. The degree centrality for a node `v` in the bipartite
sets `U` with `n` nodes and `V` with `m` nodes is
.. math::
d_{v} = \frac{deg(v)}{m}, \quad \mbox{for } v \in U ,
d_{v} = \frac{deg(v)}{n}, \quad \mbox{for } v \in V ,
where `deg(v)` is the degree of node `v`.
References
----------
.. [1] Borgatti, S.P. and Halgin, D. In press. "Analyzing Affiliation
Networks". In Carrington, P. and Scott, J. (eds) The Sage Handbook
of Social Network Analysis. Sage Publications.
http://www.steveborgatti.com/research/publications/bhaffiliations.pdf
"""
top = set(nodes)
bottom = set(G) - top
s = 1.0 / len(bottom)
centrality = {n: d * s for n, d in G.degree(top)}
s = 1.0 / len(top)
centrality.update({n: d * s for n, d in G.degree(bottom)})
return centrality
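# Illustrative hand check (added for exposition; not part of the original
# module): in K_{3,2} every top node has degree 2 = |bottom| and every bottom
# node has degree 3 = |top|, so all bipartite degree centralities equal 1.0.
if __name__ == "__main__":  # pragma: no cover - example only
    import networkx as nx
    from networkx.algorithms import bipartite

    G = nx.complete_bipartite_graph(3, 2)
    centrality = bipartite.degree_centrality(G, nodes={0, 1, 2})
    assert all(c == 1.0 for c in centrality.values())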
def betweenness_centrality(G, nodes):
r"""Compute betweenness centrality for nodes in a bipartite network.
Betweenness centrality of a node `v` is the sum of the
fraction of all-pairs shortest paths that pass through `v`.
Values of betweenness are normalized by the maximum possible
value which for bipartite graphs is limited by the relative size
of the two node sets [1]_.
Let `n` be the number of nodes in the node set `U` and
`m` be the number of nodes in the node set `V`, then
nodes in `U` are normalized by dividing by
.. math::
\frac{1}{2} [m^2 (s + 1)^2 + m (s + 1)(2t - s - 1) - t (2s - t + 3)] ,
where
.. math::
s = \lfloor (n - 1) / m \rfloor , \quad t = (n - 1) \bmod m ,
and nodes in `V` are normalized by dividing by
.. math::
\frac{1}{2} [n^2 (p + 1)^2 + n (p + 1)(2r - p - 1) - r (2p - r + 3)] ,
where,
.. math::
p = \lfloor (m - 1) / n \rfloor , \quad r = (m - 1) \bmod n .
Parameters
----------
G : graph
A bipartite graph
nodes : list or container
Container with all nodes in one bipartite node set.
Returns
-------
betweenness : dictionary
Dictionary keyed by node with bipartite betweenness centrality
as the value.
See Also
--------
degree_centrality,
closeness_centrality,
sets,
is_bipartite
Notes
-----
The nodes input parameter must contain all nodes in one bipartite node set,
but the dictionary returned contains all nodes from both node sets.
See :mod:`bipartite documentation <networkx.algorithms.bipartite>`
for further details on how bipartite graphs are handled in NetworkX.
References
----------
.. [1] Borgatti, S.P. and Halgin, D. In press. "Analyzing Affiliation
Networks". In Carrington, P. and Scott, J. (eds) The Sage Handbook
of Social Network Analysis. Sage Publications.
http://www.steveborgatti.com/research/publications/bhaffiliations.pdf
"""
top = set(nodes)
bottom = set(G) - top
n = float(len(top))
m = float(len(bottom))
s = (n - 1) // m
t = (n - 1) % m
bet_max_top = (
((m ** 2) * ((s + 1) ** 2))
+ (m * (s + 1) * (2 * t - s - 1))
- (t * ((2 * s) - t + 3))
) / 2.0
p = (m - 1) // n
r = (m - 1) % n
bet_max_bot = (
((n ** 2) * ((p + 1) ** 2))
+ (n * (p + 1) * (2 * r - p - 1))
- (r * ((2 * p) - r + 3))
) / 2.0
betweenness = nx.betweenness_centrality(G, normalized=False, weight=None)
for node in top:
betweenness[node] /= bet_max_top
for node in bottom:
betweenness[node] /= bet_max_bot
return betweenness
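# Illustrative hand check (added for exposition; not part of the original
# module): for K_{3,2} with n = 3 top and m = 2 bottom nodes, the top-set
# normalization constant from the formula above works out to 4.
if __name__ == "__main__":  # pragma: no cover - example only
    n, m = 3.0, 2.0
    s, t = (n - 1) // m, (n - 1) % m  # s = 1.0, t = 0.0
    bet_max_top = (
        (m ** 2) * ((s + 1) ** 2)
        + m * (s + 1) * (2 * t - s - 1)
        - t * ((2 * s) - t + 3)
    ) / 2.0
    assert bet_max_top == 4.0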
def closeness_centrality(G, nodes, normalized=True):
r"""Compute the closeness centrality for nodes in a bipartite network.
The closeness of a node is the distance to all other nodes in the
graph or in the case that the graph is not connected to all other nodes
in the connected component containing that node.
Parameters
----------
G : graph
A bipartite network
nodes : list or container
Container with all nodes in one bipartite node set.
normalized : bool, optional
If True (default) normalize by connected component size.
Returns
-------
closeness : dictionary
Dictionary keyed by node with bipartite closeness centrality
as the value.
See Also
--------
betweenness_centrality,
degree_centrality
sets,
is_bipartite
Notes
-----
The nodes input parameter must contain all nodes in one bipartite node set,
but the dictionary returned contains all nodes from both node sets.
See :mod:`bipartite documentation <networkx.algorithms.bipartite>`
for further details on how bipartite graphs are handled in NetworkX.
Closeness centrality is normalized by the minimum distance possible.
In the bipartite case the minimum distance for a node in one bipartite
node set is 1 from all nodes in the other node set and 2 from all
other nodes in its own set [1]_. Thus the closeness centrality
for node `v` in the two bipartite sets `U` with
`n` nodes and `V` with `m` nodes is
.. math::
c_{v} = \frac{m + 2(n - 1)}{d}, \quad \mbox{for } v \in U,
c_{v} = \frac{n + 2(m - 1)}{d}, \quad \mbox{for } v \in V,
where `d` is the sum of the distances from `v` to all
other nodes.
Higher values of closeness indicate higher centrality.
As in the unipartite case, setting normalized=True causes the
values to be normalized further by (n - 1) / (size(G) - 1), where n is the
number of nodes in the connected part of the graph containing the
node. If the graph is not completely connected, this algorithm
computes the closeness centrality for each connected part
separately.
References
----------
.. [1] Borgatti, S.P. and Halgin, D. In press. "Analyzing Affiliation
Networks". In Carrington, P. and Scott, J. (eds) The Sage Handbook
of Social Network Analysis. Sage Publications.
http://www.steveborgatti.com/research/publications/bhaffiliations.pdf
"""
closeness = {}
path_length = nx.single_source_shortest_path_length
top = set(nodes)
bottom = set(G) - top
n = float(len(top))
m = float(len(bottom))
for node in top:
sp = dict(path_length(G, node))
totsp = sum(sp.values())
if totsp > 0.0 and len(G) > 1:
closeness[node] = (m + 2 * (n - 1)) / totsp
if normalized:
s = (len(sp) - 1.0) / (len(G) - 1)
closeness[node] *= s
else:
closeness[node] = 0.0
for node in bottom:
sp = dict(path_length(G, node))
totsp = sum(sp.values())
if totsp > 0.0 and len(G) > 1:
closeness[node] = (n + 2 * (m - 1)) / totsp
if normalized:
s = (len(sp) - 1.0) / (len(G) - 1)
closeness[node] *= s
else:
closeness[node] = 0.0
return closeness

@@ -1,276 +0,0 @@
"""Functions for computing clustering of pairs
"""
import itertools
import networkx as nx
__all__ = [
"clustering",
"average_clustering",
"latapy_clustering",
"robins_alexander_clustering",
]
def cc_dot(nu, nv):
return float(len(nu & nv)) / len(nu | nv)
def cc_max(nu, nv):
return float(len(nu & nv)) / max(len(nu), len(nv))
def cc_min(nu, nv):
return float(len(nu & nv)) / min(len(nu), len(nv))
modes = {"dot": cc_dot, "min": cc_min, "max": cc_max}
def latapy_clustering(G, nodes=None, mode="dot"):
r"""Compute a bipartite clustering coefficient for nodes.
The bipartite clustering coefficient is a measure of the local density
of connections defined as [1]_:
.. math::
c_u = \frac{\sum_{v \in N(N(u))} c_{uv} }{|N(N(u))|}
where `N(N(u))` are the second order neighbors of `u` in `G` excluding `u`,
and `c_{uv}` is the pairwise clustering coefficient between nodes
`u` and `v`.
The mode selects the function for `c_{uv}` which can be:
`dot`:
.. math::
c_{uv}=\frac{|N(u)\cap N(v)|}{|N(u) \cup N(v)|}
`min`:
.. math::
c_{uv}=\frac{|N(u)\cap N(v)|}{\min(|N(u)|,|N(v)|)}
`max`:
.. math::
c_{uv}=\frac{|N(u)\cap N(v)|}{\max(|N(u)|,|N(v)|)}
Parameters
----------
G : graph
A bipartite graph
nodes : list or iterable (optional)
Compute bipartite clustering for these nodes. The default
is all nodes in G.
mode : string
The pairwise bipartite clustering method to be used in the computation.
It must be "dot", "max", or "min".
Returns
-------
clustering : dictionary
A dictionary keyed by node with the clustering coefficient value.
Examples
--------
>>> from networkx.algorithms import bipartite
>>> G = nx.path_graph(4) # path graphs are bipartite
>>> c = bipartite.clustering(G)
>>> c[0]
0.5
>>> c = bipartite.clustering(G, mode="min")
>>> c[0]
1.0
See Also
--------
robins_alexander_clustering
square_clustering
average_clustering
References
----------
.. [1] Latapy, Matthieu, Clémence Magnien, and Nathalie Del Vecchio (2008).
Basic notions for the analysis of large two-mode networks.
Social Networks 30(1), 31--48.
"""
if not nx.algorithms.bipartite.is_bipartite(G):
raise nx.NetworkXError("Graph is not bipartite")
try:
cc_func = modes[mode]
except KeyError as e:
raise nx.NetworkXError(
"Mode for bipartite clustering must be: dot, min or max"
) from e
if nodes is None:
nodes = G
ccs = {}
for v in nodes:
cc = 0.0
nbrs2 = {u for nbr in G[v] for u in G[nbr]} - {v}
for u in nbrs2:
cc += cc_func(set(G[u]), set(G[v]))
if cc > 0.0: # len(nbrs2)>0
cc /= len(nbrs2)
ccs[v] = cc
return ccs
clustering = latapy_clustering
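# Illustrative hand check (added for exposition; not part of the original
# module): for node 0 of the path 0-1-2-3 in "dot" mode, the only second-order
# neighbor is 2, and c_{02} = |{1}| / |{1, 3}| = 0.5.
if __name__ == "__main__":  # pragma: no cover - example only
    import networkx as nx
    from networkx.algorithms import bipartite

    c = bipartite.clustering(nx.path_graph(4))
    assert c[0] == 0.5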
def average_clustering(G, nodes=None, mode="dot"):
r"""Compute the average bipartite clustering coefficient.
A clustering coefficient for the whole graph is the average,
.. math::
C = \frac{1}{n}\sum_{v \in G} c_v,
where `n` is the number of nodes in `G`.
Similar measures for the two bipartite sets can be defined [1]_
.. math::
C_X = \frac{1}{|X|}\sum_{v \in X} c_v,
where `X` is a bipartite set of `G`.
Parameters
----------
G : graph
a bipartite graph
nodes : list or iterable, optional
A container of nodes to use in computing the average.
The nodes should be either the entire graph (the default) or one of the
bipartite sets.
mode : string
The pairwise bipartite clustering method.
It must be "dot", "max", or "min"
Returns
-------
clustering : float
The average bipartite clustering for the given set of nodes or the
entire graph if no nodes are specified.
Examples
--------
>>> from networkx.algorithms import bipartite
>>> G = nx.star_graph(3) # star graphs are bipartite
>>> bipartite.average_clustering(G)
0.75
>>> X, Y = bipartite.sets(G)
>>> bipartite.average_clustering(G, X)
0.0
>>> bipartite.average_clustering(G, Y)
1.0
See Also
--------
clustering
Notes
-----
The container of nodes passed to this function must contain all of the nodes
in one of the bipartite sets ("top" or "bottom") in order to compute
the correct average bipartite clustering coefficients.
See :mod:`bipartite documentation <networkx.algorithms.bipartite>`
for further details on how bipartite graphs are handled in NetworkX.
References
----------
.. [1] Latapy, Matthieu, Clémence Magnien, and Nathalie Del Vecchio (2008).
Basic notions for the analysis of large two-mode networks.
Social Networks 30(1), 31--48.
"""
if nodes is None:
nodes = G
ccs = latapy_clustering(G, nodes=nodes, mode=mode)
return float(sum(ccs[v] for v in nodes)) / len(nodes)
def robins_alexander_clustering(G):
r"""Compute the bipartite clustering of G.
Robins and Alexander [1]_ defined the bipartite clustering coefficient as
four times the number of four-cycles `C_4` divided by the number of
three-paths `L_3` in a bipartite graph:
.. math::
CC_4 = \frac{4 * C_4}{L_3}
Parameters
----------
G : graph
a bipartite graph
Returns
-------
clustering : float
The Robins and Alexander bipartite clustering for the input graph.
Examples
--------
>>> from networkx.algorithms import bipartite
>>> G = nx.davis_southern_women_graph()
>>> print(round(bipartite.robins_alexander_clustering(G), 3))
0.468
See Also
--------
latapy_clustering
square_clustering
References
----------
.. [1] Robins, G. and M. Alexander (2004). Small worlds among interlocking
directors: Network structure and distance in bipartite graphs.
Computational & Mathematical Organization Theory 10(1), 69–94.
"""
if G.order() < 4 or G.size() < 3:
return 0
L_3 = _threepaths(G)
if L_3 == 0:
return 0
C_4 = _four_cycles(G)
return (4.0 * C_4) / L_3
def _four_cycles(G):
cycles = 0
for v in G:
for u, w in itertools.combinations(G[v], 2):
cycles += len((set(G[u]) & set(G[w])) - {v})
return cycles / 4
def _threepaths(G):
paths = 0
for v in G:
for u in G[v]:
for w in set(G[u]) - {v}:
paths += len(set(G[w]) - {v, u})
# Divide by two because we count each three-path twice,
# once for each possible starting point
return paths / 2
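# Illustrative hand check (added for exposition; not part of the original
# module): the 4-cycle contains exactly one four-cycle and four three-paths,
# so CC_4 = (4 * 1) / 4 = 1.0.
if __name__ == "__main__":  # pragma: no cover - example only
    import networkx as nx
    from networkx.algorithms import bipartite

    assert bipartite.robins_alexander_clustering(nx.cycle_graph(4)) == 1.0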

@@ -1,55 +0,0 @@
""" Functions related to graph covers."""
from networkx.utils import not_implemented_for
from networkx.algorithms.bipartite.matching import hopcroft_karp_matching
from networkx.algorithms.covering import min_edge_cover as _min_edge_cover
__all__ = ["min_edge_cover"]
@not_implemented_for("directed")
@not_implemented_for("multigraph")
def min_edge_cover(G, matching_algorithm=None):
"""Returns a set of edges which constitutes
the minimum edge cover of the graph.
The smallest edge cover can be found in polynomial time by finding
a maximum matching and extending it greedily so that all nodes
are covered.
Parameters
----------
G : NetworkX graph
An undirected bipartite graph.
matching_algorithm : function
A function that returns a maximum cardinality matching in a
given bipartite graph. The function must take one input, the
graph ``G``, and return a dictionary mapping each node to its
mate. If not specified,
:func:`~networkx.algorithms.bipartite.matching.hopcroft_karp_matching`
will be used. Other possibilities include
:func:`~networkx.algorithms.bipartite.matching.eppstein_matching`.
Returns
-------
set
A set of the edges in a minimum edge cover of the graph, given as
pairs of nodes. It contains both the edges `(u, v)` and `(v, u)`
for given nodes `u` and `v` among the edges of minimum edge cover.
Notes
-----
An edge cover of a graph is a set of edges such that every node of
the graph is incident to at least one edge of the set.
A minimum edge cover is an edge covering of smallest cardinality.
Due to its implementation, the worst-case running time of this algorithm
is bounded by the worst-case running time of the function
``matching_algorithm``.
"""
if G.order() == 0: # Special case for the empty graph
return set()
if matching_algorithm is None:
matching_algorithm = hopcroft_karp_matching
return _min_edge_cover(G, matching_algorithm=matching_algorithm)
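# Illustrative sketch (added for exposition; not part of the original module):
# in the path 0-1-2 both edges are needed to cover the endpoints, and each
# cover edge is reported in both orientations.
if __name__ == "__main__":  # pragma: no cover - example only
    import networkx as nx
    from networkx.algorithms import bipartite

    cover = bipartite.min_edge_cover(nx.path_graph(3))
    assert cover == {(0, 1), (1, 0), (1, 2), (2, 1)}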

@@ -1,357 +0,0 @@
"""
********************
Bipartite Edge Lists
********************
Read and write NetworkX graphs as bipartite edge lists.
Format
------
You can read or write three formats of edge lists with these functions.
Node pairs with no data::
1 2
Python dictionary as data::
1 2 {'weight':7, 'color':'green'}
Arbitrary data::
1 2 7 green
For each edge (u, v) the node u is assigned to part 0 and the node v to part 1.
"""
__all__ = ["generate_edgelist", "write_edgelist", "parse_edgelist", "read_edgelist"]
import networkx as nx
from networkx.utils import open_file, not_implemented_for
@open_file(1, mode="wb")
def write_edgelist(G, path, comments="#", delimiter=" ", data=True, encoding="utf-8"):
"""Write a bipartite graph as a list of edges.
Parameters
----------
G : Graph
A NetworkX bipartite graph
path : file or string
File or filename to write. If a file is provided, it must be
opened in 'wb' mode. Filenames ending in .gz or .bz2 will be compressed.
comments : string, optional
The character used to indicate the start of a comment
delimiter : string, optional
The string used to separate values. The default is whitespace.
data : bool or list, optional
If False write no edge data.
If True write a string representation of the edge data dictionary.
If a list (or other iterable) is provided, write the keys specified
in the list.
encoding: string, optional
Specify which encoding to use when writing file.
Examples
--------
>>> G = nx.path_graph(4)
>>> G.add_nodes_from([0, 2], bipartite=0)
>>> G.add_nodes_from([1, 3], bipartite=1)
>>> nx.write_edgelist(G, "test.edgelist")
>>> fh = open("test.edgelist", "wb")
>>> nx.write_edgelist(G, fh)
>>> nx.write_edgelist(G, "test.edgelist.gz")
>>> nx.write_edgelist(G, "test.edgelist.gz", data=False)
>>> G = nx.Graph()
>>> G.add_edge(1, 2, weight=7, color="red")
>>> nx.write_edgelist(G, "test.edgelist", data=False)
>>> nx.write_edgelist(G, "test.edgelist", data=["color"])
>>> nx.write_edgelist(G, "test.edgelist", data=["color", "weight"])
See Also
--------
read_edgelist()
generate_edgelist()
"""
for line in generate_edgelist(G, delimiter, data):
line += "\n"
path.write(line.encode(encoding))
@not_implemented_for("directed")
def generate_edgelist(G, delimiter=" ", data=True):
"""Generate a single line of the bipartite graph G in edge list format.
Parameters
----------
G : NetworkX graph
The graph is assumed to have the node attribute `bipartite` set to 0 or 1,
representing the two graph parts
delimiter : string, optional
Separator for node labels
data : bool or list of keys
If False generate no edge data. If True use a dictionary
representation of edge data. If a list of keys use a list of data
values corresponding to the keys.
Returns
-------
lines : string
Lines of data in edgelist format.
Examples
--------
>>> from networkx.algorithms import bipartite
>>> G = nx.path_graph(4)
>>> G.add_nodes_from([0, 2], bipartite=0)
>>> G.add_nodes_from([1, 3], bipartite=1)
>>> G[1][2]["weight"] = 3
>>> G[2][3]["capacity"] = 12
>>> for line in bipartite.generate_edgelist(G, data=False):
... print(line)
0 1
2 1
2 3
>>> for line in bipartite.generate_edgelist(G):
... print(line)
0 1 {}
2 1 {'weight': 3}
2 3 {'capacity': 12}
>>> for line in bipartite.generate_edgelist(G, data=["weight"]):
... print(line)
0 1
2 1 3
2 3
"""
try:
part0 = [n for n, d in G.nodes.items() if d["bipartite"] == 0]
except BaseException as e:
raise AttributeError("Missing node attribute `bipartite`") from e
if data is True or data is False:
for n in part0:
for e in G.edges(n, data=data):
yield delimiter.join(map(str, e))
else:
for n in part0:
for u, v, d in G.edges(n, data=True):
e = [u, v]
try:
e.extend(d[k] for k in data)
except KeyError:
pass # missing data for this edge, should warn?
yield delimiter.join(map(str, e))
def parse_edgelist(
lines, comments="#", delimiter=None, create_using=None, nodetype=None, data=True
):
"""Parse lines of an edge list representation of a bipartite graph.
Parameters
----------
lines : list or iterator of strings
Input data in edgelist format
comments : string, optional
Marker for comment lines
delimiter : string, optional
Separator for node labels
create_using: NetworkX graph container, optional
Use given NetworkX graph for holding nodes or edges.
nodetype : Python type, optional
Convert nodes to this type.
data : bool or list of (label,type) tuples
If False, generate no edge data. If True, use a dictionary
representation of edge data. If a list of (label, type) tuples, use those
as the dictionary key names and value types for the edge data.
Returns
-------
G: NetworkX Graph
The bipartite graph corresponding to lines
Examples
--------
Edgelist with no data:
>>> from networkx.algorithms import bipartite
>>> lines = ["1 2", "2 3", "3 4"]
>>> G = bipartite.parse_edgelist(lines, nodetype=int)
>>> sorted(G.nodes())
[1, 2, 3, 4]
>>> sorted(G.nodes(data=True))
[(1, {'bipartite': 0}), (2, {'bipartite': 0}), (3, {'bipartite': 0}), (4, {'bipartite': 1})]
>>> sorted(G.edges())
[(1, 2), (2, 3), (3, 4)]
Edgelist with data in Python dictionary representation:
>>> lines = ["1 2 {'weight':3}", "2 3 {'weight':27}", "3 4 {'weight':3.0}"]
>>> G = bipartite.parse_edgelist(lines, nodetype=int)
>>> sorted(G.nodes())
[1, 2, 3, 4]
>>> sorted(G.edges(data=True))
[(1, 2, {'weight': 3}), (2, 3, {'weight': 27}), (3, 4, {'weight': 3.0})]
Edgelist with data in a list:
>>> lines = ["1 2 3", "2 3 27", "3 4 3.0"]
>>> G = bipartite.parse_edgelist(lines, nodetype=int, data=(("weight", float),))
>>> sorted(G.nodes())
[1, 2, 3, 4]
>>> sorted(G.edges(data=True))
[(1, 2, {'weight': 3.0}), (2, 3, {'weight': 27.0}), (3, 4, {'weight': 3.0})]
See Also
--------
read_edgelist
"""
from ast import literal_eval
G = nx.empty_graph(0, create_using)
for line in lines:
p = line.find(comments)
if p >= 0:
line = line[:p]
if not len(line):
continue
# split line, should have 2 or more
s = line.strip().split(delimiter)
if len(s) < 2:
continue
u = s.pop(0)
v = s.pop(0)
d = s
if nodetype is not None:
try:
u = nodetype(u)
v = nodetype(v)
except BaseException as e:
raise TypeError(
f"Failed to convert nodes {u},{v} " f"to type {nodetype}."
) from e
if len(d) == 0 or data is False:
# no data or data type specified
edgedata = {}
elif data is True:
# no edge types specified
try: # try to evaluate as dictionary
edgedata = dict(literal_eval(" ".join(d)))
except BaseException as e:
raise TypeError(
f"Failed to convert edge data ({d})" f"to dictionary."
) from e
else:
# convert edge data to dictionary with specified keys and type
if len(d) != len(data):
raise IndexError(
f"Edge data {d} and data_keys {data} are not the same length"
)
edgedata = {}
for (edge_key, edge_type), edge_value in zip(data, d):
try:
edge_value = edge_type(edge_value)
except BaseException as e:
raise TypeError(
f"Failed to convert {edge_key} data "
f"{edge_value} to type {edge_type}."
) from e
edgedata.update({edge_key: edge_value})
G.add_node(u, bipartite=0)
G.add_node(v, bipartite=1)
G.add_edge(u, v, **edgedata)
return G
@open_file(0, mode="rb")
def read_edgelist(
path,
comments="#",
delimiter=None,
create_using=None,
nodetype=None,
data=True,
edgetype=None,
encoding="utf-8",
):
"""Read a bipartite graph from a list of edges.
Parameters
----------
path : file or string
File or filename to read. If a file is provided, it must be
opened in 'rb' mode.
Filenames ending in .gz or .bz2 will be uncompressed.
comments : string, optional
The character used to indicate the start of a comment.
delimiter : string, optional
The string used to separate values. The default is whitespace.
create_using : Graph container, optional,
Use specified container to build graph. The default is networkx.Graph,
an undirected graph.
nodetype : int, float, str, Python type, optional
Convert node data from strings to specified type
data : bool or list of (label,type) tuples
Tuples specifying dictionary key names and types for edge data
edgetype : int, float, str, Python type, optional OBSOLETE
Convert edge data from strings to specified type and use as 'weight'
encoding: string, optional
Specify which encoding to use when reading file.
Returns
-------
G : graph
A networkx Graph or other type specified with create_using
Examples
--------
>>> from networkx.algorithms import bipartite
>>> G = nx.path_graph(4)
>>> G.add_nodes_from([0, 2], bipartite=0)
>>> G.add_nodes_from([1, 3], bipartite=1)
>>> bipartite.write_edgelist(G, "test.edgelist")
>>> G = bipartite.read_edgelist("test.edgelist")
>>> fh = open("test.edgelist", "rb")
>>> G = bipartite.read_edgelist(fh)
>>> fh.close()
>>> G = bipartite.read_edgelist("test.edgelist", nodetype=int)
Edgelist with data in a list:
>>> textline = "1 2 3"
>>> fh = open("test.edgelist", "w")
>>> d = fh.write(textline)
>>> fh.close()
>>> G = bipartite.read_edgelist(
... "test.edgelist", nodetype=int, data=(("weight", float),)
... )
>>> list(G)
[1, 2]
>>> list(G.edges(data=True))
[(1, 2, {'weight': 3.0})]
See parse_edgelist() for more examples of formatting.
See Also
--------
parse_edgelist
Notes
-----
Since nodes must be hashable, the function nodetype must return hashable
types (e.g. int, float, str, frozenset - or tuples of those, etc.)
"""
lines = (line.decode(encoding) for line in path)
return parse_edgelist(
lines,
comments=comments,
delimiter=delimiter,
create_using=create_using,
nodetype=nodetype,
data=data,
)

@@ -1,595 +0,0 @@
"""
Generators and functions for bipartite graphs.
"""
import math
import numbers
from functools import reduce
import networkx as nx
from networkx.utils import nodes_or_number, py_random_state
__all__ = [
"configuration_model",
"havel_hakimi_graph",
"reverse_havel_hakimi_graph",
"alternating_havel_hakimi_graph",
"preferential_attachment_graph",
"random_graph",
"gnmk_random_graph",
"complete_bipartite_graph",
]
@nodes_or_number([0, 1])
def complete_bipartite_graph(n1, n2, create_using=None):
"""Returns the complete bipartite graph `K_{n_1,n_2}`.
The graph is composed of two partitions with nodes 0 to (n1 - 1)
in the first and nodes n1 to (n1 + n2 - 1) in the second.
Each node in the first is connected to each node in the second.
Parameters
----------
n1 : integer
Number of nodes for node set A.
n2 : integer
Number of nodes for node set B.
create_using : NetworkX graph instance, optional
Return graph of this type.
Notes
-----
Node labels are the integers 0 to `n_1 + n_2 - 1`.
The nodes are assigned the attribute 'bipartite' with the value 0 or 1
to indicate which bipartite set the node belongs to.
This function is not imported in the main namespace.
To use it, use nx.bipartite.complete_bipartite_graph.
"""
G = nx.empty_graph(0, create_using)
if G.is_directed():
raise nx.NetworkXError("Directed Graph not supported")
n1, top = n1
n2, bottom = n2
if isinstance(n2, numbers.Integral):
bottom = [n1 + i for i in bottom]
G.add_nodes_from(top, bipartite=0)
G.add_nodes_from(bottom, bipartite=1)
G.add_edges_from((u, v) for u in top for v in bottom)
G.graph["name"] = f"complete_bipartite_graph({n1},{n2})"
return G
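# Illustrative sketch (added for exposition; not part of the original module):
# K_{2,2} labels the top partition 0..1 and the bottom partition 2..3, and
# tags each node with the 'bipartite' attribute.
if __name__ == "__main__":  # pragma: no cover - example only
    import networkx as nx

    G = nx.bipartite.complete_bipartite_graph(2, 2)
    assert sorted(G.edges()) == [(0, 2), (0, 3), (1, 2), (1, 3)]
    assert G.nodes[0]["bipartite"] == 0 and G.nodes[2]["bipartite"] == 1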
@py_random_state(3)
def configuration_model(aseq, bseq, create_using=None, seed=None):
"""Returns a random bipartite graph from two given degree sequences.
Parameters
----------
aseq : list
Degree sequence for node set A.
bseq : list
Degree sequence for node set B.
create_using : NetworkX graph instance, optional
Return graph of this type.
seed : integer, random_state, or None (default)
Indicator of random number generation state.
See :ref:`Randomness<randomness>`.
The graph is composed of two partitions. Set A has nodes 0 to
(len(aseq) - 1) and set B has nodes len(aseq) to (len(aseq) + len(bseq) - 1).
Nodes from set A are connected to nodes in set B by choosing
randomly from the possible free stubs, one in A and one in B.
Notes
-----
The sum of the two sequences must be equal: sum(aseq)=sum(bseq)
If no graph type is specified use MultiGraph with parallel edges.
If you want a graph with no parallel edges use create_using=Graph()
but then the resulting degree sequences might not be exact.
The nodes are assigned the attribute 'bipartite' with the value 0 or 1
to indicate which bipartite set the node belongs to.
This function is not imported in the main namespace.
To use it, use nx.bipartite.configuration_model.
"""
G = nx.empty_graph(0, create_using, default=nx.MultiGraph)
if G.is_directed():
raise nx.NetworkXError("Directed Graph not supported")
# length and sum of each sequence
lena = len(aseq)
lenb = len(bseq)
suma = sum(aseq)
sumb = sum(bseq)
if not suma == sumb:
raise nx.NetworkXError(
f"invalid degree sequences, sum(aseq)!=sum(bseq),{suma},{sumb}"
)
G = _add_nodes_with_bipartite_label(G, lena, lenb)
if len(aseq) == 0 or max(aseq) == 0:
return G # done if no edges
# build lists of degree-repeated vertex numbers
astubs = [v for v in range(0, lena) for _ in range(aseq[v])]
bstubs = [v for v in range(lena, lena + lenb) for _ in range(bseq[v - lena])]
# shuffle lists
seed.shuffle(astubs)
seed.shuffle(bstubs)
G.add_edges_from([[astubs[i], bstubs[i]] for i in range(suma)])
G.name = "bipartite_configuration_model"
return G
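# Illustrative sketch (added for exposition; not part of the original module):
# with the default MultiGraph the generated degrees match the input sequences
# exactly; set A gets nodes 0..2 and set B gets nodes 3..4.
if __name__ == "__main__":  # pragma: no cover - example only
    import networkx as nx

    G = nx.bipartite.configuration_model([2, 1, 1], [2, 2], seed=42)
    assert [d for _, d in G.degree([0, 1, 2])] == [2, 1, 1]
    assert [d for _, d in G.degree([3, 4])] == [2, 2]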
def havel_hakimi_graph(aseq, bseq, create_using=None):
"""Returns a bipartite graph from two given degree sequences using a
Havel-Hakimi style construction.
The graph is composed of two partitions. Set A has nodes 0 to
(len(aseq) - 1) and set B has nodes len(aseq) to (len(aseq) + len(bseq) - 1).
Nodes from the set A are connected to nodes in the set B by
connecting the highest degree nodes in set A to the highest degree
nodes in set B until all stubs are connected.
Parameters
----------
aseq : list
Degree sequence for node set A.
bseq : list
Degree sequence for node set B.
create_using : NetworkX graph instance, optional
Return graph of this type.
Notes
-----
The sum of the two sequences must be equal: sum(aseq) = sum(bseq).
If no graph type is specified, a MultiGraph (allowing parallel edges) is used.
To forbid parallel edges, pass create_using=nx.Graph(), but then the
resulting degree sequences might not be exact.
The nodes are assigned the attribute 'bipartite' with the value 0 or 1
to indicate which bipartite set the node belongs to.
This function is not imported in the main namespace.
To use it, use nx.bipartite.havel_hakimi_graph.
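Examples
--------
A minimal sketch (the construction itself is deterministic):
>>> from networkx.algorithms import bipartite
>>> G = bipartite.havel_hakimi_graph([2, 2], [2, 2])
>>> sorted(d for n, d in G.degree())
[2, 2, 2, 2]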
"""
G = nx.empty_graph(0, create_using, default=nx.MultiGraph)
if G.is_directed():
raise nx.NetworkXError("Directed Graph not supported")
# length of each sequence
naseq = len(aseq)
nbseq = len(bseq)
suma = sum(aseq)
sumb = sum(bseq)
if suma != sumb:
raise nx.NetworkXError(
f"invalid degree sequences: sum(aseq)={suma} != sum(bseq)={sumb}"
)
G = _add_nodes_with_bipartite_label(G, naseq, nbseq)
if len(aseq) == 0 or max(aseq) == 0:
return G # done if no edges
# build list of degree-repeated vertex numbers
astubs = [[aseq[v], v] for v in range(0, naseq)]
bstubs = [[bseq[v - naseq], v] for v in range(naseq, naseq + nbseq)]
astubs.sort()
while astubs:
(degree, u) = astubs.pop()  # pop the largest-degree node in set A
if degree == 0:
break # done, all are zero
# connect the source to largest degree nodes in the b set
bstubs.sort()
for target in bstubs[-degree:]:
v = target[1]
G.add_edge(u, v)
target[0] -= 1 # note this updates bstubs too.
if target[0] == 0:
bstubs.remove(target)
G.name = "bipartite_havel_hakimi_graph"
return G
def reverse_havel_hakimi_graph(aseq, bseq, create_using=None):
"""Returns a bipartite graph from two given degree sequences using a
Havel-Hakimi style construction.
The graph is composed of two partitions. Set A has nodes 0 to
(len(aseq) - 1) and set B has nodes len(aseq) to
(len(aseq) + len(bseq) - 1). Nodes from set A are connected to nodes
in set B by connecting the highest-degree nodes in set A to the
lowest-degree nodes in set B until all stubs are connected.
Parameters
----------
aseq : list
Degree sequence for node set A.
bseq : list
Degree sequence for node set B.
create_using : NetworkX graph instance, optional
Return graph of this type.
Notes
-----
The sum of the two sequences must be equal: sum(aseq) = sum(bseq).
If no graph type is specified, a MultiGraph (allowing parallel edges) is used.
To forbid parallel edges, pass create_using=nx.Graph(), but then the
resulting degree sequences might not be exact.
The nodes are assigned the attribute 'bipartite' with the value 0 or 1
to indicate which bipartite set the node belongs to.
This function is not imported in the main namespace.
To use it, use nx.bipartite.reverse_havel_hakimi_graph.
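Examples
--------
A minimal sketch (the construction itself is deterministic):
>>> from networkx.algorithms import bipartite
>>> G = bipartite.reverse_havel_hakimi_graph([2, 1], [1, 1, 1])
>>> sorted(d for n, d in G.degree())
[1, 1, 1, 1, 2]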
"""
G = nx.empty_graph(0, create_using, default=nx.MultiGraph)
if G.is_directed():
raise nx.NetworkXError("Directed Graph not supported")
# length of each sequence
lena = len(aseq)
lenb = len(bseq)
suma = sum(aseq)
sumb = sum(bseq)
if suma != sumb:
raise nx.NetworkXError(
f"invalid degree sequences: sum(aseq)={suma} != sum(bseq)={sumb}"
)
G = _add_nodes_with_bipartite_label(G, lena, lenb)
if len(aseq) == 0 or max(aseq) == 0:
return G # done if no edges
# build list of degree-repeated vertex numbers
astubs = [[aseq[v], v] for v in range(0, lena)]
bstubs = [[bseq[v - lena], v] for v in range(lena, lena + lenb)]
astubs.sort()
bstubs.sort()
while astubs:
(degree, u) = astubs.pop()  # pop the largest-degree node in set A
if degree == 0:
break # done, all are zero
# connect the source to the smallest degree nodes in the b set
for target in bstubs[0:degree]:
v = target[1]
G.add_edge(u, v)
target[0] -= 1 # note this updates bstubs too.
if target[0] == 0:
bstubs.remove(target)
G.name = "bipartite_reverse_havel_hakimi_graph"
return G
def alternating_havel_hakimi_graph(aseq, bseq, create_using=None):
"""Returns a bipartite graph from two given degree sequences using
an alternating Havel-Hakimi style construction.
The graph is composed of two partitions. Set A has nodes 0 to
(len(aseq) - 1) and set B has nodes len(aseq) to
(len(aseq) + len(bseq) - 1). Nodes from set A are connected to nodes
in set B by connecting the highest-degree nodes in set A alternately
to the highest- and lowest-degree nodes in set B until all stubs are
connected.
Parameters
----------
aseq : list
Degree sequence for node set A.
bseq : list
Degree sequence for node set B.
create_using : NetworkX graph instance, optional
Return graph of this type.
Notes
-----
The sum of the two sequences must be equal: sum(aseq) = sum(bseq).
If no graph type is specified, a MultiGraph (allowing parallel edges) is used.
To forbid parallel edges, pass create_using=nx.Graph(), but then the
resulting degree sequences might not be exact.
The nodes are assigned the attribute 'bipartite' with the value 0 or 1
to indicate which bipartite set the node belongs to.
This function is not imported in the main namespace.
To use it, use nx.bipartite.alternating_havel_hakimi_graph.
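Examples
--------
A minimal sketch (the construction itself is deterministic):
>>> from networkx.algorithms import bipartite
>>> G = bipartite.alternating_havel_hakimi_graph([2, 2], [2, 1, 1])
>>> sorted(d for n, d in G.degree())
[1, 1, 2, 2, 2]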
"""
G = nx.empty_graph(0, create_using, default=nx.MultiGraph)
if G.is_directed():
raise nx.NetworkXError("Directed Graph not supported")
# length of each sequence
naseq = len(aseq)
nbseq = len(bseq)
suma = sum(aseq)
sumb = sum(bseq)
if suma != sumb:
raise nx.NetworkXError(
f"invalid degree sequences: sum(aseq)={suma} != sum(bseq)={sumb}"
)
G = _add_nodes_with_bipartite_label(G, naseq, nbseq)
if len(aseq) == 0 or max(aseq) == 0:
return G # done if no edges
# build list of degree-repeated vertex numbers
astubs = [[aseq[v], v] for v in range(0, naseq)]
bstubs = [[bseq[v - naseq], v] for v in range(naseq, naseq + nbseq)]
while astubs:
astubs.sort()
(degree, u) = astubs.pop()  # pop the largest-degree node in set A
if degree == 0:
break # done, all are zero
bstubs.sort()
small = bstubs[0 : degree // 2]  # low-degree targets
large = bstubs[(-degree + degree // 2) :]  # high-degree targets
stubs = [x for z in zip(large, small) for x in z]  # interleave high and low
if len(stubs) < len(small) + len(large): # check for zip truncation
stubs.append(large.pop())
for target in stubs:
v = target[1]
G.add_edge(u, v)
target[0] -= 1 # note this updates bstubs too.
if target[0] == 0:
bstubs.remove(target)
G.name = "bipartite_alternating_havel_hakimi_graph"
return G
@py_random_state(3)
def preferential_attachment_graph(aseq, p, create_using=None, seed=None):
"""Create a bipartite graph with a preferential attachment model from
a given single degree sequence.
The graph is composed of two partitions. Set A has nodes 0 to
(len(aseq) - 1) and set B has nodes starting with node len(aseq).
The number of nodes in set B is random.
Parameters
----------
aseq : list
Degree sequence for node set A.
p : float
Probability that a new bottom node is added.
create_using : NetworkX graph instance, optional
Return graph of this type.
seed : integer, random_state, or None (default)
Indicator of random number generation state.
See :ref:`Randomness<randomness>`.
References
----------
.. [1] Guillaume, J.L. and Latapy, M.,
Bipartite graphs as models of complex networks.
Physica A: Statistical Mechanics and its Applications,
2006, 371(2), pp.795-813.
.. [2] Jean-Loup Guillaume and Matthieu Latapy,
Bipartite structure of all complex networks,
Inf. Process. Lett. 90, 2004, pg. 215-221
https://doi.org/10.1016/j.ipl.2004.03.007
Notes
-----
The nodes are assigned the attribute 'bipartite' with the value 0 or 1
to indicate which bipartite set the node belongs to.
This function is not imported in the main namespace.
To use it, use nx.bipartite.preferential_attachment_graph.
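Examples
--------
A minimal sketch; the top-set degrees always match aseq exactly, while
the size of the bottom set is random:
>>> from networkx.algorithms import bipartite
>>> G = bipartite.preferential_attachment_graph([1, 2, 3], 0.5, seed=42)
>>> [G.degree(v) for v in range(3)]
[1, 2, 3]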
"""
G = nx.empty_graph(0, create_using, default=nx.MultiGraph)
if G.is_directed():
raise nx.NetworkXError("Directed Graph not supported")
if p < 0 or p > 1:
raise nx.NetworkXError(f"probability {p} not in [0, 1]")
naseq = len(aseq)
G = _add_nodes_with_bipartite_label(G, naseq, 0)
vv = [[v] * aseq[v] for v in range(0, naseq)]
while vv:
while vv[0]:
source = vv[0][0]
vv[0].remove(source)
if seed.random() < p or len(G) == naseq:
target = len(G)
G.add_node(target, bipartite=1)
G.add_edge(source, target)
else:
bb = [[b] * G.degree(b) for b in range(naseq, len(G))]
# flatten the list of lists into a single list of stubs
bbstubs = [x for subseq in bb for x in subseq]
# choose preferentially a bottom node.
target = seed.choice(bbstubs)
G.add_node(target, bipartite=1)
G.add_edge(source, target)
vv.remove(vv[0])
G.name = "bipartite_preferential_attachment_model"
return G
@py_random_state(3)
def random_graph(n, m, p, seed=None, directed=False):
"""Returns a bipartite random graph.
This is a bipartite version of the binomial (Erdős-Rényi) graph.
The graph is composed of two partitions. Set A has nodes 0 to
(n - 1) and set B has nodes n to (n + m - 1).
Parameters
----------
n : int
The number of nodes in the first bipartite set.
m : int
The number of nodes in the second bipartite set.
p : float
Probability for edge creation.
seed : integer, random_state, or None (default)
Indicator of random number generation state.
See :ref:`Randomness<randomness>`.
directed : bool, optional (default=False)
If True return a directed graph
Notes
-----
The bipartite random graph algorithm chooses each of the n*m (undirected)
or 2*n*m (directed) possible edges with probability p.
This algorithm runs in $O(n + m + k)$ time, where $k$ is the expected
number of edges (in [1] the symbol $m$ denotes the edge count, not the
size of set B).
The nodes are assigned the attribute 'bipartite' with the value 0 or 1
to indicate which bipartite set the node belongs to.
This function is not imported in the main namespace.
To use it, use nx.bipartite.random_graph.
See Also
--------
gnp_random_graph, configuration_model
References
----------
.. [1] Vladimir Batagelj and Ulrik Brandes,
"Efficient generation of large random networks",
Phys. Rev. E, 71, 036113, 2005.
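Examples
--------
A minimal sketch; since the edges are random, only the node count is
asserted:
>>> from networkx.algorithms import bipartite
>>> G = bipartite.random_graph(5, 7, 0.2, seed=42)
>>> len(G)
12
>>> bipartite.is_bipartite(G)
True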
"""
G = nx.Graph()
G = _add_nodes_with_bipartite_label(G, n, m)
if directed:
G = nx.DiGraph(G)
G.name = f"fast_gnp_random_graph({n},{m},{p})"
if p <= 0:
return G
if p >= 1:
return nx.complete_bipartite_graph(n, m)
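# Geometric-skip sampling in the style of Batagelj and Brandes [1]:
# instead of testing all n*m pairs, jump over runs of non-edges with
# geometrically distributed gap lengths, so the loop below runs once
# per generated edge plus once per top node.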
lp = math.log(1.0 - p)
v = 0
w = -1
while v < n:
lr = math.log(1.0 - seed.random())
w = w + 1 + int(lr / lp)
while w >= m and v < n:
w = w - m
v = v + 1
if v < n:
G.add_edge(v, n + w)
if directed:
# use the same algorithm to
# add edges from the "m" to "n" set
v = 0
w = -1
while v < n:
lr = math.log(1.0 - seed.random())
w = w + 1 + int(lr / lp)
while w >= m and v < n:
w = w - m
v = v + 1
if v < n:
G.add_edge(n + w, v)
return G
@py_random_state(3)
def gnmk_random_graph(n, m, k, seed=None, directed=False):
"""Returns a random bipartite graph G_{n,m,k}.
Produces a bipartite graph chosen randomly out of the set of all graphs
with n top nodes, m bottom nodes, and k edges.
The graph is composed of two sets of nodes.
Set A has nodes 0 to (n - 1) and set B has nodes n to (n + m - 1).
Parameters
----------
n : int
The number of nodes in the first bipartite set.
m : int
The number of nodes in the second bipartite set.
k : int
The number of edges
seed : integer, random_state, or None (default)
Indicator of random number generation state.
See :ref:`Randomness<randomness>`.
directed : bool, optional (default=False)
If True return a directed graph
Examples
--------
>>> from networkx.algorithms import bipartite
>>> G = bipartite.gnmk_random_graph(10, 20, 50)
See Also
--------
gnm_random_graph
Notes
-----
If k >= m * n, a complete bipartite graph is returned.
This graph is a bipartite version of the `G_{nm}` random graph model.
The nodes are assigned the attribute 'bipartite' with the value 0 or 1
to indicate which bipartite set the node belongs to.
This function is not imported in the main namespace.
To use it, use nx.bipartite.gnmk_random_graph.
"""
G = nx.Graph()
G = _add_nodes_with_bipartite_label(G, n, m)
if directed:
G = nx.DiGraph(G)
G.name = f"bipartite_gnm_random_graph({n},{m},{k})"
max_edges = n * m  # maximum possible edges in a bipartite graph
if k >= max_edges:  # Maybe we should raise an exception here
return nx.complete_bipartite_graph(n, m, create_using=G)
top = [v for v, d in G.nodes(data=True) if d["bipartite"] == 0]  # avoid shadowing the parameter n
bottom = list(set(G) - set(top))
edge_count = 0
while edge_count < k:
# generate a random candidate edge (u, v); retry on duplicates
u = seed.choice(top)
v = seed.choice(bottom)
if v not in G[u]:
G.add_edge(u, v)
edge_count += 1
return G
def _add_nodes_with_bipartite_label(G, lena, lenb):
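# Helper: add lena + lenb integer nodes and label the first lena with
# bipartite=0 and the remaining lenb with bipartite=1.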
G.add_nodes_from(range(0, lena + lenb))
b = dict(zip(range(0, lena), [0] * lena))
b.update(dict(zip(range(lena, lena + lenb), [1] * lenb)))
nx.set_node_attributes(G, b, "bipartite")
return G
