-
Notifications
You must be signed in to change notification settings - Fork 25
Expand file tree
/
Copy path: pyproject.toml
More file actions
119 lines (109 loc) · 2.7 KB
/
pyproject.toml
File metadata and controls
119 lines (109 loc) · 2.7 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
# setuptools>=64 is required for PEP 660 editable installs; setuptools-scm>=8
# provides the version_file API used under [tool.setuptools_scm] below.
[build-system]
requires = ["setuptools>=64", "setuptools-scm>=8"]
build-backend = "setuptools.build_meta"

[project]
name = "sqlframe"
description = "Turning PySpark Into a Universal DataFrame API"
readme = "README.md"
license = { text = "MIT" }
authors = [{ name = "Ryan Eakman", email = "eakmanrq@gmail.com" }]
requires-python = ">=3.10"
# Version is derived from git tags via setuptools-scm (see [tool.setuptools_scm]).
dynamic = ["version"]
# PEP 508 specifiers, kept sorted alphabetically.
dependencies = [
    "more-itertools",
    "prettytable<4",
    "sqlglot>=28.0.0,<30.1",
    "typing_extensions",
]
classifiers = [
    "Development Status :: 5 - Production/Stable",
    "Intended Audience :: Developers",
    "Intended Audience :: Science/Research",
    "License :: OSI Approved :: MIT License",
    "Operating System :: OS Independent",
    "Programming Language :: SQL",
    "Programming Language :: Python :: 3 :: Only",
]

[project.urls]
Homepage = "https://github.com/eakmanrq/sqlframe"

# Per-engine extras (install e.g. `sqlframe[duckdb]`); extras and the entries
# inside each array are kept sorted alphabetically.
[project.optional-dependencies]
bigquery = [
    "google-cloud-bigquery[pandas]>=3,<4",
    "google-cloud-bigquery-storage>=2,<3",
]
databricks = [
    "databricks-sql-connector[pyarrow]>=3.6,<5",
]
dev = [
    "duckdb>=1.2,<1.6",
    "findspark>=2,<3",
    # NOTE(review): original rationale — "3.0.0 dropped support for Python 3.9
    # and this is needed by pytest-postgresql" — but requires-python is now
    # >=3.10; confirm whether this upper pin is still necessary.
    "mirakuru<3.0.3",
    "openai>=1.30,<3",
    "pandas>=2,<4",
    "pandas-stubs>=2,<4",
    "pre-commit>=3.7,<5",
    "psycopg>=3.1,<4",
    "pyarrow>=10,<24",
    "pyspark>=4,<4.2",
    "pytest>=8.2.0,<9.1",
    "pytest-forked",
    "pytest-postgresql>=6,<9",
    "pytest-rerunfailures",
    "pytest-xdist>=3.6,<3.9",
    "ruff>=0.4.4,<0.16",
    "ty>=0.0.18",
    "types-psycopg2>=2.9,<3",
]
docs = [
    "mkdocs==1.4.2",
    "mkdocs-include-markdown-plugin==6.0.6",
    "mkdocs-material==9.0.5",
    "mkdocs-material-extensions==1.1.1",
    "pymdown-extensions",
]
duckdb = [
    "duckdb>=1.2,<1.6",
    "pandas>=2,<4",
]
openai = [
    "openai>=1.30,<3",
]
pandas = [
    "pandas>=2,<4",
]
postgres = [
    "psycopg2>=2.8,<3",
]
redshift = [
    "redshift_connector>=2.1.1,<2.2.0",
]
snowflake = [
    "snowflake-connector-python[secure-local-storage]>=3.10.0,<4.5",
]
spark = [
    "pyspark>=4,<4.2",
]

[tool.setuptools.packages.find]
include = ["sqlframe", "sqlframe.*"]

# Ship the PEP 561 marker and stub files inside the wheel.
[tool.setuptools.package-data]
sqlframe = ["py.typed", "*.pyi", "**/*.pyi"]

[tool.setuptools_scm]
# Generated at build time; should be git-ignored and not edited by hand.
version_file = "sqlframe/_version.py"
# Used when building outside a git checkout (e.g. from an sdist without metadata).
fallback_version = "0.0.0"
# Drop the "+local" suffix so dev builds remain uploadable to package indexes.
local_scheme = "no-local-version"

[tool.pytest.ini_options]
markers = [
    "bigquery: test for BigQuery",
    "duckdb: test for DuckDB",
    "local: tests that don't rely on external connections",
    "postgres: test for Postgres",
]
# Serial by default (-n 0); --dist=loadgroup keeps xdist-grouped tests on one
# worker when parallelism is enabled via the command line.
addopts = "-n 0 --dist=loadgroup"

[tool.ty.rules]
unresolved-import = "ignore"

[tool.ruff]
line-length = 100

[tool.ruff.lint]
# Only import-sorting (isort) rules are enforced.
select = ["I"]
ignore = ["E721", "E741"]