|
10 | 10 | "name": "python",
|
11 | 11 | "nbconvert_exporter": "python",
|
12 | 12 | "pygments_lexer": "ipython3",
|
13 |
| - "version": "3.8.8-final" |
| 13 | + "version": "3.8.8" |
14 | 14 | },
|
15 | 15 | "orig_nbformat": 2,
|
16 | 16 | "kernelspec": {
|
17 |
| - "name": "python388jvsc74a57bd083ad9dc287f1bd68e1373a062e5fec25449c786b53be0804b995b765c2d61fc9", |
18 |
| - "display_name": "Python 3.8.8 64-bit (conda)" |
| 17 | + "name": "python3", |
| 18 | + "display_name": "Python 3.8.8 64-bit ('base': conda)" |
| 19 | + }, |
| 20 | + "interpreter": { |
| 21 | + "hash": "83ad9dc287f1bd68e1373a062e5fec25449c786b53be0804b995b765c2d61fc9" |
19 | 22 | }
|
20 | 23 | },
|
21 | 24 | "nbformat": 4,
|
|
25 | 28 | "cell_type": "markdown",
|
26 | 29 | "metadata": {},
|
27 | 30 | "source": [
|
28 |
| - "Description: the Jupyter Notebook file containing the code for getting and briefly validating data of stock symbols and corresponding names\n", |
| 31 | + "Description: the Jupyter Notebook file containing the code for getting, validating, and updating data of stock symbols and corresponding names\n", |
29 | 32 | "\n",
|
30 |
| - "Version: 1.0.2.20210121\n", |
| 33 | + "Version: 1.1.0.20210620\n", |
31 | 34 | "\n",
|
32 | 35 | "Author: Arvin Zhao\n",
|
33 | 36 | "\n",
|
34 | 37 | "Last editors: Arvin Zhao\n",
|
35 | 38 | "\n",
|
36 |
| - "Last time when data was retrieved: 2021-01-17 12:36:00" |
| 39 | + "Last time when data was retrieved: 2021-06-20 12:36:00" |
37 | 40 | ]
|
38 | 41 | },
|
39 | 42 | {
|
|
54 | 57 | "output_type": "stream",
|
55 | 58 | "name": "stdout",
|
56 | 59 | "text": [
|
57 |
| - "The number of stocks: 4224 \nThe first 10 records:\n ts_code name\n0 000001.SZ 平安银行\n1 000002.SZ 万科A\n2 000004.SZ 国华网安\n3 000005.SZ 世纪星源\n4 000006.SZ 深振业A\n5 000007.SZ 全新好\n6 000008.SZ 神州高铁\n7 000009.SZ 中国宝安\n8 000010.SZ 美丽生态\n9 000011.SZ 深物业A\n" |
| 60 | + "The number of stocks: 4347 \nThe first 10 records:\n ts_code name\n0 000001.SZ 平安银行\n1 000002.SZ 万科A\n2 000004.SZ 国华网安\n3 000005.SZ ST星源\n4 000006.SZ 深振业A\n5 000007.SZ *ST全新\n6 000008.SZ 神州高铁\n7 000009.SZ 中国宝安\n8 000010.SZ 美丽生态\n9 000011.SZ 深物业A\n" |
58 | 61 | ]
|
59 | 62 | }
|
60 | 63 | ],
|
61 | 64 | "source": [
|
62 | 65 | "import tushare as ts\n",
|
63 | 66 | "\n",
|
64 |
| - "ts_pro = ts.pro_api('f5bf618f99204f6f3cc805606ae262f6f514e526c7ad950ce2e4214d') # Initialise Tushare Pro API with my token.\n", |
65 |
| - "data = ts_pro.stock_basic(exchange = '', list_status = 'L', fields = 'ts_code, name') # Use the specified API to get a DataFrame object containing the specified data.\n", |
| 67 | + "ts_pro = ts.pro_api('f5bf618f99204f6f3cc805606ae262f6f514e526c7ad950ce2e4214d') # Initialise Tushare Pro API with my token.\n", |
| 68 | + "data = ts_pro.stock_basic(exchange = '', list_status = 'L', fields = 'ts_code, name') # Use the specified API to get a DataFrame object containing the specified data.\n", |
66 | 69 | "\n",
|
67 | 70 | "print('The number of stocks:', data.shape[0], '\\nThe first 10 records:\\n', data.head(n = 10))"
|
68 | 71 | ]
|
|
96 | 99 | "cell_type": "markdown",
|
97 | 100 | "metadata": {},
|
98 | 101 | "source": [
|
99 |
| - "<h2>Brief Validation</h2>\n", |
| 102 | + "<h2>Brief Validation & Update</h2>\n", |
100 | 103 | "\n",
|
101 | 104 | "\"mbcs\" represents the Windows ANSI code page of the system locale (e.g. GBK/cp936 on Chinese Windows), not a Unicode encoding. **Please do use Notepad instead of other editors like VS Code to modify the data file to ensure the encoding is not changed to UTF-8 or the others.**"
|
102 | 105 | ]
|
103 | 106 | },
|
104 | 107 | {
|
105 | 108 | "cell_type": "code",
|
106 |
| - "execution_count": 5, |
| 109 | + "execution_count": 4, |
107 | 110 | "metadata": {
|
108 | 111 | "tags": []
|
109 | 112 | },
|
|
112 | 115 | "output_type": "stream",
|
113 | 116 | "name": "stdout",
|
114 | 117 | "text": [
|
115 |
| - "Same data? False\n" |
| 118 | + "Same data? False\nSame data now? True\n" |
116 | 119 | ]
|
117 | 120 | }
|
118 | 121 | ],
|
119 | 122 | "source": [
|
120 |
| - "import json\n", |
| 123 | + "import json, shutil\n", |
| 124 | + "\n", |
| 125 | + "target_dir = os.path.join(os.path.dirname(os.getcwd()), 'ShSzStockHelper', 'Resources') # Locate the app development directory containing a data file pending comparison/update.\n", |
| 126 | + "path_previous_data = os.path.join(target_dir, data_filename) # Locate the data file in the specified app development directory.\n", |
121 | 127 | "\n",
|
122 |
| - "list_new_data = json.load(open(data_filename, encoding = 'mbcs'))\n", |
123 |
| - "list_previous_data = json.load(open(os.path.join(os.path.dirname(os.getcwd()), 'ShSzStockHelper', 'Resources', data_filename), encoding = 'mbcs'))\n", |
| 128 | + "if os.path.exists(data_filename):\n", |
| 129 | + " if os.path.exists(target_dir):\n", |
| 130 | + " is_same = False\n", |
124 | 131 | "\n",
|
125 |
| - "symbol_key_name = 'ts_code'\n", |
| 132 | + " if os.path.exists(path_previous_data):\n", |
| 133 | + " list_new_data = json.load(open(data_filename, encoding = 'mbcs'))\n", |
| 134 | + " list_previous_data = json.load(open(path_previous_data, encoding = 'mbcs'))\n", |
| 135 | + " symbol_key_name = 'ts_code'\n", |
| 136 | + " is_same = sorted(list_new_data, key = lambda x : x[symbol_key_name]) == sorted(list_previous_data, key = lambda x : x[symbol_key_name])\n", |
| 137 | + " print('Same data?', is_same)\n", |
126 | 138 | "\n",
|
127 |
| - "print('Same data?', sorted(list_new_data, key = lambda x : x[symbol_key_name]) == sorted(list_previous_data, key = lambda x : x[symbol_key_name]))" |
| 139 | + " if not is_same:\n", |
| 140 | + " shutil.copy2(data_filename, target_dir)\n", |
| 141 | + " list_previous_data = json.load(open(path_previous_data, encoding = 'mbcs'))\n", |
| 142 | + " is_same = sorted(list_new_data, key = lambda x : x[symbol_key_name]) == sorted(list_previous_data, key = lambda x : x[symbol_key_name])\n", |
| 143 | + " print('Same data now?', is_same)\n", |
| 144 | + " else:\n", |
| 145 | + " print('Error! The specified app development directory does not exist.')\n", |
| 146 | + "else:\n", |
| 147 | + " print('Error! Please first get new data.')" |
128 | 148 | ]
|
129 | 149 | }
|
130 | 150 | ]
|
|
0 commit comments